problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_33834 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1407 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
System calls display is wrong for x86 syscalls on x86-64
When we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.
System calls display is wrong for x86 syscalls on x86-64
When we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/constants/__init__.py`
Content:
```
1 import pwndbg.gdblib.arch
2
3 from . import aarch64
4 from . import amd64
5 from . import arm
6 from . import i386
7 from . import mips
8 from . import thumb
9
10 arches = {"arm": arm, "armcm": arm, "i386": i386, "mips": mips, "x86-64": amd64, "aarch64": aarch64}
11
12
13 def syscall(value):
14 """
15 Given a value for a syscall number (e.g. execve == 11), return
16 the *name* of the syscall.
17 """
18 arch = arches.get(pwndbg.gdblib.arch.current, None)
19
20 if not arch:
21 return None
22
23 prefix = "__NR_"
24
25 for k, v in arch.__dict__.items():
26 if v != value:
27 continue
28
29 if not k.startswith(prefix):
30 continue
31
32 return k[len(prefix) :].lower()
33
34 return None
35
```
Path: `pwndbg/arguments.py`
Content:
```
1 """
2 Allows describing functions, specifically enumerating arguments which
3 may be passed in a combination of registers and stack values.
4 """
5 import gdb
6 from capstone import CS_GRP_CALL
7 from capstone import CS_GRP_INT
8
9 import pwndbg.chain
10 import pwndbg.constants
11 import pwndbg.disasm
12 import pwndbg.gdblib.arch
13 import pwndbg.gdblib.memory
14 import pwndbg.gdblib.regs
15 import pwndbg.gdblib.symbol
16 import pwndbg.gdblib.typeinfo
17 import pwndbg.ida
18 import pwndbg.lib.abi
19 import pwndbg.lib.funcparser
20 import pwndbg.lib.functions
21 from pwndbg.commands.nearpc import c as N
22
23 ida_replacements = {
24 "__int64": "signed long long int",
25 "__int32": "signed int",
26 "__int16": "signed short",
27 "__int8": "signed char",
28 "__uint64": "unsigned long long int",
29 "__uint32": "unsigned int",
30 "__uint16": "unsigned short",
31 "__uint8": "unsigned char",
32 "_BOOL_1": "unsigned char",
33 "_BOOL_2": "unsigned short",
34 "_BOOL_4": "unsigned int",
35 "_BYTE": "unsigned char",
36 "_WORD": "unsigned short",
37 "_DWORD": "unsigned int",
38 "_QWORD": "unsigned long long",
39 "__pure": "",
40 "__hidden": "",
41 "__return_ptr": "",
42 "__struct_ptr": "",
43 "__array_ptr": "",
44 "__fastcall": "",
45 "__cdecl": "",
46 "__thiscall": "",
47 "__userpurge": "",
48 }
49
50
51 def get_syscall_name(instruction):
52 if CS_GRP_INT not in instruction.groups:
53 return None
54
55 syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register
56
57 # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80
58 if syscall_register in ("eax", "rax"):
59 mnemonic = instruction.mnemonic
60 if not (mnemonic == "syscall" or (mnemonic == "int" and instruction.op_str == "0x80")):
61 return None
62
63 syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)
64 return pwndbg.constants.syscall(syscall_number) or "<unk_%d>" % syscall_number
65
66
67 def get(instruction):
68 """
69 Returns an array containing the arguments to the current function,
70 if $pc is a 'call' or 'bl' type instruction.
71
72 Otherwise, returns None.
73 """
74 n_args_default = 4
75
76 if instruction is None:
77 return []
78
79 if instruction.address != pwndbg.gdblib.regs.pc:
80 return []
81
82 if CS_GRP_CALL in instruction.groups:
83 try:
84 abi = pwndbg.lib.abi.ABI.default()
85 except KeyError:
86 return []
87
88 # Not sure of any OS which allows multiple operands on
89 # a call instruction.
90 assert len(instruction.operands) == 1
91
92 target = instruction.operands[0].int
93
94 if not target:
95 return []
96
97 name = pwndbg.gdblib.symbol.get(target)
98 if not name:
99 return []
100 elif CS_GRP_INT in instruction.groups:
101 # Get the syscall number and name
102 name = get_syscall_name(instruction)
103 abi = pwndbg.lib.abi.ABI.syscall()
104 target = None
105
106 if name is None:
107 return []
108 else:
109 return []
110
111 result = []
112 name = name or ""
113
114 sym = gdb.lookup_symbol(name)
115 name = name.replace("isoc99_", "") # __isoc99_sscanf
116 name = name.replace("@plt", "") # getpwiod@plt
117
118 # If we have particular `XXX_chk` function in our database, we use it.
119 # Otherwise, we show args for its unchecked version.
120 # We also lstrip `_` in here, as e.g. `__printf_chk` needs the underscores.
121 if name not in pwndbg.lib.functions.functions:
122 name = name.replace("_chk", "")
123 name = name.strip().lstrip("_") # _malloc
124
125 func = pwndbg.lib.functions.functions.get(name, None)
126
127 # Try to extract the data from GDB.
128 # Note that this is currently broken, pending acceptance of
129 # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html
130 if sym and sym[0]:
131 try:
132 n_args_default = len(sym[0].type.fields())
133 except TypeError:
134 pass
135
136 # Try to grab the data out of IDA
137 if not func and target:
138 typename = pwndbg.ida.GetType(target)
139
140 if typename:
141 typename += ";"
142
143 # GetType() does not include the name.
144 typename = typename.replace("(", " function_name(", 1)
145
146 for k, v in ida_replacements.items():
147 typename = typename.replace(k, v)
148
149 func = pwndbg.lib.funcparser.ExtractFuncDeclFromSource(typename + ";")
150
151 if func:
152 args = func.args
153 else:
154 args = (
155 pwndbg.lib.functions.Argument("int", 0, argname(i, abi)) for i in range(n_args_default)
156 )
157
158 for i, arg in enumerate(args):
159 result.append((arg, argument(i, abi)))
160
161 return result
162
163
164 def argname(n, abi=None):
165 abi = abi or pwndbg.lib.abi.ABI.default()
166 regs = abi.register_arguments
167
168 if n < len(regs):
169 return regs[n]
170
171 return "arg[%i]" % n
172
173
174 def argument(n, abi=None):
175 """
176 Returns the nth argument, as if $pc were a 'call' or 'bl' type
177 instruction.
178 Works only for ABIs that use registers for arguments.
179 """
180 abi = abi or pwndbg.lib.abi.ABI.default()
181 regs = abi.register_arguments
182
183 if n < len(regs):
184 return getattr(pwndbg.gdblib.regs, regs[n])
185
186 n -= len(regs)
187
188 sp = pwndbg.gdblib.regs.sp + (n * pwndbg.gdblib.arch.ptrsize)
189
190 return int(pwndbg.gdblib.memory.poi(pwndbg.gdblib.typeinfo.ppvoid, sp))
191
192
193 def arguments(abi=None):
194 """
195 Yields (arg_name, arg_value) tuples for arguments from a given ABI.
196 Works only for ABIs that use registers for arguments.
197 """
198 abi = abi or pwndbg.lib.abi.ABI.default()
199 regs = abi.register_arguments
200
201 for i in range(len(regs)):
202 yield argname(i, abi), argument(i, abi)
203
204
205 def format_args(instruction):
206 result = []
207 for arg, value in get(instruction):
208 code = arg.type != "char"
209 pretty = pwndbg.chain.format(value, code=code)
210
211 # Enhance args display
212 if arg.name == "fd" and isinstance(value, int):
213 path = pwndbg.gdblib.file.readlink("/proc/%d/fd/%d" % (pwndbg.gdblib.proc.pid, value))
214 if path:
215 pretty += " (%s)" % path
216
217 result.append("%-10s %s" % (N.argument(arg.name) + ":", pretty))
218 return result
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/arguments.py b/pwndbg/arguments.py
--- a/pwndbg/arguments.py
+++ b/pwndbg/arguments.py
@@ -53,15 +53,26 @@
return None
syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register
+ syscall_arch = pwndbg.gdblib.arch.current
- # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80
+ # On x86/x64 `syscall` and `int <value>` instructions are in CS_GRP_INT
+ # but only `syscall` and `int 0x80` actually execute syscalls on Linux.
+ # So here, we return no syscall name for other instructions and we also
+ # handle a case when 32-bit syscalls are executed on x64
if syscall_register in ("eax", "rax"):
mnemonic = instruction.mnemonic
- if not (mnemonic == "syscall" or (mnemonic == "int" and instruction.op_str == "0x80")):
+
+ is_32bit = mnemonic == "int" and instruction.op_str == "0x80"
+ if not (mnemonic == "syscall" or is_32bit):
return None
+ # On x64 the int 0x80 instruction executes 32-bit syscalls from i386
+ # On x86, the syscall_arch is already i386, so its all fine
+ if is_32bit:
+ syscall_arch = "i386"
+
syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)
- return pwndbg.constants.syscall(syscall_number) or "<unk_%d>" % syscall_number
+ return pwndbg.constants.syscall(syscall_number, syscall_arch) or "<unk_%d>" % syscall_number
def get(instruction):
diff --git a/pwndbg/constants/__init__.py b/pwndbg/constants/__init__.py
--- a/pwndbg/constants/__init__.py
+++ b/pwndbg/constants/__init__.py
@@ -10,20 +10,20 @@
arches = {"arm": arm, "armcm": arm, "i386": i386, "mips": mips, "x86-64": amd64, "aarch64": aarch64}
-def syscall(value):
+def syscall(number, arch):
"""
- Given a value for a syscall number (e.g. execve == 11), return
- the *name* of the syscall.
+ Given a syscall number and architecture, returns the name of the syscall.
+ E.g. execve == 59 on x86-64
"""
- arch = arches.get(pwndbg.gdblib.arch.current, None)
+ arch = arches.get(arch, None)
- if not arch:
+ if arch is None:
return None
prefix = "__NR_"
for k, v in arch.__dict__.items():
- if v != value:
+ if v != number:
continue
if not k.startswith(prefix):
|
{"golden_diff": "diff --git a/pwndbg/arguments.py b/pwndbg/arguments.py\n--- a/pwndbg/arguments.py\n+++ b/pwndbg/arguments.py\n@@ -53,15 +53,26 @@\n return None\n \n syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register\n+ syscall_arch = pwndbg.gdblib.arch.current\n \n- # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80\n+ # On x86/x64 `syscall` and `int <value>` instructions are in CS_GRP_INT\n+ # but only `syscall` and `int 0x80` actually execute syscalls on Linux.\n+ # So here, we return no syscall name for other instructions and we also\n+ # handle a case when 32-bit syscalls are executed on x64\n if syscall_register in (\"eax\", \"rax\"):\n mnemonic = instruction.mnemonic\n- if not (mnemonic == \"syscall\" or (mnemonic == \"int\" and instruction.op_str == \"0x80\")):\n+\n+ is_32bit = mnemonic == \"int\" and instruction.op_str == \"0x80\"\n+ if not (mnemonic == \"syscall\" or is_32bit):\n return None\n \n+ # On x64 the int 0x80 instruction executes 32-bit syscalls from i386\n+ # On x86, the syscall_arch is already i386, so its all fine\n+ if is_32bit:\n+ syscall_arch = \"i386\"\n+\n syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)\n- return pwndbg.constants.syscall(syscall_number) or \"<unk_%d>\" % syscall_number\n+ return pwndbg.constants.syscall(syscall_number, syscall_arch) or \"<unk_%d>\" % syscall_number\n \n \n def get(instruction):\ndiff --git a/pwndbg/constants/__init__.py b/pwndbg/constants/__init__.py\n--- a/pwndbg/constants/__init__.py\n+++ b/pwndbg/constants/__init__.py\n@@ -10,20 +10,20 @@\n arches = {\"arm\": arm, \"armcm\": arm, \"i386\": i386, \"mips\": mips, \"x86-64\": amd64, \"aarch64\": aarch64}\n \n \n-def syscall(value):\n+def syscall(number, arch):\n \"\"\"\n- Given a value for a syscall number (e.g. execve == 11), return\n- the *name* of the syscall.\n+ Given a syscall number and architecture, returns the name of the syscall.\n+ E.g. execve == 59 on x86-64\n \"\"\"\n- arch = arches.get(pwndbg.gdblib.arch.current, None)\n+ arch = arches.get(arch, None)\n \n- if not arch:\n+ if arch is None:\n return None\n \n prefix = \"__NR_\"\n \n for k, v in arch.__dict__.items():\n- if v != value:\n+ if v != number:\n continue\n \n if not k.startswith(prefix):\n", "issue": "System calls display is wrong for x86 syscalls on x86-64\nWhen we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.\nSystem calls display is wrong for x86 syscalls on x86-64\nWhen we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.\n", "before_files": [{"content": "import pwndbg.gdblib.arch\n\nfrom . import aarch64\nfrom . import amd64\nfrom . import arm\nfrom . import i386\nfrom . import mips\nfrom . import thumb\n\narches = {\"arm\": arm, \"armcm\": arm, \"i386\": i386, \"mips\": mips, \"x86-64\": amd64, \"aarch64\": aarch64}\n\n\ndef syscall(value):\n \"\"\"\n Given a value for a syscall number (e.g. 
execve == 11), return\n the *name* of the syscall.\n \"\"\"\n arch = arches.get(pwndbg.gdblib.arch.current, None)\n\n if not arch:\n return None\n\n prefix = \"__NR_\"\n\n for k, v in arch.__dict__.items():\n if v != value:\n continue\n\n if not k.startswith(prefix):\n continue\n\n return k[len(prefix) :].lower()\n\n return None\n", "path": "pwndbg/constants/__init__.py"}, {"content": "\"\"\"\nAllows describing functions, specifically enumerating arguments which\nmay be passed in a combination of registers and stack values.\n\"\"\"\nimport gdb\nfrom capstone import CS_GRP_CALL\nfrom capstone import CS_GRP_INT\n\nimport pwndbg.chain\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.regs\nimport pwndbg.gdblib.symbol\nimport pwndbg.gdblib.typeinfo\nimport pwndbg.ida\nimport pwndbg.lib.abi\nimport pwndbg.lib.funcparser\nimport pwndbg.lib.functions\nfrom pwndbg.commands.nearpc import c as N\n\nida_replacements = {\n \"__int64\": \"signed long long int\",\n \"__int32\": \"signed int\",\n \"__int16\": \"signed short\",\n \"__int8\": \"signed char\",\n \"__uint64\": \"unsigned long long int\",\n \"__uint32\": \"unsigned int\",\n \"__uint16\": \"unsigned short\",\n \"__uint8\": \"unsigned char\",\n \"_BOOL_1\": \"unsigned char\",\n \"_BOOL_2\": \"unsigned short\",\n \"_BOOL_4\": \"unsigned int\",\n \"_BYTE\": \"unsigned char\",\n \"_WORD\": \"unsigned short\",\n \"_DWORD\": \"unsigned int\",\n \"_QWORD\": \"unsigned long long\",\n \"__pure\": \"\",\n \"__hidden\": \"\",\n \"__return_ptr\": \"\",\n \"__struct_ptr\": \"\",\n \"__array_ptr\": \"\",\n \"__fastcall\": \"\",\n \"__cdecl\": \"\",\n \"__thiscall\": \"\",\n \"__userpurge\": \"\",\n}\n\n\ndef get_syscall_name(instruction):\n if CS_GRP_INT not in instruction.groups:\n return None\n\n syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register\n\n # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80\n if syscall_register in (\"eax\", \"rax\"):\n mnemonic = instruction.mnemonic\n if not (mnemonic == \"syscall\" or (mnemonic == \"int\" and instruction.op_str == \"0x80\")):\n return None\n\n syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)\n return pwndbg.constants.syscall(syscall_number) or \"<unk_%d>\" % syscall_number\n\n\ndef get(instruction):\n \"\"\"\n Returns an array containing the arguments to the current function,\n if $pc is a 'call' or 'bl' type instruction.\n\n Otherwise, returns None.\n \"\"\"\n n_args_default = 4\n\n if instruction is None:\n return []\n\n if instruction.address != pwndbg.gdblib.regs.pc:\n return []\n\n if CS_GRP_CALL in instruction.groups:\n try:\n abi = pwndbg.lib.abi.ABI.default()\n except KeyError:\n return []\n\n # Not sure of any OS which allows multiple operands on\n # a call instruction.\n assert len(instruction.operands) == 1\n\n target = instruction.operands[0].int\n\n if not target:\n return []\n\n name = pwndbg.gdblib.symbol.get(target)\n if not name:\n return []\n elif CS_GRP_INT in instruction.groups:\n # Get the syscall number and name\n name = get_syscall_name(instruction)\n abi = pwndbg.lib.abi.ABI.syscall()\n target = None\n\n if name is None:\n return []\n else:\n return []\n\n result = []\n name = name or \"\"\n\n sym = gdb.lookup_symbol(name)\n name = name.replace(\"isoc99_\", \"\") # __isoc99_sscanf\n name = name.replace(\"@plt\", \"\") # getpwiod@plt\n\n # If we have particular `XXX_chk` function in our database, we use it.\n # Otherwise, we show args for 
its unchecked version.\n # We also lstrip `_` in here, as e.g. `__printf_chk` needs the underscores.\n if name not in pwndbg.lib.functions.functions:\n name = name.replace(\"_chk\", \"\")\n name = name.strip().lstrip(\"_\") # _malloc\n\n func = pwndbg.lib.functions.functions.get(name, None)\n\n # Try to extract the data from GDB.\n # Note that this is currently broken, pending acceptance of\n # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html\n if sym and sym[0]:\n try:\n n_args_default = len(sym[0].type.fields())\n except TypeError:\n pass\n\n # Try to grab the data out of IDA\n if not func and target:\n typename = pwndbg.ida.GetType(target)\n\n if typename:\n typename += \";\"\n\n # GetType() does not include the name.\n typename = typename.replace(\"(\", \" function_name(\", 1)\n\n for k, v in ida_replacements.items():\n typename = typename.replace(k, v)\n\n func = pwndbg.lib.funcparser.ExtractFuncDeclFromSource(typename + \";\")\n\n if func:\n args = func.args\n else:\n args = (\n pwndbg.lib.functions.Argument(\"int\", 0, argname(i, abi)) for i in range(n_args_default)\n )\n\n for i, arg in enumerate(args):\n result.append((arg, argument(i, abi)))\n\n return result\n\n\ndef argname(n, abi=None):\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return regs[n]\n\n return \"arg[%i]\" % n\n\n\ndef argument(n, abi=None):\n \"\"\"\n Returns the nth argument, as if $pc were a 'call' or 'bl' type\n instruction.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return getattr(pwndbg.gdblib.regs, regs[n])\n\n n -= len(regs)\n\n sp = pwndbg.gdblib.regs.sp + (n * pwndbg.gdblib.arch.ptrsize)\n\n return int(pwndbg.gdblib.memory.poi(pwndbg.gdblib.typeinfo.ppvoid, sp))\n\n\ndef arguments(abi=None):\n \"\"\"\n Yields (arg_name, arg_value) tuples for arguments from a given ABI.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n for i in range(len(regs)):\n yield argname(i, abi), argument(i, abi)\n\n\ndef format_args(instruction):\n result = []\n for arg, value in get(instruction):\n code = arg.type != \"char\"\n pretty = pwndbg.chain.format(value, code=code)\n\n # Enhance args display\n if arg.name == \"fd\" and isinstance(value, int):\n path = pwndbg.gdblib.file.readlink(\"/proc/%d/fd/%d\" % (pwndbg.gdblib.proc.pid, value))\n if path:\n pretty += \" (%s)\" % path\n\n result.append(\"%-10s %s\" % (N.argument(arg.name) + \":\", pretty))\n return result\n", "path": "pwndbg/arguments.py"}], "after_files": [{"content": "import pwndbg.gdblib.arch\n\nfrom . import aarch64\nfrom . import amd64\nfrom . import arm\nfrom . import i386\nfrom . import mips\nfrom . import thumb\n\narches = {\"arm\": arm, \"armcm\": arm, \"i386\": i386, \"mips\": mips, \"x86-64\": amd64, \"aarch64\": aarch64}\n\n\ndef syscall(number, arch):\n \"\"\"\n Given a syscall number and architecture, returns the name of the syscall.\n E.g. 
execve == 59 on x86-64\n \"\"\"\n arch = arches.get(arch, None)\n\n if arch is None:\n return None\n\n prefix = \"__NR_\"\n\n for k, v in arch.__dict__.items():\n if v != number:\n continue\n\n if not k.startswith(prefix):\n continue\n\n return k[len(prefix) :].lower()\n\n return None\n", "path": "pwndbg/constants/__init__.py"}, {"content": "\"\"\"\nAllows describing functions, specifically enumerating arguments which\nmay be passed in a combination of registers and stack values.\n\"\"\"\nimport gdb\nfrom capstone import CS_GRP_CALL\nfrom capstone import CS_GRP_INT\n\nimport pwndbg.chain\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.regs\nimport pwndbg.gdblib.symbol\nimport pwndbg.gdblib.typeinfo\nimport pwndbg.ida\nimport pwndbg.lib.abi\nimport pwndbg.lib.funcparser\nimport pwndbg.lib.functions\nfrom pwndbg.commands.nearpc import c as N\n\nida_replacements = {\n \"__int64\": \"signed long long int\",\n \"__int32\": \"signed int\",\n \"__int16\": \"signed short\",\n \"__int8\": \"signed char\",\n \"__uint64\": \"unsigned long long int\",\n \"__uint32\": \"unsigned int\",\n \"__uint16\": \"unsigned short\",\n \"__uint8\": \"unsigned char\",\n \"_BOOL_1\": \"unsigned char\",\n \"_BOOL_2\": \"unsigned short\",\n \"_BOOL_4\": \"unsigned int\",\n \"_BYTE\": \"unsigned char\",\n \"_WORD\": \"unsigned short\",\n \"_DWORD\": \"unsigned int\",\n \"_QWORD\": \"unsigned long long\",\n \"__pure\": \"\",\n \"__hidden\": \"\",\n \"__return_ptr\": \"\",\n \"__struct_ptr\": \"\",\n \"__array_ptr\": \"\",\n \"__fastcall\": \"\",\n \"__cdecl\": \"\",\n \"__thiscall\": \"\",\n \"__userpurge\": \"\",\n}\n\n\ndef get_syscall_name(instruction):\n if CS_GRP_INT not in instruction.groups:\n return None\n\n syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register\n syscall_arch = pwndbg.gdblib.arch.current\n\n # On x86/x64 `syscall` and `int <value>` instructions are in CS_GRP_INT\n # but only `syscall` and `int 0x80` actually execute syscalls on Linux.\n # So here, we return no syscall name for other instructions and we also\n # handle a case when 32-bit syscalls are executed on x64\n if syscall_register in (\"eax\", \"rax\"):\n mnemonic = instruction.mnemonic\n\n is_32bit = mnemonic == \"int\" and instruction.op_str == \"0x80\"\n if not (mnemonic == \"syscall\" or is_32bit):\n return None\n\n # On x64 the int 0x80 instruction executes 32-bit syscalls from i386\n # On x86, the syscall_arch is already i386, so its all fine\n if is_32bit:\n syscall_arch = \"i386\"\n\n syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)\n return pwndbg.constants.syscall(syscall_number, syscall_arch) or \"<unk_%d>\" % syscall_number\n\n\ndef get(instruction):\n \"\"\"\n Returns an array containing the arguments to the current function,\n if $pc is a 'call' or 'bl' type instruction.\n\n Otherwise, returns None.\n \"\"\"\n n_args_default = 4\n\n if instruction is None:\n return []\n\n if instruction.address != pwndbg.gdblib.regs.pc:\n return []\n\n if CS_GRP_CALL in instruction.groups:\n try:\n abi = pwndbg.lib.abi.ABI.default()\n except KeyError:\n return []\n\n # Not sure of any OS which allows multiple operands on\n # a call instruction.\n assert len(instruction.operands) == 1\n\n target = instruction.operands[0].int\n\n if not target:\n return []\n\n name = pwndbg.gdblib.symbol.get(target)\n if not name:\n return []\n elif CS_GRP_INT in instruction.groups:\n # Get the syscall number and name\n name = get_syscall_name(instruction)\n abi 
= pwndbg.lib.abi.ABI.syscall()\n target = None\n\n if name is None:\n return []\n else:\n return []\n\n result = []\n name = name or \"\"\n\n sym = gdb.lookup_symbol(name)\n name = name.replace(\"isoc99_\", \"\") # __isoc99_sscanf\n name = name.replace(\"@plt\", \"\") # getpwiod@plt\n\n # If we have particular `XXX_chk` function in our database, we use it.\n # Otherwise, we show args for its unchecked version.\n # We also lstrip `_` in here, as e.g. `__printf_chk` needs the underscores.\n if name not in pwndbg.lib.functions.functions:\n name = name.replace(\"_chk\", \"\")\n name = name.strip().lstrip(\"_\") # _malloc\n\n func = pwndbg.lib.functions.functions.get(name, None)\n\n # Try to extract the data from GDB.\n # Note that this is currently broken, pending acceptance of\n # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html\n if sym and sym[0]:\n try:\n n_args_default = len(sym[0].type.fields())\n except TypeError:\n pass\n\n # Try to grab the data out of IDA\n if not func and target:\n typename = pwndbg.ida.GetType(target)\n\n if typename:\n typename += \";\"\n\n # GetType() does not include the name.\n typename = typename.replace(\"(\", \" function_name(\", 1)\n\n for k, v in ida_replacements.items():\n typename = typename.replace(k, v)\n\n func = pwndbg.lib.funcparser.ExtractFuncDeclFromSource(typename + \";\")\n\n if func:\n args = func.args\n else:\n args = (\n pwndbg.lib.functions.Argument(\"int\", 0, argname(i, abi)) for i in range(n_args_default)\n )\n\n for i, arg in enumerate(args):\n result.append((arg, argument(i, abi)))\n\n return result\n\n\ndef argname(n, abi=None):\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return regs[n]\n\n return \"arg[%i]\" % n\n\n\ndef argument(n, abi=None):\n \"\"\"\n Returns the nth argument, as if $pc were a 'call' or 'bl' type\n instruction.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return getattr(pwndbg.gdblib.regs, regs[n])\n\n n -= len(regs)\n\n sp = pwndbg.gdblib.regs.sp + (n * pwndbg.gdblib.arch.ptrsize)\n\n return int(pwndbg.gdblib.memory.poi(pwndbg.gdblib.typeinfo.ppvoid, sp))\n\n\ndef arguments(abi=None):\n \"\"\"\n Yields (arg_name, arg_value) tuples for arguments from a given ABI.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n for i in range(len(regs)):\n yield argname(i, abi), argument(i, abi)\n\n\ndef format_args(instruction):\n result = []\n for arg, value in get(instruction):\n code = arg.type != \"char\"\n pretty = pwndbg.chain.format(value, code=code)\n\n # Enhance args display\n if arg.name == \"fd\" and isinstance(value, int):\n path = pwndbg.gdblib.file.readlink(\"/proc/%d/fd/%d\" % (pwndbg.gdblib.proc.pid, value))\n if path:\n pretty += \" (%s)\" % path\n\n result.append(\"%-10s %s\" % (N.argument(arg.name) + \":\", pretty))\n return result\n", "path": "pwndbg/arguments.py"}]}
| 2,877 | 732 |
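A side note on the patch in the row above: its key point is that on x86-64 the `int 0x80` instruction enters the 32-bit syscall interface, so the number held in the syscall register must be resolved against the i386 table rather than the amd64 one. The sketch below reproduces that lookup logic in isolation; the constant dictionaries are tiny illustrative stand-ins for pwndbg's generated constant modules, not the real tables.

```python
# Minimal, self-contained sketch of the arch-aware syscall lookup from the diff above.
# The constant tables are illustrative stand-ins, not pwndbg's real generated modules.
I386_CONSTANTS = {"__NR_execve": 11, "__NR_write": 4}
AMD64_CONSTANTS = {"__NR_execve": 59, "__NR_write": 1}
ARCHES = {"i386": I386_CONSTANTS, "x86-64": AMD64_CONSTANTS}


def syscall_name(number, arch):
    """Resolve a syscall number against the table of the given architecture."""
    table = ARCHES.get(arch)
    if table is None:
        return None
    for name, value in table.items():
        if value == number:
            return name[len("__NR_"):]
    return None


def describe(mnemonic, op_str, reg_value, current_arch="x86-64"):
    # int 0x80 always runs the 32-bit (i386) syscall interface, even in 64-bit processes.
    syscall_arch = "i386" if (mnemonic == "int" and op_str == "0x80") else current_arch
    return syscall_name(reg_value, syscall_arch) or "<unk_%d>" % reg_value


print(describe("int", "0x80", 11))  # -> execve (resolved with i386 numbering)
print(describe("syscall", "", 59))  # -> execve (resolved with amd64 numbering)
```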
gh_patches_debug_2019 | rasdani/github-patches | git_diff | litestar-org__litestar-1005 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: openapi render for multiple tags isn't consistent
**Describe the bug**
When the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.
I'm unsure if I'm doing something crazy but for a project, we check in the generated json openapi schema so we can browse the API live in gitlab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent they are going to keep flip flopping as we have a pre-commit that generates the json to make sure it's up to date. I hope that ramble makes sense...
**To Reproduce**
```python
from typing import Dict
from starlite import Starlite, Controller, get
class TestController(Controller):
tags = ["a"]
@get("/", tags=["b"])
def hello_world(self) -> Dict[str, str]:
"""Handler function that returns a greeting dictionary."""
return {"hello": "world"}
app = Starlite(route_handlers=[TestController])
print(app.openapi_schema.paths["/"].get.tags)
```
If you run that multiple times, you will see you get either:
```python
['a', 'b']
```
or
```python
['b', 'a']
```
**Additional context**
I believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?
But as I said, it may not be a problem you care about as I could be doing something silly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/openapi/path_item.py`
Content:
```
1 from inspect import cleandoc
2 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
3
4 from pydantic_openapi_schema.v3_1_0.operation import Operation
5 from pydantic_openapi_schema.v3_1_0.path_item import PathItem
6
7 from starlite.openapi.parameters import create_parameter_for_handler
8 from starlite.openapi.request_body import create_request_body
9 from starlite.openapi.responses import create_responses
10 from starlite.utils.helpers import unwrap_partial
11
12 if TYPE_CHECKING:
13 from pydantic import BaseModel
14 from pydantic_openapi_schema.v3_1_0 import SecurityRequirement
15
16 from starlite.handlers import HTTPRouteHandler
17 from starlite.plugins.base import PluginProtocol
18 from starlite.routes import HTTPRoute
19
20
21 def get_description_for_handler(route_handler: "HTTPRouteHandler", use_handler_docstrings: bool) -> Optional[str]:
22 """Produce the operation description for a route handler, either by using the description value if provided,
23
24 or the docstring - if config is enabled.
25
26 Args:
27 route_handler: A route handler instance.
28 use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped
29 handler function.
30
31 Returns:
32 An optional description string
33 """
34 handler_description = route_handler.description
35 if handler_description is None and use_handler_docstrings:
36 fn = unwrap_partial(route_handler.fn.value)
37 return cleandoc(fn.__doc__) if fn.__doc__ else None
38 return handler_description
39
40
41 def extract_layered_values(
42 route_handler: "HTTPRouteHandler",
43 ) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:
44 """Extract the tags and security values from the route handler layers.
45
46 Args:
47 route_handler: A Route Handler instance.
48
49 Returns:
50 A tuple of optional lists.
51 """
52 tags: List[str] = []
53 security: List["SecurityRequirement"] = []
54 for layer in route_handler.ownership_layers:
55 if layer.tags:
56 tags.extend(layer.tags)
57 if layer.security:
58 security.extend(layer.security)
59 return list(set(tags)) if tags else None, security or None
60
61
62 def create_path_item(
63 route: "HTTPRoute", create_examples: bool, plugins: List["PluginProtocol"], use_handler_docstrings: bool
64 ) -> PathItem:
65 """Create a PathItem model for the given route parsing all http_methods into Operation Models."""
66 path_item = PathItem()
67 for http_method, handler_tuple in route.route_handler_map.items():
68 route_handler, _ = handler_tuple
69 if route_handler.include_in_schema:
70 handler_fields = cast("BaseModel", route_handler.signature_model).__fields__
71 parameters = (
72 create_parameter_for_handler(
73 route_handler=route_handler,
74 handler_fields=handler_fields,
75 path_parameters=route.path_parameters,
76 generate_examples=create_examples,
77 )
78 or None
79 )
80 raises_validation_error = bool("data" in handler_fields or path_item.parameters or parameters)
81 handler_name = unwrap_partial(route_handler.handler_name).replace("_", " ").title()
82 request_body = None
83 if "data" in handler_fields:
84 request_body = create_request_body(
85 field=handler_fields["data"], generate_examples=create_examples, plugins=plugins
86 )
87
88 tags, security = extract_layered_values(route_handler)
89 operation = Operation(
90 operationId=route_handler.operation_id or handler_name,
91 tags=tags,
92 summary=route_handler.summary,
93 description=get_description_for_handler(route_handler, use_handler_docstrings),
94 deprecated=route_handler.deprecated,
95 responses=create_responses(
96 route_handler=route_handler,
97 raises_validation_error=raises_validation_error,
98 generate_examples=create_examples,
99 plugins=plugins,
100 ),
101 requestBody=request_body,
102 parameters=parameters, # type: ignore[arg-type]
103 security=security,
104 )
105 setattr(path_item, http_method.lower(), operation)
106 return path_item
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py
--- a/starlite/openapi/path_item.py
+++ b/starlite/openapi/path_item.py
@@ -56,7 +56,7 @@
tags.extend(layer.tags)
if layer.security:
security.extend(layer.security)
- return list(set(tags)) if tags else None, security or None
+ return sorted(set(tags)) if tags else None, security or None
def create_path_item(
|
{"golden_diff": "diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py\n--- a/starlite/openapi/path_item.py\n+++ b/starlite/openapi/path_item.py\n@@ -56,7 +56,7 @@\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n- return list(set(tags)) if tags else None, security or None\n+ return sorted(set(tags)) if tags else None, security or None\n \n \n def create_path_item(\n", "issue": "Bug: openapi render for multiple tags isn't consistent\n**Describe the bug**\r\nWhen the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.\r\n\r\nI'm unsure if I'm doing something crazy but for a project, we check in the generated json openapi schema so we can browse the API live in gitlab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent they are going to keep flip flopping as we have a pre-commit that generates the json to make sure it's up to date. I hope that ramble makes sense...\r\n\r\n**To Reproduce**\r\n```python\r\nfrom typing import Dict \r\n \r\nfrom starlite import Starlite, Controller, get \r\n \r\nclass TestController(Controller): \r\n tags = [\"a\"] \r\n \r\n @get(\"/\", tags=[\"b\"]) \r\n def hello_world(self) -> Dict[str, str]: \r\n \"\"\"Handler function that returns a greeting dictionary.\"\"\"\r\n return {\"hello\": \"world\"} \r\n \r\n \r\napp = Starlite(route_handlers=[TestController]) \r\nprint(app.openapi_schema.paths[\"/\"].get.tags) \r\n```\r\nIf you run that multiple times, you will see you get either:\r\n```python\r\n['a', 'b']\r\n```\r\nor\r\n```python\r\n['b', 'a']\r\n```\r\n\r\n**Additional context**\r\nI believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. 
Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?\r\n\r\nBut as I said, it may not be a problem you care about as I could be doing something silly.\r\n\n", "before_files": [{"content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return list(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n 
operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py"}], "after_files": [{"content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return sorted(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n 
handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py"}]}
| 1,762 | 107 |
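The root cause in the row above is that set iteration order for strings depends on per-process hash randomization, so `list(set(tags))` can legitimately come back as either `['a', 'b']` or `['b', 'a']` between runs, while `sorted(set(tags))` both deduplicates and pins the order. A short standalone illustration:

```python
# Run this script a few times: the unstable list may flip between ['a', 'b'] and
# ['b', 'a'] because string hashing is seeded per process; the sorted one never does.
tags = ["a", "b"]  # one tag coming from the controller, one from the route handler

unstable = list(set(tags))   # deduplicated, but iteration order is not guaranteed
stable = sorted(set(tags))   # deduplicated and deterministic, as in the patch above

print(unstable)
print(stable)
```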
gh_patches_debug_3584 | rasdani/github-patches | git_diff | vas3k__vas3k.club-220 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Только часть id до дефиса выделена когда тебя @тэгнули (translation: only the part of the id before the hyphen gets highlighted when you are @-mentioned)

https://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `common/regexp.py`
Content:
```
1 import re
2
3 USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
4 IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
5 VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
6 YOUTUBE_RE = re.compile(
7 r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
8 )
9 TWITTER_RE = re.compile(r"(https?:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/[\d]+)")
10 FAVICON_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png|ico)")
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/common/regexp.py b/common/regexp.py
--- a/common/regexp.py
+++ b/common/regexp.py
@@ -1,6 +1,6 @@
import re
-USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
+USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")
IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
YOUTUBE_RE = re.compile(
|
{"golden_diff": "diff --git a/common/regexp.py b/common/regexp.py\n--- a/common/regexp.py\n+++ b/common/regexp.py\n@@ -1,6 +1,6 @@\n import re\n \n-USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\n+USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\n IMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\n VIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\n YOUTUBE_RE = re.compile(\n", "issue": "\u0422\u043e\u043b\u044c\u043a\u043e \u0447\u0430\u0441\u0442\u044c id \u0434\u043e \u0434\u0435\u0444\u0438\u0441\u0430 \u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0430 \u043a\u043e\u0433\u0434\u0430 \u0442\u0435\u0431\u044f @\u0442\u044d\u0433\u043d\u0443\u043b\u0438\n\r\nhttps://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5\r\n\r\n\r\n\n", "before_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}], "after_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}]}
| 607 | 164 |
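The one-character fix above simply adds `-` to the username character class; without it the match stops at the first hyphen, which is why only part of the id was highlighted. A quick check of both patterns (copied from the diff, with a hypothetical hyphenated handle):

```python
import re

# Old and new patterns taken from the diff above; the only change is the added '-'.
OLD_USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
NEW_USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")

text = "hey @ivan-petrov, have a look"  # hypothetical mention of a hyphenated user id
print(OLD_USERNAME_RE.findall(text))  # ['ivan'] (capture stops at the hyphen)
print(NEW_USERNAME_RE.findall(text))  # ['ivan-petrov'] (full id captured)
```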
gh_patches_debug_10369 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3235 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when editing str-options
##### Steps to reproduce the problem:
1. Start `mitmproxy` in interactive mode
2. Type `O` to switch to the options view
3. Edit a random `str`-option
4. See `mitmproxy` crash with the following stacktrace:
```
Traceback (most recent call last):
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/master.py", line 86, in run_loop
loop()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 286, in run
self._run()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 384, in _run
self.event_loop.run()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 1484, in run
reraise(*exc_info)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/compat.py", line 58, in reraise
raise value
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py", line 404, in <lambda>
event_loop, callback, self.get_available_raw_input())
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py", line 502, in parse_input
callback(processed, processed_codes)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 411, in _update
self.process_input(keys)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 511, in process_input
k = self._topmost_widget.keypress(self.screen_size, k)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py", line 309, in keypress
k = super().keypress(size, k)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py", line 1131, in keypress
return self.body.keypress( (maxcol, remaining), key )
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py", line 44, in keypress
ret = super().keypress(size, key)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py", line 1131, in keypress
return self.body.keypress( (maxcol, remaining), key )
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py", line 283, in keypress
return self.focus_item.keypress(tsize, key)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py", line 177, in keypress
d = self.master.options.parse_setval(foc.opt.name, v)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/optmanager.py", line 332, in parse_setval
if o.typespec in (str, typing.Optional[str]):
AttributeError: 'str' object has no attribute 'typespec'
mitmproxy has crashed!
Please lodge a bug report at:
https://github.com/mitmproxy/mitmproxy
```
##### System information
```
Mitmproxy: 4.0.3
Python: 3.6.6
OpenSSL: OpenSSL 1.1.0h 27 Mar 2018
Platform: Linux-4.17.0-x86_64-with-debian-buster-sid
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/options.py`
Content:
```
1 import urwid
2 import blinker
3 import textwrap
4 import pprint
5 from typing import Optional, Sequence
6
7 from mitmproxy import exceptions
8 from mitmproxy import optmanager
9 from mitmproxy.tools.console import layoutwidget
10 from mitmproxy.tools.console import signals
11 from mitmproxy.tools.console import overlay
12
13 HELP_HEIGHT = 5
14
15
16 def can_edit_inplace(opt):
17 if opt.choices:
18 return False
19 if opt.typespec in [str, int, Optional[str], Optional[int]]:
20 return True
21
22
23 def fcol(s, width, attr):
24 s = str(s)
25 return (
26 "fixed",
27 width,
28 urwid.Text((attr, s))
29 )
30
31
32 option_focus_change = blinker.Signal()
33
34
35 class OptionItem(urwid.WidgetWrap):
36 def __init__(self, walker, opt, focused, namewidth, editing):
37 self.walker, self.opt, self.focused = walker, opt, focused
38 self.namewidth = namewidth
39 self.editing = editing
40 super().__init__(None)
41 self._w = self.get_widget()
42
43 def get_widget(self):
44 val = self.opt.current()
45 if self.opt.typespec == bool:
46 displayval = "true" if val else "false"
47 elif not val:
48 displayval = ""
49 elif self.opt.typespec == Sequence[str]:
50 displayval = pprint.pformat(val, indent=1)
51 else:
52 displayval = str(val)
53
54 changed = self.walker.master.options.has_changed(self.opt.name)
55 if self.focused:
56 valstyle = "option_active_selected" if changed else "option_selected"
57 else:
58 valstyle = "option_active" if changed else "text"
59
60 if self.editing:
61 valw = urwid.Edit(edit_text=displayval)
62 else:
63 valw = urwid.AttrMap(
64 urwid.Padding(
65 urwid.Text([(valstyle, displayval)])
66 ),
67 valstyle
68 )
69
70 return urwid.Columns(
71 [
72 (
73 self.namewidth,
74 urwid.Text([("title", self.opt.name.ljust(self.namewidth))])
75 ),
76 valw
77 ],
78 dividechars=2,
79 focus_column=1
80 )
81
82 def get_edit_text(self):
83 return self._w[1].get_edit_text()
84
85 def selectable(self):
86 return True
87
88 def keypress(self, size, key):
89 if self.editing:
90 self._w[1].keypress(size, key)
91 return
92 return key
93
94
95 class OptionListWalker(urwid.ListWalker):
96 def __init__(self, master):
97 self.master = master
98
99 self.index = 0
100 self.focusobj = None
101
102 self.opts = sorted(master.options.keys())
103 self.maxlen = max(len(i) for i in self.opts)
104 self.editing = False
105 self.set_focus(0)
106 self.master.options.changed.connect(self.sig_mod)
107
108 def sig_mod(self, *args, **kwargs):
109 self.opts = sorted(self.master.options.keys())
110 self.maxlen = max(len(i) for i in self.opts)
111 self._modified()
112 self.set_focus(self.index)
113
114 def start_editing(self):
115 self.editing = True
116 self.focus_obj = self._get(self.index, True)
117 self._modified()
118
119 def stop_editing(self):
120 self.editing = False
121 self.focus_obj = self._get(self.index, False)
122 self.set_focus(self.index)
123 self._modified()
124
125 def get_edit_text(self):
126 return self.focus_obj.get_edit_text()
127
128 def _get(self, pos, editing):
129 name = self.opts[pos]
130 opt = self.master.options._options[name]
131 return OptionItem(
132 self, opt, pos == self.index, self.maxlen, editing
133 )
134
135 def get_focus(self):
136 return self.focus_obj, self.index
137
138 def set_focus(self, index):
139 self.editing = False
140 name = self.opts[index]
141 opt = self.master.options._options[name]
142 self.index = index
143 self.focus_obj = self._get(self.index, self.editing)
144 option_focus_change.send(opt.help)
145
146 def get_next(self, pos):
147 if pos >= len(self.opts) - 1:
148 return None, None
149 pos = pos + 1
150 return self._get(pos, False), pos
151
152 def get_prev(self, pos):
153 pos = pos - 1
154 if pos < 0:
155 return None, None
156 return self._get(pos, False), pos
157
158
159 class OptionsList(urwid.ListBox):
160 def __init__(self, master):
161 self.master = master
162 self.walker = OptionListWalker(master)
163 super().__init__(self.walker)
164
165 def save_config(self, path):
166 try:
167 optmanager.save(self.master.options, path)
168 except exceptions.OptionsError as e:
169 signals.status_message.send(message=str(e))
170
171 def keypress(self, size, key):
172 if self.walker.editing:
173 if key == "enter":
174 foc, idx = self.get_focus()
175 v = self.walker.get_edit_text()
176 try:
177 d = self.master.options.parse_setval(foc.opt.name, v)
178 self.master.options.update(**{foc.opt.name: d})
179 except exceptions.OptionsError as v:
180 signals.status_message.send(message=str(v))
181 self.walker.stop_editing()
182 return None
183 elif key == "esc":
184 self.walker.stop_editing()
185 return None
186 else:
187 if key == "m_start":
188 self.set_focus(0)
189 self.walker._modified()
190 elif key == "m_end":
191 self.set_focus(len(self.walker.opts) - 1)
192 self.walker._modified()
193 elif key == "m_select":
194 foc, idx = self.get_focus()
195 if foc.opt.typespec == bool:
196 self.master.options.toggler(foc.opt.name)()
197 # Bust the focus widget cache
198 self.set_focus(self.walker.index)
199 elif can_edit_inplace(foc.opt):
200 self.walker.start_editing()
201 self.walker._modified()
202 elif foc.opt.choices:
203 self.master.overlay(
204 overlay.Chooser(
205 self.master,
206 foc.opt.name,
207 foc.opt.choices,
208 foc.opt.current(),
209 self.master.options.setter(foc.opt.name)
210 )
211 )
212 elif foc.opt.typespec == Sequence[str]:
213 self.master.overlay(
214 overlay.OptionsOverlay(
215 self.master,
216 foc.opt.name,
217 foc.opt.current(),
218 HELP_HEIGHT + 5
219 ),
220 valign="top"
221 )
222 else:
223 raise NotImplementedError()
224 return super().keypress(size, key)
225
226
227 class OptionHelp(urwid.Frame):
228 def __init__(self, master):
229 self.master = master
230 super().__init__(self.widget(""))
231 self.set_active(False)
232 option_focus_change.connect(self.sig_mod)
233
234 def set_active(self, val):
235 h = urwid.Text("Option Help")
236 style = "heading" if val else "heading_inactive"
237 self.header = urwid.AttrWrap(h, style)
238
239 def widget(self, txt):
240 cols, _ = self.master.ui.get_cols_rows()
241 return urwid.ListBox(
242 [urwid.Text(i) for i in textwrap.wrap(txt, cols)]
243 )
244
245 def sig_mod(self, txt):
246 self.set_body(self.widget(txt))
247
248
249 class Options(urwid.Pile, layoutwidget.LayoutWidget):
250 title = "Options"
251 keyctx = "options"
252
253 def __init__(self, master):
254 oh = OptionHelp(master)
255 self.optionslist = OptionsList(master)
256 super().__init__(
257 [
258 self.optionslist,
259 (HELP_HEIGHT, oh),
260 ]
261 )
262 self.master = master
263
264 def current_name(self):
265 foc, idx = self.optionslist.get_focus()
266 return foc.opt.name
267
268 def keypress(self, size, key):
269 if key == "m_next":
270 self.focus_position = (
271 self.focus_position + 1
272 ) % len(self.widget_list)
273 self.widget_list[1].set_active(self.focus_position == 1)
274 key = None
275
276 # This is essentially a copypasta from urwid.Pile's keypress handler.
277 # So much for "closed for modification, but open for extension".
278 item_rows = None
279 if len(size) == 2:
280 item_rows = self.get_item_rows(size, focus = True)
281 i = self.widget_list.index(self.focus_item)
282 tsize = self.get_item_size(size, i, True, item_rows)
283 return self.focus_item.keypress(tsize, key)
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/tools/console/options.py b/mitmproxy/tools/console/options.py
--- a/mitmproxy/tools/console/options.py
+++ b/mitmproxy/tools/console/options.py
@@ -174,7 +174,7 @@
foc, idx = self.get_focus()
v = self.walker.get_edit_text()
try:
- d = self.master.options.parse_setval(foc.opt.name, v)
+ d = self.master.options.parse_setval(foc.opt, v)
self.master.options.update(**{foc.opt.name: d})
except exceptions.OptionsError as v:
signals.status_message.send(message=str(v))
|
{"golden_diff": "diff --git a/mitmproxy/tools/console/options.py b/mitmproxy/tools/console/options.py\n--- a/mitmproxy/tools/console/options.py\n+++ b/mitmproxy/tools/console/options.py\n@@ -174,7 +174,7 @@\n foc, idx = self.get_focus()\n v = self.walker.get_edit_text()\n try:\n- d = self.master.options.parse_setval(foc.opt.name, v)\n+ d = self.master.options.parse_setval(foc.opt, v)\n self.master.options.update(**{foc.opt.name: d})\n except exceptions.OptionsError as v:\n signals.status_message.send(message=str(v))\n", "issue": "Crash when editing str-options\n##### Steps to reproduce the problem:\r\n\r\n1. Start `mitmproxy` in interactive mode\r\n2. Type `O` to switch to the options view\r\n3. Edit a random `str`-option\r\n4. See `mitmproxy` crash with the following stacktrace:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/master.py\", line 86, in run_loop\r\n loop()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 286, in run\r\n self._run()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 384, in _run\r\n self.event_loop.run()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 1484, in run\r\n reraise(*exc_info)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/compat.py\", line 58, in reraise\r\n raise value\r\n File \"/usr/lib/python3.6/asyncio/events.py\", line 145, in _run\r\n self._callback(*self._args)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py\", line 404, in <lambda>\r\n event_loop, callback, self.get_available_raw_input())\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py\", line 502, in parse_input\r\n callback(processed, processed_codes)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 411, in _update\r\n self.process_input(keys)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 511, in process_input\r\n k = self._topmost_widget.keypress(self.screen_size, k)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py\", line 309, in keypress\r\n k = super().keypress(size, k)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py\", line 1131, in keypress\r\n return self.body.keypress( (maxcol, remaining), key )\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py\", line 44, in keypress\r\n ret = super().keypress(size, key)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py\", line 1131, in keypress\r\n return self.body.keypress( (maxcol, remaining), key )\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py\", line 283, in keypress\r\n return self.focus_item.keypress(tsize, key)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py\", line 177, in keypress\r\n d = self.master.options.parse_setval(foc.opt.name, v)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/optmanager.py\", line 332, in parse_setval\r\n if o.typespec in (str, typing.Optional[str]):\r\nAttributeError: 'str' object has no attribute 'typespec'\r\n\r\nmitmproxy has crashed!\r\nPlease lodge a bug report at:\r\n https://github.com/mitmproxy/mitmproxy\r\n```\r\n\r\n\r\n\r\n##### System information\r\n\r\n```\r\nMitmproxy: 4.0.3\r\nPython: 
3.6.6\r\nOpenSSL: OpenSSL 1.1.0h 27 Mar 2018\r\nPlatform: Linux-4.17.0-x86_64-with-debian-buster-sid\r\n```\n", "before_files": [{"content": "import urwid\nimport blinker\nimport textwrap\nimport pprint\nfrom typing import Optional, Sequence\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import optmanager\nfrom mitmproxy.tools.console import layoutwidget\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console import overlay\n\nHELP_HEIGHT = 5\n\n\ndef can_edit_inplace(opt):\n if opt.choices:\n return False\n if opt.typespec in [str, int, Optional[str], Optional[int]]:\n return True\n\n\ndef fcol(s, width, attr):\n s = str(s)\n return (\n \"fixed\",\n width,\n urwid.Text((attr, s))\n )\n\n\noption_focus_change = blinker.Signal()\n\n\nclass OptionItem(urwid.WidgetWrap):\n def __init__(self, walker, opt, focused, namewidth, editing):\n self.walker, self.opt, self.focused = walker, opt, focused\n self.namewidth = namewidth\n self.editing = editing\n super().__init__(None)\n self._w = self.get_widget()\n\n def get_widget(self):\n val = self.opt.current()\n if self.opt.typespec == bool:\n displayval = \"true\" if val else \"false\"\n elif not val:\n displayval = \"\"\n elif self.opt.typespec == Sequence[str]:\n displayval = pprint.pformat(val, indent=1)\n else:\n displayval = str(val)\n\n changed = self.walker.master.options.has_changed(self.opt.name)\n if self.focused:\n valstyle = \"option_active_selected\" if changed else \"option_selected\"\n else:\n valstyle = \"option_active\" if changed else \"text\"\n\n if self.editing:\n valw = urwid.Edit(edit_text=displayval)\n else:\n valw = urwid.AttrMap(\n urwid.Padding(\n urwid.Text([(valstyle, displayval)])\n ),\n valstyle\n )\n\n return urwid.Columns(\n [\n (\n self.namewidth,\n urwid.Text([(\"title\", self.opt.name.ljust(self.namewidth))])\n ),\n valw\n ],\n dividechars=2,\n focus_column=1\n )\n\n def get_edit_text(self):\n return self._w[1].get_edit_text()\n\n def selectable(self):\n return True\n\n def keypress(self, size, key):\n if self.editing:\n self._w[1].keypress(size, key)\n return\n return key\n\n\nclass OptionListWalker(urwid.ListWalker):\n def __init__(self, master):\n self.master = master\n\n self.index = 0\n self.focusobj = None\n\n self.opts = sorted(master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self.editing = False\n self.set_focus(0)\n self.master.options.changed.connect(self.sig_mod)\n\n def sig_mod(self, *args, **kwargs):\n self.opts = sorted(self.master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self._modified()\n self.set_focus(self.index)\n\n def start_editing(self):\n self.editing = True\n self.focus_obj = self._get(self.index, True)\n self._modified()\n\n def stop_editing(self):\n self.editing = False\n self.focus_obj = self._get(self.index, False)\n self.set_focus(self.index)\n self._modified()\n\n def get_edit_text(self):\n return self.focus_obj.get_edit_text()\n\n def _get(self, pos, editing):\n name = self.opts[pos]\n opt = self.master.options._options[name]\n return OptionItem(\n self, opt, pos == self.index, self.maxlen, editing\n )\n\n def get_focus(self):\n return self.focus_obj, self.index\n\n def set_focus(self, index):\n self.editing = False\n name = self.opts[index]\n opt = self.master.options._options[name]\n self.index = index\n self.focus_obj = self._get(self.index, self.editing)\n option_focus_change.send(opt.help)\n\n def get_next(self, pos):\n if pos >= len(self.opts) - 1:\n return None, None\n pos = pos + 1\n return self._get(pos, 
False), pos\n\n def get_prev(self, pos):\n pos = pos - 1\n if pos < 0:\n return None, None\n return self._get(pos, False), pos\n\n\nclass OptionsList(urwid.ListBox):\n def __init__(self, master):\n self.master = master\n self.walker = OptionListWalker(master)\n super().__init__(self.walker)\n\n def save_config(self, path):\n try:\n optmanager.save(self.master.options, path)\n except exceptions.OptionsError as e:\n signals.status_message.send(message=str(e))\n\n def keypress(self, size, key):\n if self.walker.editing:\n if key == \"enter\":\n foc, idx = self.get_focus()\n v = self.walker.get_edit_text()\n try:\n d = self.master.options.parse_setval(foc.opt.name, v)\n self.master.options.update(**{foc.opt.name: d})\n except exceptions.OptionsError as v:\n signals.status_message.send(message=str(v))\n self.walker.stop_editing()\n return None\n elif key == \"esc\":\n self.walker.stop_editing()\n return None\n else:\n if key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker.opts) - 1)\n self.walker._modified()\n elif key == \"m_select\":\n foc, idx = self.get_focus()\n if foc.opt.typespec == bool:\n self.master.options.toggler(foc.opt.name)()\n # Bust the focus widget cache\n self.set_focus(self.walker.index)\n elif can_edit_inplace(foc.opt):\n self.walker.start_editing()\n self.walker._modified()\n elif foc.opt.choices:\n self.master.overlay(\n overlay.Chooser(\n self.master,\n foc.opt.name,\n foc.opt.choices,\n foc.opt.current(),\n self.master.options.setter(foc.opt.name)\n )\n )\n elif foc.opt.typespec == Sequence[str]:\n self.master.overlay(\n overlay.OptionsOverlay(\n self.master,\n foc.opt.name,\n foc.opt.current(),\n HELP_HEIGHT + 5\n ),\n valign=\"top\"\n )\n else:\n raise NotImplementedError()\n return super().keypress(size, key)\n\n\nclass OptionHelp(urwid.Frame):\n def __init__(self, master):\n self.master = master\n super().__init__(self.widget(\"\"))\n self.set_active(False)\n option_focus_change.connect(self.sig_mod)\n\n def set_active(self, val):\n h = urwid.Text(\"Option Help\")\n style = \"heading\" if val else \"heading_inactive\"\n self.header = urwid.AttrWrap(h, style)\n\n def widget(self, txt):\n cols, _ = self.master.ui.get_cols_rows()\n return urwid.ListBox(\n [urwid.Text(i) for i in textwrap.wrap(txt, cols)]\n )\n\n def sig_mod(self, txt):\n self.set_body(self.widget(txt))\n\n\nclass Options(urwid.Pile, layoutwidget.LayoutWidget):\n title = \"Options\"\n keyctx = \"options\"\n\n def __init__(self, master):\n oh = OptionHelp(master)\n self.optionslist = OptionsList(master)\n super().__init__(\n [\n self.optionslist,\n (HELP_HEIGHT, oh),\n ]\n )\n self.master = master\n\n def current_name(self):\n foc, idx = self.optionslist.get_focus()\n return foc.opt.name\n\n def keypress(self, size, key):\n if key == \"m_next\":\n self.focus_position = (\n self.focus_position + 1\n ) % len(self.widget_list)\n self.widget_list[1].set_active(self.focus_position == 1)\n key = None\n\n # This is essentially a copypasta from urwid.Pile's keypress handler.\n # So much for \"closed for modification, but open for extension\".\n item_rows = None\n if len(size) == 2:\n item_rows = self.get_item_rows(size, focus = True)\n i = self.widget_list.index(self.focus_item)\n tsize = self.get_item_size(size, i, True, item_rows)\n return self.focus_item.keypress(tsize, key)\n", "path": "mitmproxy/tools/console/options.py"}], "after_files": [{"content": "import urwid\nimport blinker\nimport textwrap\nimport pprint\nfrom typing import 
Optional, Sequence\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import optmanager\nfrom mitmproxy.tools.console import layoutwidget\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console import overlay\n\nHELP_HEIGHT = 5\n\n\ndef can_edit_inplace(opt):\n if opt.choices:\n return False\n if opt.typespec in [str, int, Optional[str], Optional[int]]:\n return True\n\n\ndef fcol(s, width, attr):\n s = str(s)\n return (\n \"fixed\",\n width,\n urwid.Text((attr, s))\n )\n\n\noption_focus_change = blinker.Signal()\n\n\nclass OptionItem(urwid.WidgetWrap):\n def __init__(self, walker, opt, focused, namewidth, editing):\n self.walker, self.opt, self.focused = walker, opt, focused\n self.namewidth = namewidth\n self.editing = editing\n super().__init__(None)\n self._w = self.get_widget()\n\n def get_widget(self):\n val = self.opt.current()\n if self.opt.typespec == bool:\n displayval = \"true\" if val else \"false\"\n elif not val:\n displayval = \"\"\n elif self.opt.typespec == Sequence[str]:\n displayval = pprint.pformat(val, indent=1)\n else:\n displayval = str(val)\n\n changed = self.walker.master.options.has_changed(self.opt.name)\n if self.focused:\n valstyle = \"option_active_selected\" if changed else \"option_selected\"\n else:\n valstyle = \"option_active\" if changed else \"text\"\n\n if self.editing:\n valw = urwid.Edit(edit_text=displayval)\n else:\n valw = urwid.AttrMap(\n urwid.Padding(\n urwid.Text([(valstyle, displayval)])\n ),\n valstyle\n )\n\n return urwid.Columns(\n [\n (\n self.namewidth,\n urwid.Text([(\"title\", self.opt.name.ljust(self.namewidth))])\n ),\n valw\n ],\n dividechars=2,\n focus_column=1\n )\n\n def get_edit_text(self):\n return self._w[1].get_edit_text()\n\n def selectable(self):\n return True\n\n def keypress(self, size, key):\n if self.editing:\n self._w[1].keypress(size, key)\n return\n return key\n\n\nclass OptionListWalker(urwid.ListWalker):\n def __init__(self, master):\n self.master = master\n\n self.index = 0\n self.focusobj = None\n\n self.opts = sorted(master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self.editing = False\n self.set_focus(0)\n self.master.options.changed.connect(self.sig_mod)\n\n def sig_mod(self, *args, **kwargs):\n self.opts = sorted(self.master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self._modified()\n self.set_focus(self.index)\n\n def start_editing(self):\n self.editing = True\n self.focus_obj = self._get(self.index, True)\n self._modified()\n\n def stop_editing(self):\n self.editing = False\n self.focus_obj = self._get(self.index, False)\n self.set_focus(self.index)\n self._modified()\n\n def get_edit_text(self):\n return self.focus_obj.get_edit_text()\n\n def _get(self, pos, editing):\n name = self.opts[pos]\n opt = self.master.options._options[name]\n return OptionItem(\n self, opt, pos == self.index, self.maxlen, editing\n )\n\n def get_focus(self):\n return self.focus_obj, self.index\n\n def set_focus(self, index):\n self.editing = False\n name = self.opts[index]\n opt = self.master.options._options[name]\n self.index = index\n self.focus_obj = self._get(self.index, self.editing)\n option_focus_change.send(opt.help)\n\n def get_next(self, pos):\n if pos >= len(self.opts) - 1:\n return None, None\n pos = pos + 1\n return self._get(pos, False), pos\n\n def get_prev(self, pos):\n pos = pos - 1\n if pos < 0:\n return None, None\n return self._get(pos, False), pos\n\n\nclass OptionsList(urwid.ListBox):\n def __init__(self, master):\n self.master = master\n 
self.walker = OptionListWalker(master)\n super().__init__(self.walker)\n\n def save_config(self, path):\n try:\n optmanager.save(self.master.options, path)\n except exceptions.OptionsError as e:\n signals.status_message.send(message=str(e))\n\n def keypress(self, size, key):\n if self.walker.editing:\n if key == \"enter\":\n foc, idx = self.get_focus()\n v = self.walker.get_edit_text()\n try:\n d = self.master.options.parse_setval(foc.opt, v)\n self.master.options.update(**{foc.opt.name: d})\n except exceptions.OptionsError as v:\n signals.status_message.send(message=str(v))\n self.walker.stop_editing()\n return None\n elif key == \"esc\":\n self.walker.stop_editing()\n return None\n else:\n if key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker.opts) - 1)\n self.walker._modified()\n elif key == \"m_select\":\n foc, idx = self.get_focus()\n if foc.opt.typespec == bool:\n self.master.options.toggler(foc.opt.name)()\n # Bust the focus widget cache\n self.set_focus(self.walker.index)\n elif can_edit_inplace(foc.opt):\n self.walker.start_editing()\n self.walker._modified()\n elif foc.opt.choices:\n self.master.overlay(\n overlay.Chooser(\n self.master,\n foc.opt.name,\n foc.opt.choices,\n foc.opt.current(),\n self.master.options.setter(foc.opt.name)\n )\n )\n elif foc.opt.typespec == Sequence[str]:\n self.master.overlay(\n overlay.OptionsOverlay(\n self.master,\n foc.opt.name,\n foc.opt.current(),\n HELP_HEIGHT + 5\n ),\n valign=\"top\"\n )\n else:\n raise NotImplementedError()\n return super().keypress(size, key)\n\n\nclass OptionHelp(urwid.Frame):\n def __init__(self, master):\n self.master = master\n super().__init__(self.widget(\"\"))\n self.set_active(False)\n option_focus_change.connect(self.sig_mod)\n\n def set_active(self, val):\n h = urwid.Text(\"Option Help\")\n style = \"heading\" if val else \"heading_inactive\"\n self.header = urwid.AttrWrap(h, style)\n\n def widget(self, txt):\n cols, _ = self.master.ui.get_cols_rows()\n return urwid.ListBox(\n [urwid.Text(i) for i in textwrap.wrap(txt, cols)]\n )\n\n def sig_mod(self, txt):\n self.set_body(self.widget(txt))\n\n\nclass Options(urwid.Pile, layoutwidget.LayoutWidget):\n title = \"Options\"\n keyctx = \"options\"\n\n def __init__(self, master):\n oh = OptionHelp(master)\n self.optionslist = OptionsList(master)\n super().__init__(\n [\n self.optionslist,\n (HELP_HEIGHT, oh),\n ]\n )\n self.master = master\n\n def current_name(self):\n foc, idx = self.optionslist.get_focus()\n return foc.opt.name\n\n def keypress(self, size, key):\n if key == \"m_next\":\n self.focus_position = (\n self.focus_position + 1\n ) % len(self.widget_list)\n self.widget_list[1].set_active(self.focus_position == 1)\n key = None\n\n # This is essentially a copypasta from urwid.Pile's keypress handler.\n # So much for \"closed for modification, but open for extension\".\n item_rows = None\n if len(size) == 2:\n item_rows = self.get_item_rows(size, focus = True)\n i = self.widget_list.index(self.focus_item)\n tsize = self.get_item_size(size, i, True, item_rows)\n return self.focus_item.keypress(tsize, key)\n", "path": "mitmproxy/tools/console/options.py"}]}
| 3,801 | 140 |
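A note on the mitmproxy fix above: the traceback dies at `if o.typespec in (str, typing.Optional[str])`, meaning `parse_setval` reads `.typespec` off its first argument, so passing the option name (a plain `str`) instead of the option object raises the reported `AttributeError`. The sketch below is a minimal, self-contained illustration of that contract; the class, the function body, and the option name are simplified stand-ins, not mitmproxy's actual internals.

```python
import typing
from dataclasses import dataclass


@dataclass
class FakeOption:
    """Simplified stand-in for the option records optmanager keeps."""
    name: str
    typespec: typing.Any
    default: typing.Any


def parse_setval(o, v):
    # Mirrors the check the traceback fails on: the first argument must be
    # an option object exposing .typespec, not the option's name.
    if o.typespec in (str, typing.Optional[str]):
        return v
    if o.typespec in (int, typing.Optional[int]):
        return int(v)
    raise NotImplementedError(o.typespec)


opt = FakeOption(name="some_str_option", typespec=str, default="")

print(parse_setval(opt, "new value"))    # works: opt exposes .typespec

try:
    parse_setval(opt.name, "new value")  # the pre-patch call shape
except AttributeError as e:
    print(e)                             # 'str' object has no attribute 'typespec'
```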
gh_patches_debug_36705
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-3209
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Selecting date ranges breaks searches
### Description
Searching documents using TWO filters will not work, if one parameter is "last month" or "last year" in the "added" or "created" field. I tried to be more precise than #3203
Example:
Selected document type is "Anleitung" -> everything works as expected:

Now a second filter "last year" is applied. Year is as expected but selected document type is ignored:

### Steps to reproduce
1. login as a superuser
2. go to web-ui -> documents
3. reset filters (all documents displayed now)
4. select document type.
5. select "created" oder "added" date range "last year"
-> all document types ara displayed.
### Webserver logs
```bash
none
```
### Browser logs
```bash
none
```
### Paperless-ngx version
1.14.0
### Host OS
Linux 3.10.108 #42661 SMP (x86_64)
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/index.py`
Content:
```
1 import logging
2 import math
3 import os
4 from contextlib import contextmanager
5
6 from dateutil.parser import isoparse
7 from django.conf import settings
8 from django.utils import timezone
9 from guardian.shortcuts import get_users_with_perms
10 from whoosh import classify
11 from whoosh import highlight
12 from whoosh import query
13 from whoosh.fields import BOOLEAN
14 from whoosh.fields import DATETIME
15 from whoosh.fields import KEYWORD
16 from whoosh.fields import NUMERIC
17 from whoosh.fields import TEXT
18 from whoosh.fields import Schema
19 from whoosh.highlight import HtmlFormatter
20 from whoosh.index import create_in
21 from whoosh.index import exists_in
22 from whoosh.index import open_dir
23 from whoosh.qparser import MultifieldParser
24 from whoosh.qparser.dateparse import DateParserPlugin
25 from whoosh.searching import ResultsPage
26 from whoosh.searching import Searcher
27 from whoosh.writing import AsyncWriter
28
29 from documents.models import Document
30 from documents.models import Note
31
32 logger = logging.getLogger("paperless.index")
33
34
35 def get_schema():
36 return Schema(
37 id=NUMERIC(stored=True, unique=True),
38 title=TEXT(sortable=True),
39 content=TEXT(),
40 asn=NUMERIC(sortable=True, signed=False),
41 correspondent=TEXT(sortable=True),
42 correspondent_id=NUMERIC(),
43 has_correspondent=BOOLEAN(),
44 tag=KEYWORD(commas=True, scorable=True, lowercase=True),
45 tag_id=KEYWORD(commas=True, scorable=True),
46 has_tag=BOOLEAN(),
47 type=TEXT(sortable=True),
48 type_id=NUMERIC(),
49 has_type=BOOLEAN(),
50 created=DATETIME(sortable=True),
51 modified=DATETIME(sortable=True),
52 added=DATETIME(sortable=True),
53 path=TEXT(sortable=True),
54 path_id=NUMERIC(),
55 has_path=BOOLEAN(),
56 notes=TEXT(),
57 owner=TEXT(),
58 owner_id=NUMERIC(),
59 has_owner=BOOLEAN(),
60 viewer_id=KEYWORD(commas=True),
61 )
62
63
64 def open_index(recreate=False):
65 try:
66 if exists_in(settings.INDEX_DIR) and not recreate:
67 return open_dir(settings.INDEX_DIR, schema=get_schema())
68 except Exception:
69 logger.exception("Error while opening the index, recreating.")
70
71 if not os.path.isdir(settings.INDEX_DIR):
72 os.makedirs(settings.INDEX_DIR, exist_ok=True)
73 return create_in(settings.INDEX_DIR, get_schema())
74
75
76 @contextmanager
77 def open_index_writer(optimize=False):
78 writer = AsyncWriter(open_index())
79
80 try:
81 yield writer
82 except Exception as e:
83 logger.exception(str(e))
84 writer.cancel()
85 finally:
86 writer.commit(optimize=optimize)
87
88
89 @contextmanager
90 def open_index_searcher():
91 searcher = open_index().searcher()
92
93 try:
94 yield searcher
95 finally:
96 searcher.close()
97
98
99 def update_document(writer: AsyncWriter, doc: Document):
100 tags = ",".join([t.name for t in doc.tags.all()])
101 tags_ids = ",".join([str(t.id) for t in doc.tags.all()])
102 notes = ",".join([str(c.note) for c in Note.objects.filter(document=doc)])
103 asn = doc.archive_serial_number
104 if asn is not None and (
105 asn < Document.ARCHIVE_SERIAL_NUMBER_MIN
106 or asn > Document.ARCHIVE_SERIAL_NUMBER_MAX
107 ):
108 logger.error(
109 f"Not indexing Archive Serial Number {asn} of document {doc.pk}. "
110 f"ASN is out of range "
111 f"[{Document.ARCHIVE_SERIAL_NUMBER_MIN:,}, "
112 f"{Document.ARCHIVE_SERIAL_NUMBER_MAX:,}.",
113 )
114 asn = 0
115 users_with_perms = get_users_with_perms(
116 doc,
117 only_with_perms_in=["view_document"],
118 )
119 viewer_ids = ",".join([str(u.id) for u in users_with_perms])
120 writer.update_document(
121 id=doc.pk,
122 title=doc.title,
123 content=doc.content,
124 correspondent=doc.correspondent.name if doc.correspondent else None,
125 correspondent_id=doc.correspondent.id if doc.correspondent else None,
126 has_correspondent=doc.correspondent is not None,
127 tag=tags if tags else None,
128 tag_id=tags_ids if tags_ids else None,
129 has_tag=len(tags) > 0,
130 type=doc.document_type.name if doc.document_type else None,
131 type_id=doc.document_type.id if doc.document_type else None,
132 has_type=doc.document_type is not None,
133 created=doc.created,
134 added=doc.added,
135 asn=asn,
136 modified=doc.modified,
137 path=doc.storage_path.name if doc.storage_path else None,
138 path_id=doc.storage_path.id if doc.storage_path else None,
139 has_path=doc.storage_path is not None,
140 notes=notes,
141 owner=doc.owner.username if doc.owner else None,
142 owner_id=doc.owner.id if doc.owner else None,
143 has_owner=doc.owner is not None,
144 viewer_id=viewer_ids if viewer_ids else None,
145 )
146
147
148 def remove_document(writer, doc):
149 remove_document_by_id(writer, doc.pk)
150
151
152 def remove_document_by_id(writer, doc_id):
153 writer.delete_by_term("id", doc_id)
154
155
156 def add_or_update_document(document):
157 with open_index_writer() as writer:
158 update_document(writer, document)
159
160
161 def remove_document_from_index(document):
162 with open_index_writer() as writer:
163 remove_document(writer, document)
164
165
166 class DelayedQuery:
167 def _get_query(self):
168 raise NotImplementedError
169
170 def _get_query_filter(self):
171 criterias = []
172 for k, v in self.query_params.items():
173 if k == "correspondent__id":
174 criterias.append(query.Term("correspondent_id", v))
175 elif k == "tags__id__all":
176 for tag_id in v.split(","):
177 criterias.append(query.Term("tag_id", tag_id))
178 elif k == "tags__id__none":
179 for tag_id in v.split(","):
180 criterias.append(query.Not(query.Term("tag_id", tag_id)))
181 elif k == "document_type__id":
182 criterias.append(query.Term("type_id", v))
183 elif k == "correspondent__isnull":
184 criterias.append(query.Term("has_correspondent", v == "false"))
185 elif k == "is_tagged":
186 criterias.append(query.Term("has_tag", v == "true"))
187 elif k == "document_type__isnull":
188 criterias.append(query.Term("has_type", v == "false"))
189 elif k == "created__date__lt":
190 criterias.append(
191 query.DateRange("created", start=None, end=isoparse(v)),
192 )
193 elif k == "created__date__gt":
194 criterias.append(
195 query.DateRange("created", start=isoparse(v), end=None),
196 )
197 elif k == "added__date__gt":
198 criterias.append(query.DateRange("added", start=isoparse(v), end=None))
199 elif k == "added__date__lt":
200 criterias.append(query.DateRange("added", start=None, end=isoparse(v)))
201 elif k == "storage_path__id":
202 criterias.append(query.Term("path_id", v))
203 elif k == "storage_path__isnull":
204 criterias.append(query.Term("has_path", v == "false"))
205
206 user_criterias = [query.Term("has_owner", False)]
207 if "user" in self.query_params:
208 user_criterias.append(query.Term("owner_id", self.query_params["user"]))
209 user_criterias.append(
210 query.Term("viewer_id", str(self.query_params["user"])),
211 )
212 if len(criterias) > 0:
213 criterias.append(query.Or(user_criterias))
214 return query.And(criterias)
215 else:
216 return query.Or(user_criterias)
217
218 def _get_query_sortedby(self):
219 if "ordering" not in self.query_params:
220 return None, False
221
222 field: str = self.query_params["ordering"]
223
224 sort_fields_map = {
225 "created": "created",
226 "modified": "modified",
227 "added": "added",
228 "title": "title",
229 "correspondent__name": "correspondent",
230 "document_type__name": "type",
231 "archive_serial_number": "asn",
232 }
233
234 if field.startswith("-"):
235 field = field[1:]
236 reverse = True
237 else:
238 reverse = False
239
240 if field not in sort_fields_map:
241 return None, False
242 else:
243 return sort_fields_map[field], reverse
244
245 def __init__(self, searcher: Searcher, query_params, page_size):
246 self.searcher = searcher
247 self.query_params = query_params
248 self.page_size = page_size
249 self.saved_results = dict()
250 self.first_score = None
251
252 def __len__(self):
253 page = self[0:1]
254 return len(page)
255
256 def __getitem__(self, item):
257 if item.start in self.saved_results:
258 return self.saved_results[item.start]
259
260 q, mask = self._get_query()
261 sortedby, reverse = self._get_query_sortedby()
262
263 page: ResultsPage = self.searcher.search_page(
264 q,
265 mask=mask,
266 filter=self._get_query_filter(),
267 pagenum=math.floor(item.start / self.page_size) + 1,
268 pagelen=self.page_size,
269 sortedby=sortedby,
270 reverse=reverse,
271 )
272 page.results.fragmenter = highlight.ContextFragmenter(surround=50)
273 page.results.formatter = HtmlFormatter(tagname="span", between=" ... ")
274
275 if not self.first_score and len(page.results) > 0 and sortedby is None:
276 self.first_score = page.results[0].score
277
278 page.results.top_n = list(
279 map(
280 lambda hit: (
281 (hit[0] / self.first_score) if self.first_score else None,
282 hit[1],
283 ),
284 page.results.top_n,
285 ),
286 )
287
288 self.saved_results[item.start] = page
289
290 return page
291
292
293 class DelayedFullTextQuery(DelayedQuery):
294 def _get_query(self):
295 q_str = self.query_params["query"]
296 qp = MultifieldParser(
297 ["content", "title", "correspondent", "tag", "type", "notes"],
298 self.searcher.ixreader.schema,
299 )
300 qp.add_plugin(DateParserPlugin(basedate=timezone.now()))
301 q = qp.parse(q_str)
302
303 corrected = self.searcher.correct_query(q, q_str)
304 if corrected.query != q:
305 corrected.query = corrected.string
306
307 return q, None
308
309
310 class DelayedMoreLikeThisQuery(DelayedQuery):
311 def _get_query(self):
312 more_like_doc_id = int(self.query_params["more_like_id"])
313 content = Document.objects.get(id=more_like_doc_id).content
314
315 docnum = self.searcher.document_number(id=more_like_doc_id)
316 kts = self.searcher.key_terms_from_text(
317 "content",
318 content,
319 numterms=20,
320 model=classify.Bo1Model,
321 normalize=False,
322 )
323 q = query.Or(
324 [query.Term("content", word, boost=weight) for word, weight in kts],
325 )
326 mask = {docnum}
327
328 return q, mask
329
330
331 def autocomplete(ix, term, limit=10):
332 with ix.reader() as reader:
333 terms = []
334 for score, t in reader.most_distinctive_terms(
335 "content",
336 number=limit,
337 prefix=term.lower(),
338 ):
339 terms.append(t)
340 return terms
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/documents/index.py b/src/documents/index.py
--- a/src/documents/index.py
+++ b/src/documents/index.py
@@ -172,6 +172,14 @@
for k, v in self.query_params.items():
if k == "correspondent__id":
criterias.append(query.Term("correspondent_id", v))
+ elif k == "correspondent__id__in":
+ for correspondent_id in v.split(","):
+ criterias.append(query.Term("correspondent_id", correspondent_id))
+ elif k == "correspondent__id__none":
+ for correspondent_id in v.split(","):
+ criterias.append(
+ query.Not(query.Term("correspondent_id", correspondent_id)),
+ )
elif k == "tags__id__all":
for tag_id in v.split(","):
criterias.append(query.Term("tag_id", tag_id))
@@ -180,6 +188,12 @@
criterias.append(query.Not(query.Term("tag_id", tag_id)))
elif k == "document_type__id":
criterias.append(query.Term("type_id", v))
+ elif k == "document_type__id__in":
+ for document_type_id in v.split(","):
+ criterias.append(query.Term("type_id", document_type_id))
+ elif k == "document_type__id__none":
+ for document_type_id in v.split(","):
+ criterias.append(query.Not(query.Term("type_id", document_type_id)))
elif k == "correspondent__isnull":
criterias.append(query.Term("has_correspondent", v == "false"))
elif k == "is_tagged":
@@ -200,6 +214,12 @@
criterias.append(query.DateRange("added", start=None, end=isoparse(v)))
elif k == "storage_path__id":
criterias.append(query.Term("path_id", v))
+ elif k == "storage_path__id__in":
+ for storage_path_id in v.split(","):
+ criterias.append(query.Term("path_id", storage_path_id))
+ elif k == "storage_path__id__none":
+ for storage_path_id in v.split(","):
+ criterias.append(query.Not(query.Term("path_id", storage_path_id)))
elif k == "storage_path__isnull":
criterias.append(query.Term("has_path", v == "false"))
|
{"golden_diff": "diff --git a/src/documents/index.py b/src/documents/index.py\n--- a/src/documents/index.py\n+++ b/src/documents/index.py\n@@ -172,6 +172,14 @@\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n+ elif k == \"correspondent__id__in\":\n+ for correspondent_id in v.split(\",\"):\n+ criterias.append(query.Term(\"correspondent_id\", correspondent_id))\n+ elif k == \"correspondent__id__none\":\n+ for correspondent_id in v.split(\",\"):\n+ criterias.append(\n+ query.Not(query.Term(\"correspondent_id\", correspondent_id)),\n+ )\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n@@ -180,6 +188,12 @@\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n+ elif k == \"document_type__id__in\":\n+ for document_type_id in v.split(\",\"):\n+ criterias.append(query.Term(\"type_id\", document_type_id))\n+ elif k == \"document_type__id__none\":\n+ for document_type_id in v.split(\",\"):\n+ criterias.append(query.Not(query.Term(\"type_id\", document_type_id)))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n@@ -200,6 +214,12 @@\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n+ elif k == \"storage_path__id__in\":\n+ for storage_path_id in v.split(\",\"):\n+ criterias.append(query.Term(\"path_id\", storage_path_id))\n+ elif k == \"storage_path__id__none\":\n+ for storage_path_id in v.split(\",\"):\n+ criterias.append(query.Not(query.Term(\"path_id\", storage_path_id)))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n", "issue": "[BUG] Selecting date ranges breaks searches\n### Description\n\nSearching documents using TWO filters will not work, if one parameter is \"last month\" or \"last year\" in the \"added\" or \"created\" field. I tried to be more precise than #3203 \r\n\r\nExample: \r\n\r\nSelected document type is \"Anleitung\" -> everything works as expected:\r\n\r\n\r\nNow a second filter \"last year\" is applied. Year is as expected but selected document type is ignored:\r\n\r\n\n\n### Steps to reproduce\n\n1. login as a superuser\r\n2. go to web-ui -> documents\r\n3. reset filters (all documents displayed now)\r\n4. select document type.\r\n5. 
select \"created\" oder \"added\" date range \"last year\"\r\n-> all document types ara displayed.\n\n### Webserver logs\n\n```bash\nnone\n```\n\n\n### Browser logs\n\n```bash\nnone\n```\n\n\n### Paperless-ngx version\n\n1.14.0\n\n### Host OS\n\nLinux 3.10.108 #42661 SMP (x86_64)\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import logging\nimport math\nimport os\nfrom contextlib import contextmanager\n\nfrom dateutil.parser import isoparse\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom guardian.shortcuts import get_users_with_perms\nfrom whoosh import classify\nfrom whoosh import highlight\nfrom whoosh import query\nfrom whoosh.fields import BOOLEAN\nfrom whoosh.fields import DATETIME\nfrom whoosh.fields import KEYWORD\nfrom whoosh.fields import NUMERIC\nfrom whoosh.fields import TEXT\nfrom whoosh.fields import Schema\nfrom whoosh.highlight import HtmlFormatter\nfrom whoosh.index import create_in\nfrom whoosh.index import exists_in\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh.qparser.dateparse import DateParserPlugin\nfrom whoosh.searching import ResultsPage\nfrom whoosh.searching import Searcher\nfrom whoosh.writing import AsyncWriter\n\nfrom documents.models import Document\nfrom documents.models import Note\n\nlogger = logging.getLogger(\"paperless.index\")\n\n\ndef get_schema():\n return Schema(\n id=NUMERIC(stored=True, unique=True),\n title=TEXT(sortable=True),\n content=TEXT(),\n asn=NUMERIC(sortable=True, signed=False),\n correspondent=TEXT(sortable=True),\n correspondent_id=NUMERIC(),\n has_correspondent=BOOLEAN(),\n tag=KEYWORD(commas=True, scorable=True, lowercase=True),\n tag_id=KEYWORD(commas=True, scorable=True),\n has_tag=BOOLEAN(),\n type=TEXT(sortable=True),\n type_id=NUMERIC(),\n has_type=BOOLEAN(),\n created=DATETIME(sortable=True),\n modified=DATETIME(sortable=True),\n added=DATETIME(sortable=True),\n path=TEXT(sortable=True),\n path_id=NUMERIC(),\n has_path=BOOLEAN(),\n notes=TEXT(),\n owner=TEXT(),\n owner_id=NUMERIC(),\n has_owner=BOOLEAN(),\n viewer_id=KEYWORD(commas=True),\n )\n\n\ndef open_index(recreate=False):\n try:\n if exists_in(settings.INDEX_DIR) and not recreate:\n return open_dir(settings.INDEX_DIR, schema=get_schema())\n except Exception:\n logger.exception(\"Error while opening the index, recreating.\")\n\n if not os.path.isdir(settings.INDEX_DIR):\n os.makedirs(settings.INDEX_DIR, exist_ok=True)\n return create_in(settings.INDEX_DIR, get_schema())\n\n\n@contextmanager\ndef open_index_writer(optimize=False):\n writer = AsyncWriter(open_index())\n\n try:\n yield writer\n except Exception as e:\n logger.exception(str(e))\n writer.cancel()\n finally:\n writer.commit(optimize=optimize)\n\n\n@contextmanager\ndef open_index_searcher():\n searcher = open_index().searcher()\n\n try:\n yield searcher\n finally:\n searcher.close()\n\n\ndef update_document(writer: AsyncWriter, doc: Document):\n tags = \",\".join([t.name for t in doc.tags.all()])\n tags_ids = \",\".join([str(t.id) for t in doc.tags.all()])\n notes = \",\".join([str(c.note) for c in Note.objects.filter(document=doc)])\n asn = doc.archive_serial_number\n if asn is not None and (\n asn < Document.ARCHIVE_SERIAL_NUMBER_MIN\n or asn > Document.ARCHIVE_SERIAL_NUMBER_MAX\n ):\n logger.error(\n f\"Not indexing Archive Serial Number {asn} of document {doc.pk}. 
\"\n f\"ASN is out of range \"\n f\"[{Document.ARCHIVE_SERIAL_NUMBER_MIN:,}, \"\n f\"{Document.ARCHIVE_SERIAL_NUMBER_MAX:,}.\",\n )\n asn = 0\n users_with_perms = get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n )\n viewer_ids = \",\".join([str(u.id) for u in users_with_perms])\n writer.update_document(\n id=doc.pk,\n title=doc.title,\n content=doc.content,\n correspondent=doc.correspondent.name if doc.correspondent else None,\n correspondent_id=doc.correspondent.id if doc.correspondent else None,\n has_correspondent=doc.correspondent is not None,\n tag=tags if tags else None,\n tag_id=tags_ids if tags_ids else None,\n has_tag=len(tags) > 0,\n type=doc.document_type.name if doc.document_type else None,\n type_id=doc.document_type.id if doc.document_type else None,\n has_type=doc.document_type is not None,\n created=doc.created,\n added=doc.added,\n asn=asn,\n modified=doc.modified,\n path=doc.storage_path.name if doc.storage_path else None,\n path_id=doc.storage_path.id if doc.storage_path else None,\n has_path=doc.storage_path is not None,\n notes=notes,\n owner=doc.owner.username if doc.owner else None,\n owner_id=doc.owner.id if doc.owner else None,\n has_owner=doc.owner is not None,\n viewer_id=viewer_ids if viewer_ids else None,\n )\n\n\ndef remove_document(writer, doc):\n remove_document_by_id(writer, doc.pk)\n\n\ndef remove_document_by_id(writer, doc_id):\n writer.delete_by_term(\"id\", doc_id)\n\n\ndef add_or_update_document(document):\n with open_index_writer() as writer:\n update_document(writer, document)\n\n\ndef remove_document_from_index(document):\n with open_index_writer() as writer:\n remove_document(writer, document)\n\n\nclass DelayedQuery:\n def _get_query(self):\n raise NotImplementedError\n\n def _get_query_filter(self):\n criterias = []\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n elif k == \"tags__id__none\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n criterias.append(query.Term(\"has_tag\", v == \"true\"))\n elif k == \"document_type__isnull\":\n criterias.append(query.Term(\"has_type\", v == \"false\"))\n elif k == \"created__date__lt\":\n criterias.append(\n query.DateRange(\"created\", start=None, end=isoparse(v)),\n )\n elif k == \"created__date__gt\":\n criterias.append(\n query.DateRange(\"created\", start=isoparse(v), end=None),\n )\n elif k == \"added__date__gt\":\n criterias.append(query.DateRange(\"added\", start=isoparse(v), end=None))\n elif k == \"added__date__lt\":\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n\n user_criterias = [query.Term(\"has_owner\", False)]\n if \"user\" in self.query_params:\n user_criterias.append(query.Term(\"owner_id\", self.query_params[\"user\"]))\n user_criterias.append(\n query.Term(\"viewer_id\", str(self.query_params[\"user\"])),\n )\n if len(criterias) > 0:\n criterias.append(query.Or(user_criterias))\n return 
query.And(criterias)\n else:\n return query.Or(user_criterias)\n\n def _get_query_sortedby(self):\n if \"ordering\" not in self.query_params:\n return None, False\n\n field: str = self.query_params[\"ordering\"]\n\n sort_fields_map = {\n \"created\": \"created\",\n \"modified\": \"modified\",\n \"added\": \"added\",\n \"title\": \"title\",\n \"correspondent__name\": \"correspondent\",\n \"document_type__name\": \"type\",\n \"archive_serial_number\": \"asn\",\n }\n\n if field.startswith(\"-\"):\n field = field[1:]\n reverse = True\n else:\n reverse = False\n\n if field not in sort_fields_map:\n return None, False\n else:\n return sort_fields_map[field], reverse\n\n def __init__(self, searcher: Searcher, query_params, page_size):\n self.searcher = searcher\n self.query_params = query_params\n self.page_size = page_size\n self.saved_results = dict()\n self.first_score = None\n\n def __len__(self):\n page = self[0:1]\n return len(page)\n\n def __getitem__(self, item):\n if item.start in self.saved_results:\n return self.saved_results[item.start]\n\n q, mask = self._get_query()\n sortedby, reverse = self._get_query_sortedby()\n\n page: ResultsPage = self.searcher.search_page(\n q,\n mask=mask,\n filter=self._get_query_filter(),\n pagenum=math.floor(item.start / self.page_size) + 1,\n pagelen=self.page_size,\n sortedby=sortedby,\n reverse=reverse,\n )\n page.results.fragmenter = highlight.ContextFragmenter(surround=50)\n page.results.formatter = HtmlFormatter(tagname=\"span\", between=\" ... \")\n\n if not self.first_score and len(page.results) > 0 and sortedby is None:\n self.first_score = page.results[0].score\n\n page.results.top_n = list(\n map(\n lambda hit: (\n (hit[0] / self.first_score) if self.first_score else None,\n hit[1],\n ),\n page.results.top_n,\n ),\n )\n\n self.saved_results[item.start] = page\n\n return page\n\n\nclass DelayedFullTextQuery(DelayedQuery):\n def _get_query(self):\n q_str = self.query_params[\"query\"]\n qp = MultifieldParser(\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"notes\"],\n self.searcher.ixreader.schema,\n )\n qp.add_plugin(DateParserPlugin(basedate=timezone.now()))\n q = qp.parse(q_str)\n\n corrected = self.searcher.correct_query(q, q_str)\n if corrected.query != q:\n corrected.query = corrected.string\n\n return q, None\n\n\nclass DelayedMoreLikeThisQuery(DelayedQuery):\n def _get_query(self):\n more_like_doc_id = int(self.query_params[\"more_like_id\"])\n content = Document.objects.get(id=more_like_doc_id).content\n\n docnum = self.searcher.document_number(id=more_like_doc_id)\n kts = self.searcher.key_terms_from_text(\n \"content\",\n content,\n numterms=20,\n model=classify.Bo1Model,\n normalize=False,\n )\n q = query.Or(\n [query.Term(\"content\", word, boost=weight) for word, weight in kts],\n )\n mask = {docnum}\n\n return q, mask\n\n\ndef autocomplete(ix, term, limit=10):\n with ix.reader() as reader:\n terms = []\n for score, t in reader.most_distinctive_terms(\n \"content\",\n number=limit,\n prefix=term.lower(),\n ):\n terms.append(t)\n return terms\n", "path": "src/documents/index.py"}], "after_files": [{"content": "import logging\nimport math\nimport os\nfrom contextlib import contextmanager\n\nfrom dateutil.parser import isoparse\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom guardian.shortcuts import get_users_with_perms\nfrom whoosh import classify\nfrom whoosh import highlight\nfrom whoosh import query\nfrom whoosh.fields import BOOLEAN\nfrom whoosh.fields import DATETIME\nfrom 
whoosh.fields import KEYWORD\nfrom whoosh.fields import NUMERIC\nfrom whoosh.fields import TEXT\nfrom whoosh.fields import Schema\nfrom whoosh.highlight import HtmlFormatter\nfrom whoosh.index import create_in\nfrom whoosh.index import exists_in\nfrom whoosh.index import open_dir\nfrom whoosh.qparser import MultifieldParser\nfrom whoosh.qparser.dateparse import DateParserPlugin\nfrom whoosh.searching import ResultsPage\nfrom whoosh.searching import Searcher\nfrom whoosh.writing import AsyncWriter\n\nfrom documents.models import Document\nfrom documents.models import Note\n\nlogger = logging.getLogger(\"paperless.index\")\n\n\ndef get_schema():\n return Schema(\n id=NUMERIC(stored=True, unique=True),\n title=TEXT(sortable=True),\n content=TEXT(),\n asn=NUMERIC(sortable=True, signed=False),\n correspondent=TEXT(sortable=True),\n correspondent_id=NUMERIC(),\n has_correspondent=BOOLEAN(),\n tag=KEYWORD(commas=True, scorable=True, lowercase=True),\n tag_id=KEYWORD(commas=True, scorable=True),\n has_tag=BOOLEAN(),\n type=TEXT(sortable=True),\n type_id=NUMERIC(),\n has_type=BOOLEAN(),\n created=DATETIME(sortable=True),\n modified=DATETIME(sortable=True),\n added=DATETIME(sortable=True),\n path=TEXT(sortable=True),\n path_id=NUMERIC(),\n has_path=BOOLEAN(),\n notes=TEXT(),\n owner=TEXT(),\n owner_id=NUMERIC(),\n has_owner=BOOLEAN(),\n viewer_id=KEYWORD(commas=True),\n )\n\n\ndef open_index(recreate=False):\n try:\n if exists_in(settings.INDEX_DIR) and not recreate:\n return open_dir(settings.INDEX_DIR, schema=get_schema())\n except Exception:\n logger.exception(\"Error while opening the index, recreating.\")\n\n if not os.path.isdir(settings.INDEX_DIR):\n os.makedirs(settings.INDEX_DIR, exist_ok=True)\n return create_in(settings.INDEX_DIR, get_schema())\n\n\n@contextmanager\ndef open_index_writer(optimize=False):\n writer = AsyncWriter(open_index())\n\n try:\n yield writer\n except Exception as e:\n logger.exception(str(e))\n writer.cancel()\n finally:\n writer.commit(optimize=optimize)\n\n\n@contextmanager\ndef open_index_searcher():\n searcher = open_index().searcher()\n\n try:\n yield searcher\n finally:\n searcher.close()\n\n\ndef update_document(writer: AsyncWriter, doc: Document):\n tags = \",\".join([t.name for t in doc.tags.all()])\n tags_ids = \",\".join([str(t.id) for t in doc.tags.all()])\n notes = \",\".join([str(c.note) for c in Note.objects.filter(document=doc)])\n asn = doc.archive_serial_number\n if asn is not None and (\n asn < Document.ARCHIVE_SERIAL_NUMBER_MIN\n or asn > Document.ARCHIVE_SERIAL_NUMBER_MAX\n ):\n logger.error(\n f\"Not indexing Archive Serial Number {asn} of document {doc.pk}. 
\"\n f\"ASN is out of range \"\n f\"[{Document.ARCHIVE_SERIAL_NUMBER_MIN:,}, \"\n f\"{Document.ARCHIVE_SERIAL_NUMBER_MAX:,}.\",\n )\n asn = 0\n users_with_perms = get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n )\n viewer_ids = \",\".join([str(u.id) for u in users_with_perms])\n writer.update_document(\n id=doc.pk,\n title=doc.title,\n content=doc.content,\n correspondent=doc.correspondent.name if doc.correspondent else None,\n correspondent_id=doc.correspondent.id if doc.correspondent else None,\n has_correspondent=doc.correspondent is not None,\n tag=tags if tags else None,\n tag_id=tags_ids if tags_ids else None,\n has_tag=len(tags) > 0,\n type=doc.document_type.name if doc.document_type else None,\n type_id=doc.document_type.id if doc.document_type else None,\n has_type=doc.document_type is not None,\n created=doc.created,\n added=doc.added,\n asn=asn,\n modified=doc.modified,\n path=doc.storage_path.name if doc.storage_path else None,\n path_id=doc.storage_path.id if doc.storage_path else None,\n has_path=doc.storage_path is not None,\n notes=notes,\n owner=doc.owner.username if doc.owner else None,\n owner_id=doc.owner.id if doc.owner else None,\n has_owner=doc.owner is not None,\n viewer_id=viewer_ids if viewer_ids else None,\n )\n\n\ndef remove_document(writer, doc):\n remove_document_by_id(writer, doc.pk)\n\n\ndef remove_document_by_id(writer, doc_id):\n writer.delete_by_term(\"id\", doc_id)\n\n\ndef add_or_update_document(document):\n with open_index_writer() as writer:\n update_document(writer, document)\n\n\ndef remove_document_from_index(document):\n with open_index_writer() as writer:\n remove_document(writer, document)\n\n\nclass DelayedQuery:\n def _get_query(self):\n raise NotImplementedError\n\n def _get_query_filter(self):\n criterias = []\n for k, v in self.query_params.items():\n if k == \"correspondent__id\":\n criterias.append(query.Term(\"correspondent_id\", v))\n elif k == \"correspondent__id__in\":\n for correspondent_id in v.split(\",\"):\n criterias.append(query.Term(\"correspondent_id\", correspondent_id))\n elif k == \"correspondent__id__none\":\n for correspondent_id in v.split(\",\"):\n criterias.append(\n query.Not(query.Term(\"correspondent_id\", correspondent_id)),\n )\n elif k == \"tags__id__all\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Term(\"tag_id\", tag_id))\n elif k == \"tags__id__none\":\n for tag_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"tag_id\", tag_id)))\n elif k == \"document_type__id\":\n criterias.append(query.Term(\"type_id\", v))\n elif k == \"document_type__id__in\":\n for document_type_id in v.split(\",\"):\n criterias.append(query.Term(\"type_id\", document_type_id))\n elif k == \"document_type__id__none\":\n for document_type_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"type_id\", document_type_id)))\n elif k == \"correspondent__isnull\":\n criterias.append(query.Term(\"has_correspondent\", v == \"false\"))\n elif k == \"is_tagged\":\n criterias.append(query.Term(\"has_tag\", v == \"true\"))\n elif k == \"document_type__isnull\":\n criterias.append(query.Term(\"has_type\", v == \"false\"))\n elif k == \"created__date__lt\":\n criterias.append(\n query.DateRange(\"created\", start=None, end=isoparse(v)),\n )\n elif k == \"created__date__gt\":\n criterias.append(\n query.DateRange(\"created\", start=isoparse(v), end=None),\n )\n elif k == \"added__date__gt\":\n criterias.append(query.DateRange(\"added\", start=isoparse(v), end=None))\n elif k == 
\"added__date__lt\":\n criterias.append(query.DateRange(\"added\", start=None, end=isoparse(v)))\n elif k == \"storage_path__id\":\n criterias.append(query.Term(\"path_id\", v))\n elif k == \"storage_path__id__in\":\n for storage_path_id in v.split(\",\"):\n criterias.append(query.Term(\"path_id\", storage_path_id))\n elif k == \"storage_path__id__none\":\n for storage_path_id in v.split(\",\"):\n criterias.append(query.Not(query.Term(\"path_id\", storage_path_id)))\n elif k == \"storage_path__isnull\":\n criterias.append(query.Term(\"has_path\", v == \"false\"))\n\n user_criterias = [query.Term(\"has_owner\", False)]\n if \"user\" in self.query_params:\n user_criterias.append(query.Term(\"owner_id\", self.query_params[\"user\"]))\n user_criterias.append(\n query.Term(\"viewer_id\", str(self.query_params[\"user\"])),\n )\n if len(criterias) > 0:\n criterias.append(query.Or(user_criterias))\n return query.And(criterias)\n else:\n return query.Or(user_criterias)\n\n def _get_query_sortedby(self):\n if \"ordering\" not in self.query_params:\n return None, False\n\n field: str = self.query_params[\"ordering\"]\n\n sort_fields_map = {\n \"created\": \"created\",\n \"modified\": \"modified\",\n \"added\": \"added\",\n \"title\": \"title\",\n \"correspondent__name\": \"correspondent\",\n \"document_type__name\": \"type\",\n \"archive_serial_number\": \"asn\",\n }\n\n if field.startswith(\"-\"):\n field = field[1:]\n reverse = True\n else:\n reverse = False\n\n if field not in sort_fields_map:\n return None, False\n else:\n return sort_fields_map[field], reverse\n\n def __init__(self, searcher: Searcher, query_params, page_size):\n self.searcher = searcher\n self.query_params = query_params\n self.page_size = page_size\n self.saved_results = dict()\n self.first_score = None\n\n def __len__(self):\n page = self[0:1]\n return len(page)\n\n def __getitem__(self, item):\n if item.start in self.saved_results:\n return self.saved_results[item.start]\n\n q, mask = self._get_query()\n sortedby, reverse = self._get_query_sortedby()\n\n page: ResultsPage = self.searcher.search_page(\n q,\n mask=mask,\n filter=self._get_query_filter(),\n pagenum=math.floor(item.start / self.page_size) + 1,\n pagelen=self.page_size,\n sortedby=sortedby,\n reverse=reverse,\n )\n page.results.fragmenter = highlight.ContextFragmenter(surround=50)\n page.results.formatter = HtmlFormatter(tagname=\"span\", between=\" ... 
\")\n\n if not self.first_score and len(page.results) > 0 and sortedby is None:\n self.first_score = page.results[0].score\n\n page.results.top_n = list(\n map(\n lambda hit: (\n (hit[0] / self.first_score) if self.first_score else None,\n hit[1],\n ),\n page.results.top_n,\n ),\n )\n\n self.saved_results[item.start] = page\n\n return page\n\n\nclass DelayedFullTextQuery(DelayedQuery):\n def _get_query(self):\n q_str = self.query_params[\"query\"]\n qp = MultifieldParser(\n [\"content\", \"title\", \"correspondent\", \"tag\", \"type\", \"notes\"],\n self.searcher.ixreader.schema,\n )\n qp.add_plugin(DateParserPlugin(basedate=timezone.now()))\n q = qp.parse(q_str)\n\n corrected = self.searcher.correct_query(q, q_str)\n if corrected.query != q:\n corrected.query = corrected.string\n\n return q, None\n\n\nclass DelayedMoreLikeThisQuery(DelayedQuery):\n def _get_query(self):\n more_like_doc_id = int(self.query_params[\"more_like_id\"])\n content = Document.objects.get(id=more_like_doc_id).content\n\n docnum = self.searcher.document_number(id=more_like_doc_id)\n kts = self.searcher.key_terms_from_text(\n \"content\",\n content,\n numterms=20,\n model=classify.Bo1Model,\n normalize=False,\n )\n q = query.Or(\n [query.Term(\"content\", word, boost=weight) for word, weight in kts],\n )\n mask = {docnum}\n\n return q, mask\n\n\ndef autocomplete(ix, term, limit=10):\n with ix.reader() as reader:\n terms = []\n for score, t in reader.most_distinctive_terms(\n \"content\",\n number=limit,\n prefix=term.lower(),\n ):\n terms.append(t)\n return terms\n", "path": "src/documents/index.py"}]}
| 4,095 | 532 |
gh_patches_debug_5253
|
rasdani/github-patches
|
git_diff
|
statsmodels__statsmodels-3430
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SARIMAX predict does not actually allow for datetime start/end
Docs indicate it does, but TimeSeriesModel._get_predict_start requires either a string or an int.
--- END ISSUE ---
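As an aside (not part of the original report), a minimal sketch of the call pattern being discussed; the model order, index, and toy data below are illustrative assumptions:

```python
import datetime

import pandas as pd
from statsmodels.tsa.statespace.sarimax import SARIMAX

# Monthly series with a proper DatetimeIndex (toy data).
idx = pd.date_range("2000-01-01", periods=48, freq="MS")
y = pd.Series(range(48), index=idx, dtype=float)

res = SARIMAX(y, order=(1, 0, 0)).fit(disp=False)

# String dates go through TimeSeriesModel._str_to_date and work; per the report,
# passing datetime objects (as the docs suggest) failed in the version quoted below.
pred = res.predict(start=datetime.datetime(2003, 1, 1),
                   end=datetime.datetime(2004, 6, 1))
```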
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/tsa/base/tsa_model.py`
Content:
```
1 from statsmodels.compat.python import lrange, long
2 from statsmodels.compat.pandas import is_numeric_dtype
3
4 import datetime
5
6 from pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp
7
8 from statsmodels.base import data
9 import statsmodels.base.model as base
10 import statsmodels.base.wrapper as wrap
11 from statsmodels.tsa.base import datetools
12
13 _freq_to_pandas = datetools._freq_to_pandas
14
15 _tsa_doc = """
16 %(model)s
17
18 Parameters
19 ----------
20 %(params)s
21 dates : array-like of datetime, optional
22 An array-like object of datetime objects. If a pandas object is given
23 for endog or exog, it is assumed to have a DateIndex.
24 freq : str, optional
25 The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
26 'M', 'A', or 'Q'. This is optional if dates are given.
27 %(extra_params)s
28 %(extra_sections)s
29 """
30
31 _model_doc = "Timeseries model base class"
32
33 _generic_params = base._model_params_doc
34 _missing_param_doc = base._missing_param_doc
35
36 class TimeSeriesModel(base.LikelihoodModel):
37
38 __doc__ = _tsa_doc % {"model" : _model_doc, "params" : _generic_params,
39 "extra_params" : _missing_param_doc,
40 "extra_sections" : ""}
41
42 def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):
43 super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)
44 self._init_dates(dates, freq)
45
46 def _init_dates(self, dates, freq):
47 if dates is None:
48 dates = self.data.row_labels
49
50 if dates is not None:
51 if (not datetools._is_datetime_index(dates) and
52 isinstance(self.data, data.PandasData)):
53 try:
54 if is_numeric_dtype(dates):
55 raise ValueError
56 dates = to_datetime(dates)
57 except ValueError:
58 raise ValueError("Given a pandas object and the index does "
59 "not contain dates")
60 if not freq:
61 try:
62 freq = datetools._infer_freq(dates)
63 except:
64 raise ValueError("Frequency inference failed. Use `freq` "
65 "keyword.")
66
67 if isinstance(dates[0], datetime.datetime):
68 dates = DatetimeIndex(dates)
69 else: # preserve PeriodIndex
70 dates = PeriodIndex(dates)
71 self.data.dates = dates
72 self.data.freq = freq
73
74 # Test for nanoseconds in early pandas versions
75 if freq is not None and _freq_to_pandas[freq].freqstr == 'N':
76 from distutils.version import LooseVersion
77 from pandas import __version__ as pd_version
78 if LooseVersion(pd_version) < '0.14':
79 raise NotImplementedError('Nanosecond index not available in'
80 ' Pandas < 0.14')
81
82
83 def _get_exog_names(self):
84 return self.data.xnames
85
86 def _set_exog_names(self, vals):
87 if not isinstance(vals, list):
88 vals = [vals]
89 self.data.xnames = vals
90
91 #overwrite with writable property for (V)AR models
92 exog_names = property(_get_exog_names, _set_exog_names)
93
94 def _get_dates_loc(self, dates, date):
95 date = dates.get_loc(date)
96 return date
97
98 def _str_to_date(self, date):
99 """
100 Takes a string and returns a datetime object
101 """
102 if isinstance(self.data.dates, PeriodIndex):
103 return Period(date)
104 else:
105 return datetools.date_parser(date)
106
107 def _set_predict_start_date(self, start):
108 dates = self.data.dates
109 if dates is None:
110 return
111 if start > len(dates):
112 raise ValueError("Start must be <= len(endog)")
113 if start == len(dates):
114 self.data.predict_start = datetools._date_from_idx(dates[-1],
115 1, self.data.freq)
116 elif start < len(dates):
117 self.data.predict_start = dates[start]
118 else:
119 raise ValueError("Start must be <= len(dates)")
120
121 def _get_predict_start(self, start):
122 """
123 Returns the index of the given start date. Subclasses should define
124 default behavior for start = None. That isn't handled here.
125
126 Start can be a string or an integer if self.data.dates is None.
127 """
128 dates = self.data.dates
129 if not isinstance(start, (int, long)):
130 start = str(start)
131 if dates is None:
132 raise ValueError("Got a string for start and dates is None")
133 dtstart = self._str_to_date(start)
134 self.data.predict_start = dtstart
135 try:
136 start = self._get_dates_loc(dates, dtstart)
137 except KeyError:
138 raise ValueError("Start must be in dates. Got %s | %s" %
139 (str(start), str(dtstart)))
140
141 self._set_predict_start_date(start)
142 return start
143
144
145 def _get_predict_end(self, end):
146 """
147 See _get_predict_start for more information. Subclasses do not
148 need to define anything for this.
149 """
150
151 out_of_sample = 0 # will be overwritten if needed
152 if end is None: # use data for ARIMA - endog changes
153 end = len(self.data.endog) - 1
154
155 dates = self.data.dates
156 freq = self.data.freq
157
158 if isinstance(end, str) or (dates is not None
159 and isinstance(end, type(dates[0]))):
160 if dates is None:
161 raise ValueError("Got a string or date for `end` and `dates` is None")
162
163 if isinstance(end, str):
164 dtend = self._str_to_date(end)
165 else:
166 dtend = end # end could be a pandas TimeStamp not a datetime
167
168 self.data.predict_end = dtend
169 try:
170 end = self._get_dates_loc(dates, dtend)
171 except KeyError as err: # end is greater than dates[-1]...probably
172 if dtend > self.data.dates[-1]:
173 end = len(self.data.endog) - 1
174 freq = self.data.freq
175 out_of_sample = datetools._idx_from_dates(dates[-1], dtend,
176 freq)
177 else:
178 if freq is None:
179 raise ValueError("There is no frequency for these "
180 "dates and date %s is not in dates "
181 "index. Try giving a date that is in "
182 "the dates index or use an integer."
183 % dtend)
184 else: #pragma: no cover
185 raise err # should never get here
186 self._make_predict_dates() # attaches self.data.predict_dates
187
188 elif isinstance(end, (int, long)) and dates is not None:
189 try:
190 self.data.predict_end = dates[end]
191 except IndexError as err:
192 nobs = len(self.data.endog) - 1 # as an index
193 out_of_sample = end - nobs
194 end = nobs
195 if freq is not None:
196 self.data.predict_end = datetools._date_from_idx(dates[-1],
197 out_of_sample, freq)
198 elif out_of_sample <= 0: # have no frequency but are in sample
199 #TODO: what error to catch here to make sure dates is
200 #on the index?
201 try:
202 self.data.predict_end = self._get_dates_loc(dates, end)
203 except KeyError:
204 raise
205 else:
206 self.data.predict_end = end + out_of_sample
207 self.data.predict_start = self._get_dates_loc(dates,
208 self.data.predict_start)
209
210 self._make_predict_dates()
211
212 elif isinstance(end, (int, long)):
213 nobs = len(self.data.endog) - 1 # is an index
214 if end > nobs:
215 out_of_sample = end - nobs
216 end = nobs
217
218 elif freq is None: # should have a date with freq = None
219 print('#'*80)
220 print(freq)
221 print(type(freq))
222 print('#'*80)
223 raise ValueError("When freq is None, you must give an integer "
224 "index for end.")
225
226 else:
227 print('#'*80)
228 print(freq)
229 print(type(freq))
230 print('#'*80)
231 raise ValueError("no rule for interpreting end")
232
233 return end, out_of_sample
234
235 def _make_predict_dates(self):
236 data = self.data
237 dtstart = data.predict_start
238 dtend = data.predict_end
239 freq = data.freq
240
241 if freq is not None:
242 pandas_freq = _freq_to_pandas[freq]
243 # preserve PeriodIndex or DatetimeIndex
244 dates = self.data.dates.__class__(start=dtstart,
245 end=dtend,
246 freq=pandas_freq)
247
248 if pandas_freq.freqstr == 'N':
249 _dtend = dtend
250 if isinstance(dates[-1], Period):
251 _dtend = pd.to_datetime(_dtend).to_period(dates.freq)
252 if not dates[-1] == _dtend:
253 # TODO: this is a hack because a DatetimeIndex with
254 # nanosecond frequency does not include "end"
255 dtend = Timestamp(dtend.value + 1)
256 dates = self.data.dates.__class__(start=dtstart,
257 end=dtend,
258 freq=pandas_freq)
259 # handle
260 elif freq is None and (isinstance(dtstart, (int, long)) and
261 isinstance(dtend, (int, long))):
262 from pandas import Index
263 dates = Index(lrange(dtstart, dtend+1))
264 # if freq is None and dtstart and dtend aren't integers, we're
265 # in sample
266 else:
267 dates = self.data.dates
268 start = self._get_dates_loc(dates, dtstart)
269 end = self._get_dates_loc(dates, dtend)
270 dates = dates[start:end+1] # is this index inclusive?
271 self.data.predict_dates = dates
272
273 class TimeSeriesModelResults(base.LikelihoodModelResults):
274 def __init__(self, model, params, normalized_cov_params, scale=1.):
275 self.data = model.data
276 super(TimeSeriesModelResults,
277 self).__init__(model, params, normalized_cov_params, scale)
278
279 class TimeSeriesResultsWrapper(wrap.ResultsWrapper):
280 _attrs = {}
281 _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,
282 _attrs)
283 _methods = {'predict' : 'dates'}
284 _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,
285 _methods)
286 wrap.populate_wrapper(TimeSeriesResultsWrapper,
287 TimeSeriesModelResults)
288
289 if __name__ == "__main__":
290 import statsmodels.api as sm
291 import pandas
292
293 data = sm.datasets.macrodata.load()
294
295 #make a DataFrame
296 #TODO: attach a DataFrame to some of the datasets, for quicker use
297 dates = [str(int(x[0])) +':'+ str(int(x[1])) \
298 for x in data.data[['year','quarter']]]
299
300 df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)
301 ex_mod = TimeSeriesModel(df)
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/statsmodels/tsa/base/tsa_model.py b/statsmodels/tsa/base/tsa_model.py
--- a/statsmodels/tsa/base/tsa_model.py
+++ b/statsmodels/tsa/base/tsa_model.py
@@ -155,6 +155,9 @@
dates = self.data.dates
freq = self.data.freq
+ if isinstance(end, datetime.datetime):
+ end = self._str_to_date(str(end))
+
if isinstance(end, str) or (dates is not None
and isinstance(end, type(dates[0]))):
if dates is None:
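A follow-up note (not from the source): the patch converts a datetime `end` to its string form and re-parses it, so datetime and string spellings resolve to the same index position. A pandas-only sketch of that equivalence, with made-up dates:

```python
import datetime

import pandas as pd

dates = pd.date_range("2009-01-01", periods=10, freq="D")
end = datetime.datetime(2009, 1, 5)

# str(end) -> '2009-01-05 00:00:00'; parsing it back gives the same Timestamp,
# so both forms land on position 4 of the index.
assert dates.get_loc(pd.to_datetime(str(end))) == dates.get_loc(end) == 4
```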
|
{"golden_diff": "diff --git a/statsmodels/tsa/base/tsa_model.py b/statsmodels/tsa/base/tsa_model.py\n--- a/statsmodels/tsa/base/tsa_model.py\n+++ b/statsmodels/tsa/base/tsa_model.py\n@@ -155,6 +155,9 @@\n dates = self.data.dates\n freq = self.data.freq\n \n+ if isinstance(end, datetime.datetime):\n+ end = self._str_to_date(str(end))\n+\n if isinstance(end, str) or (dates is not None\n and isinstance(end, type(dates[0]))):\n if dates is None:\n", "issue": "SARIMAX predict does not actually allow for datetime start/end\nDocs indicate it does, but TimeSeriesModel._get_predict_start requires either a string or an int.\n\n", "before_files": [{"content": "from statsmodels.compat.python import lrange, long\nfrom statsmodels.compat.pandas import is_numeric_dtype\n\nimport datetime\n\nfrom pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp\n\nfrom statsmodels.base import data\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.tsa.base import datetools\n\n_freq_to_pandas = datetools._freq_to_pandas\n\n_tsa_doc = \"\"\"\n %(model)s\n\n Parameters\n ----------\n %(params)s\n dates : array-like of datetime, optional\n An array-like object of datetime objects. If a pandas object is given\n for endog or exog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n %(extra_params)s\n %(extra_sections)s\n\"\"\"\n\n_model_doc = \"Timeseries model base class\"\n\n_generic_params = base._model_params_doc\n_missing_param_doc = base._missing_param_doc\n\nclass TimeSeriesModel(base.LikelihoodModel):\n\n __doc__ = _tsa_doc % {\"model\" : _model_doc, \"params\" : _generic_params,\n \"extra_params\" : _missing_param_doc,\n \"extra_sections\" : \"\"}\n\n def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):\n super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)\n self._init_dates(dates, freq)\n\n def _init_dates(self, dates, freq):\n if dates is None:\n dates = self.data.row_labels\n\n if dates is not None:\n if (not datetools._is_datetime_index(dates) and\n isinstance(self.data, data.PandasData)):\n try:\n if is_numeric_dtype(dates):\n raise ValueError\n dates = to_datetime(dates)\n except ValueError:\n raise ValueError(\"Given a pandas object and the index does \"\n \"not contain dates\")\n if not freq:\n try:\n freq = datetools._infer_freq(dates)\n except:\n raise ValueError(\"Frequency inference failed. 
Use `freq` \"\n \"keyword.\")\n\n if isinstance(dates[0], datetime.datetime):\n dates = DatetimeIndex(dates)\n else: # preserve PeriodIndex\n dates = PeriodIndex(dates)\n self.data.dates = dates\n self.data.freq = freq\n\n # Test for nanoseconds in early pandas versions\n if freq is not None and _freq_to_pandas[freq].freqstr == 'N':\n from distutils.version import LooseVersion\n from pandas import __version__ as pd_version\n if LooseVersion(pd_version) < '0.14':\n raise NotImplementedError('Nanosecond index not available in'\n ' Pandas < 0.14')\n\n\n def _get_exog_names(self):\n return self.data.xnames\n\n def _set_exog_names(self, vals):\n if not isinstance(vals, list):\n vals = [vals]\n self.data.xnames = vals\n\n #overwrite with writable property for (V)AR models\n exog_names = property(_get_exog_names, _set_exog_names)\n\n def _get_dates_loc(self, dates, date):\n date = dates.get_loc(date)\n return date\n\n def _str_to_date(self, date):\n \"\"\"\n Takes a string and returns a datetime object\n \"\"\"\n if isinstance(self.data.dates, PeriodIndex):\n return Period(date)\n else:\n return datetools.date_parser(date)\n\n def _set_predict_start_date(self, start):\n dates = self.data.dates\n if dates is None:\n return\n if start > len(dates):\n raise ValueError(\"Start must be <= len(endog)\")\n if start == len(dates):\n self.data.predict_start = datetools._date_from_idx(dates[-1],\n 1, self.data.freq)\n elif start < len(dates):\n self.data.predict_start = dates[start]\n else:\n raise ValueError(\"Start must be <= len(dates)\")\n\n def _get_predict_start(self, start):\n \"\"\"\n Returns the index of the given start date. Subclasses should define\n default behavior for start = None. That isn't handled here.\n\n Start can be a string or an integer if self.data.dates is None.\n \"\"\"\n dates = self.data.dates\n if not isinstance(start, (int, long)):\n start = str(start)\n if dates is None:\n raise ValueError(\"Got a string for start and dates is None\")\n dtstart = self._str_to_date(start)\n self.data.predict_start = dtstart\n try:\n start = self._get_dates_loc(dates, dtstart)\n except KeyError:\n raise ValueError(\"Start must be in dates. Got %s | %s\" %\n (str(start), str(dtstart)))\n\n self._set_predict_start_date(start)\n return start\n\n\n def _get_predict_end(self, end):\n \"\"\"\n See _get_predict_start for more information. Subclasses do not\n need to define anything for this.\n \"\"\"\n\n out_of_sample = 0 # will be overwritten if needed\n if end is None: # use data for ARIMA - endog changes\n end = len(self.data.endog) - 1\n\n dates = self.data.dates\n freq = self.data.freq\n\n if isinstance(end, str) or (dates is not None\n and isinstance(end, type(dates[0]))):\n if dates is None:\n raise ValueError(\"Got a string or date for `end` and `dates` is None\")\n\n if isinstance(end, str):\n dtend = self._str_to_date(end)\n else:\n dtend = end # end could be a pandas TimeStamp not a datetime\n\n self.data.predict_end = dtend\n try:\n end = self._get_dates_loc(dates, dtend)\n except KeyError as err: # end is greater than dates[-1]...probably\n if dtend > self.data.dates[-1]:\n end = len(self.data.endog) - 1\n freq = self.data.freq\n out_of_sample = datetools._idx_from_dates(dates[-1], dtend,\n freq)\n else:\n if freq is None:\n raise ValueError(\"There is no frequency for these \"\n \"dates and date %s is not in dates \"\n \"index. 
Try giving a date that is in \"\n \"the dates index or use an integer.\"\n % dtend)\n else: #pragma: no cover\n raise err # should never get here\n self._make_predict_dates() # attaches self.data.predict_dates\n\n elif isinstance(end, (int, long)) and dates is not None:\n try:\n self.data.predict_end = dates[end]\n except IndexError as err:\n nobs = len(self.data.endog) - 1 # as an index\n out_of_sample = end - nobs\n end = nobs\n if freq is not None:\n self.data.predict_end = datetools._date_from_idx(dates[-1],\n out_of_sample, freq)\n elif out_of_sample <= 0: # have no frequency but are in sample\n #TODO: what error to catch here to make sure dates is\n #on the index?\n try:\n self.data.predict_end = self._get_dates_loc(dates, end)\n except KeyError:\n raise\n else:\n self.data.predict_end = end + out_of_sample\n self.data.predict_start = self._get_dates_loc(dates,\n self.data.predict_start)\n\n self._make_predict_dates()\n\n elif isinstance(end, (int, long)):\n nobs = len(self.data.endog) - 1 # is an index\n if end > nobs:\n out_of_sample = end - nobs\n end = nobs\n\n elif freq is None: # should have a date with freq = None\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"When freq is None, you must give an integer \"\n \"index for end.\")\n\n else:\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"no rule for interpreting end\")\n\n return end, out_of_sample\n\n def _make_predict_dates(self):\n data = self.data\n dtstart = data.predict_start\n dtend = data.predict_end\n freq = data.freq\n\n if freq is not None:\n pandas_freq = _freq_to_pandas[freq]\n # preserve PeriodIndex or DatetimeIndex\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n\n if pandas_freq.freqstr == 'N':\n _dtend = dtend\n if isinstance(dates[-1], Period):\n _dtend = pd.to_datetime(_dtend).to_period(dates.freq)\n if not dates[-1] == _dtend:\n # TODO: this is a hack because a DatetimeIndex with\n # nanosecond frequency does not include \"end\"\n dtend = Timestamp(dtend.value + 1)\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n # handle\n elif freq is None and (isinstance(dtstart, (int, long)) and\n isinstance(dtend, (int, long))):\n from pandas import Index\n dates = Index(lrange(dtstart, dtend+1))\n # if freq is None and dtstart and dtend aren't integers, we're\n # in sample\n else:\n dates = self.data.dates\n start = self._get_dates_loc(dates, dtstart)\n end = self._get_dates_loc(dates, dtend)\n dates = dates[start:end+1] # is this index inclusive?\n self.data.predict_dates = dates\n\nclass TimeSeriesModelResults(base.LikelihoodModelResults):\n def __init__(self, model, params, normalized_cov_params, scale=1.):\n self.data = model.data\n super(TimeSeriesModelResults,\n self).__init__(model, params, normalized_cov_params, scale)\n\nclass TimeSeriesResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {'predict' : 'dates'}\n _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(TimeSeriesResultsWrapper,\n TimeSeriesModelResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n import pandas\n\n data = sm.datasets.macrodata.load()\n\n #make a DataFrame\n #TODO: attach a DataFrame to some of the datasets, for quicker use\n dates = [str(int(x[0])) +':'+ str(int(x[1])) \\\n for x in 
data.data[['year','quarter']]]\n\n df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)\n ex_mod = TimeSeriesModel(df)\n", "path": "statsmodels/tsa/base/tsa_model.py"}], "after_files": [{"content": "from statsmodels.compat.python import lrange, long\nfrom statsmodels.compat.pandas import is_numeric_dtype\n\nimport datetime\n\nfrom pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp\n\nfrom statsmodels.base import data\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.tsa.base import datetools\n\n_freq_to_pandas = datetools._freq_to_pandas\n\n_tsa_doc = \"\"\"\n %(model)s\n\n Parameters\n ----------\n %(params)s\n dates : array-like of datetime, optional\n An array-like object of datetime objects. If a pandas object is given\n for endog or exog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n %(extra_params)s\n %(extra_sections)s\n\"\"\"\n\n_model_doc = \"Timeseries model base class\"\n\n_generic_params = base._model_params_doc\n_missing_param_doc = base._missing_param_doc\n\nclass TimeSeriesModel(base.LikelihoodModel):\n\n __doc__ = _tsa_doc % {\"model\" : _model_doc, \"params\" : _generic_params,\n \"extra_params\" : _missing_param_doc,\n \"extra_sections\" : \"\"}\n\n def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):\n super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)\n self._init_dates(dates, freq)\n\n def _init_dates(self, dates, freq):\n if dates is None:\n dates = self.data.row_labels\n\n if dates is not None:\n if (not datetools._is_datetime_index(dates) and\n isinstance(self.data, data.PandasData)):\n try:\n if is_numeric_dtype(dates):\n raise ValueError\n dates = to_datetime(dates)\n except ValueError:\n raise ValueError(\"Given a pandas object and the index does \"\n \"not contain dates\")\n if not freq:\n try:\n freq = datetools._infer_freq(dates)\n except:\n raise ValueError(\"Frequency inference failed. 
Use `freq` \"\n \"keyword.\")\n\n if isinstance(dates[0], datetime.datetime):\n dates = DatetimeIndex(dates)\n else: # preserve PeriodIndex\n dates = PeriodIndex(dates)\n self.data.dates = dates\n self.data.freq = freq\n\n # Test for nanoseconds in early pandas versions\n if freq is not None and _freq_to_pandas[freq].freqstr == 'N':\n from distutils.version import LooseVersion\n from pandas import __version__ as pd_version\n if LooseVersion(pd_version) < '0.14':\n raise NotImplementedError('Nanosecond index not available in'\n ' Pandas < 0.14')\n\n\n def _get_exog_names(self):\n return self.data.xnames\n\n def _set_exog_names(self, vals):\n if not isinstance(vals, list):\n vals = [vals]\n self.data.xnames = vals\n\n #overwrite with writable property for (V)AR models\n exog_names = property(_get_exog_names, _set_exog_names)\n\n def _get_dates_loc(self, dates, date):\n date = dates.get_loc(date)\n return date\n\n def _str_to_date(self, date):\n \"\"\"\n Takes a string and returns a datetime object\n \"\"\"\n if isinstance(self.data.dates, PeriodIndex):\n return Period(date)\n else:\n return datetools.date_parser(date)\n\n def _set_predict_start_date(self, start):\n dates = self.data.dates\n if dates is None:\n return\n if start > len(dates):\n raise ValueError(\"Start must be <= len(endog)\")\n if start == len(dates):\n self.data.predict_start = datetools._date_from_idx(dates[-1],\n 1, self.data.freq)\n elif start < len(dates):\n self.data.predict_start = dates[start]\n else:\n raise ValueError(\"Start must be <= len(dates)\")\n\n def _get_predict_start(self, start):\n \"\"\"\n Returns the index of the given start date. Subclasses should define\n default behavior for start = None. That isn't handled here.\n\n Start can be a string or an integer if self.data.dates is None.\n \"\"\"\n dates = self.data.dates\n if not isinstance(start, (int, long)):\n start = str(start)\n if dates is None:\n raise ValueError(\"Got a string for start and dates is None\")\n dtstart = self._str_to_date(start)\n self.data.predict_start = dtstart\n try:\n start = self._get_dates_loc(dates, dtstart)\n except KeyError:\n raise ValueError(\"Start must be in dates. Got %s | %s\" %\n (str(start), str(dtstart)))\n\n self._set_predict_start_date(start)\n return start\n\n\n def _get_predict_end(self, end):\n \"\"\"\n See _get_predict_start for more information. Subclasses do not\n need to define anything for this.\n \"\"\"\n\n out_of_sample = 0 # will be overwritten if needed\n if end is None: # use data for ARIMA - endog changes\n end = len(self.data.endog) - 1\n\n dates = self.data.dates\n freq = self.data.freq\n\n if isinstance(end, datetime.datetime):\n end = self._str_to_date(str(end))\n\n if isinstance(end, str) or (dates is not None\n and isinstance(end, type(dates[0]))):\n if dates is None:\n raise ValueError(\"Got a string or date for `end` and `dates` is None\")\n\n if isinstance(end, str):\n dtend = self._str_to_date(end)\n else:\n dtend = end # end could be a pandas TimeStamp not a datetime\n\n self.data.predict_end = dtend\n try:\n end = self._get_dates_loc(dates, dtend)\n except KeyError as err: # end is greater than dates[-1]...probably\n if dtend > self.data.dates[-1]:\n end = len(self.data.endog) - 1\n freq = self.data.freq\n out_of_sample = datetools._idx_from_dates(dates[-1], dtend,\n freq)\n else:\n if freq is None:\n raise ValueError(\"There is no frequency for these \"\n \"dates and date %s is not in dates \"\n \"index. 
Try giving a date that is in \"\n \"the dates index or use an integer.\"\n % dtend)\n else: #pragma: no cover\n raise err # should never get here\n self._make_predict_dates() # attaches self.data.predict_dates\n\n elif isinstance(end, (int, long)) and dates is not None:\n try:\n self.data.predict_end = dates[end]\n except IndexError as err:\n nobs = len(self.data.endog) - 1 # as an index\n out_of_sample = end - nobs\n end = nobs\n if freq is not None:\n self.data.predict_end = datetools._date_from_idx(dates[-1],\n out_of_sample, freq)\n elif out_of_sample <= 0: # have no frequency but are in sample\n #TODO: what error to catch here to make sure dates is\n #on the index?\n try:\n self.data.predict_end = self._get_dates_loc(dates, end)\n except KeyError:\n raise\n else:\n self.data.predict_end = end + out_of_sample\n self.data.predict_start = self._get_dates_loc(dates,\n self.data.predict_start)\n\n self._make_predict_dates()\n\n elif isinstance(end, (int, long)):\n nobs = len(self.data.endog) - 1 # is an index\n if end > nobs:\n out_of_sample = end - nobs\n end = nobs\n\n elif freq is None: # should have a date with freq = None\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"When freq is None, you must give an integer \"\n \"index for end.\")\n\n else:\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"no rule for interpreting end\")\n\n return end, out_of_sample\n\n def _make_predict_dates(self):\n data = self.data\n dtstart = data.predict_start\n dtend = data.predict_end\n freq = data.freq\n\n if freq is not None:\n pandas_freq = _freq_to_pandas[freq]\n # preserve PeriodIndex or DatetimeIndex\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n\n if pandas_freq.freqstr == 'N':\n _dtend = dtend\n if isinstance(dates[-1], Period):\n _dtend = pd.to_datetime(_dtend).to_period(dates.freq)\n if not dates[-1] == _dtend:\n # TODO: this is a hack because a DatetimeIndex with\n # nanosecond frequency does not include \"end\"\n dtend = Timestamp(dtend.value + 1)\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n # handle\n elif freq is None and (isinstance(dtstart, (int, long)) and\n isinstance(dtend, (int, long))):\n from pandas import Index\n dates = Index(lrange(dtstart, dtend+1))\n # if freq is None and dtstart and dtend aren't integers, we're\n # in sample\n else:\n dates = self.data.dates\n start = self._get_dates_loc(dates, dtstart)\n end = self._get_dates_loc(dates, dtend)\n dates = dates[start:end+1] # is this index inclusive?\n self.data.predict_dates = dates\n\nclass TimeSeriesModelResults(base.LikelihoodModelResults):\n def __init__(self, model, params, normalized_cov_params, scale=1.):\n self.data = model.data\n super(TimeSeriesModelResults,\n self).__init__(model, params, normalized_cov_params, scale)\n\nclass TimeSeriesResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {'predict' : 'dates'}\n _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(TimeSeriesResultsWrapper,\n TimeSeriesModelResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n import pandas\n\n data = sm.datasets.macrodata.load()\n\n #make a DataFrame\n #TODO: attach a DataFrame to some of the datasets, for quicker use\n dates = [str(int(x[0])) +':'+ str(int(x[1])) \\\n for x in 
data.data[['year','quarter']]]\n\n df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)\n ex_mod = TimeSeriesModel(df)\n", "path": "statsmodels/tsa/base/tsa_model.py"}]}
| 3,594 | 131 |
gh_patches_debug_13278
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-1882
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incomplete download of MNIST data
### Description
`make test-examples` fails to fully download mnist data dependency for the air example
### Details
On `osx==10.14.1` and `docker==18.09.2`
```
git checkout 3ac5a02e0e6b0a11ae796707413c11df2c14ff6b
make build pyro_branch=dev pytorch_branch=release python_verion=3.6
make run pyro_branch=dev pytorch_branch=release python_verion=3.6
cd pyro
pip install numpy==1.15
pip install scipy==1.2
make test-examples > output 2>&1
```
[Output](https://github.com/pyro-ppl/pyro/files/3204055/make_test_examples.txt)
Resulting files in `.data`
```
ls -lh
-rw-r--r--  1 matt  staff    19M May 21 15:03 train-images-idx3-ubyte.gz.part
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import subprocess
5 import sys
6
7 from setuptools import find_packages, setup
8
9 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
10 VERSION = """
11 # This file is auto-generated with the version information during setup.py installation.
12
13 __version__ = '{}'
14 """
15
16 # Find pyro version.
17 for line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):
18 if line.startswith('version_prefix = '):
19 version = line.strip().split()[2][1:-1]
20
21 # Append current commit sha to version
22 commit_sha = ''
23 try:
24 current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],
25 cwd=PROJECT_PATH).decode('ascii').strip()
26 # only add sha if HEAD does not point to the release tag
27 if not current_tag == version:
28 commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
29 cwd=PROJECT_PATH).decode('ascii').strip()
30 # catch all exception to be safe
31 except Exception:
32 pass # probably not a git repo
33
34 # Write version to _version.py
35 if commit_sha:
36 version += '+{}'.format(commit_sha)
37 with open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:
38 f.write(VERSION.format(version))
39
40 # Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
41 # When releasing on pypi, make sure pandoc is on your system:
42 # $ brew install pandoc # OS X
43 # $ sudo apt-get install pandoc # Ubuntu Linux
44 try:
45 import pypandoc
46 long_description = pypandoc.convert('README.md', 'rst')
47 print(long_description)
48 except Exception as e:
49 sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
50 sys.stderr.flush()
51 long_description = open('README.md').read()
52
53 # Remove badges since they will always be obsolete.
54 # This assumes the first 10 lines contain badge info.
55 long_description = '\n'.join([str(line) for line in long_description.split('\n')[10:]])
56
57 # examples/tutorials
58 EXTRAS_REQUIRE = [
59 'jupyter>=1.0.0',
60 'matplotlib>=1.3',
61 'observations>=0.1.4',
62 'pillow',
63 'torchvision>=0.2.2',
64 'visdom>=0.1.4',
65 'pandas',
66 'seaborn',
67 'wget',
68 ]
69
70 if sys.version_info[0] == 2:
71 EXTRAS_REQUIRE.append('functools32')
72
73 setup(
74 name='pyro-ppl',
75 version=version,
76 description='A Python library for probabilistic modeling and inference',
77 long_description=long_description,
78 packages=find_packages(include=['pyro', 'pyro.*']),
79 url='http://pyro.ai',
80 author='Uber AI Labs',
81 author_email='[email protected]',
82 install_requires=[
83 # if you add any additional libraries, please also
84 # add them to `docs/requirements.txt`
85 'contextlib2',
86 'graphviz>=0.8',
87 # numpy is necessary for some functionality of PyTorch
88 'numpy>=1.7',
89 'opt_einsum>=2.3.2',
90 'six>=1.10.0',
91 'torch>=1.1.0',
92 'tqdm>=4.31',
93 ],
94 extras_require={
95 'extras': EXTRAS_REQUIRE,
96 'test': EXTRAS_REQUIRE + [
97 'nbval',
98 'pytest>=4.1',
99 'pytest-cov',
100 # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871
101 # is fixed.
102 'scipy>=1.1, <1.3',
103 ],
104 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],
105 'dev': EXTRAS_REQUIRE + [
106 'flake8',
107 'isort',
108 'nbformat',
109 'nbsphinx>=0.3.2',
110 'nbstripout',
111 'nbval',
112 'pypandoc',
113 'pytest>=4.1',
114 'pytest-xdist',
115 # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871
116 # is fixed.
117 'scipy>=1.1, < 1.3',
118 'sphinx',
119 'sphinx_rtd_theme',
120 'yapf',
121 ],
122 },
123 tests_require=['flake8', 'pytest>=4.1'],
124 keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
125 license='MIT License',
126 classifiers=[
127 'Intended Audience :: Developers',
128 'Intended Audience :: Education',
129 'Intended Audience :: Science/Research',
130 'Operating System :: POSIX :: Linux',
131 'Operating System :: MacOS :: MacOS X',
132 'Programming Language :: Python :: 2.7',
133 'Programming Language :: Python :: 3.6',
134 ],
135 # yapf
136 )
137
```
Path: `pyro/contrib/examples/multi_mnist.py`
Content:
```
1 """
2 This script generates a dataset similar to the Multi-MNIST dataset
3 described in [1].
4
5 [1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
6 understanding with generative models." Advances in Neural Information
7 Processing Systems. 2016.
8 """
9
10 import os
11
12 import numpy as np
13 from PIL import Image
14
15 from pyro.contrib.examples.util import get_data_loader
16
17
18 def imresize(arr, size):
19 return np.array(Image.fromarray(arr).resize(size))
20
21
22 def sample_one(canvas_size, mnist):
23 i = np.random.randint(mnist['digits'].shape[0])
24 digit = mnist['digits'][i]
25 label = mnist['labels'][i].item()
26 scale = 0.1 * np.random.randn() + 1.3
27 new_size = tuple(int(s / scale) for s in digit.shape)
28 resized = imresize(digit, new_size)
29 w = resized.shape[0]
30 assert w == resized.shape[1]
31 padding = canvas_size - w
32 pad_l = np.random.randint(0, padding)
33 pad_r = np.random.randint(0, padding)
34 pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
35 positioned = np.pad(resized, pad_width, 'constant', constant_values=0)
36 return positioned, label
37
38
39 def sample_multi(num_digits, canvas_size, mnist):
40 canvas = np.zeros((canvas_size, canvas_size))
41 labels = []
42 for _ in range(num_digits):
43 positioned_digit, label = sample_one(canvas_size, mnist)
44 canvas += positioned_digit
45 labels.append(label)
46 # Crude check for overlapping digits.
47 if np.max(canvas) > 255:
48 return sample_multi(num_digits, canvas_size, mnist)
49 else:
50 return canvas, labels
51
52
53 def mk_dataset(n, mnist, max_digits, canvas_size):
54 x = []
55 y = []
56 for _ in range(n):
57 num_digits = np.random.randint(max_digits + 1)
58 canvas, labels = sample_multi(num_digits, canvas_size, mnist)
59 x.append(canvas)
60 y.append(labels)
61 return np.array(x, dtype=np.uint8), y
62
63
64 def load_mnist(root_path):
65 loader = get_data_loader('MNIST', root_path)
66 return {
67 'digits': loader.dataset.data.cpu().numpy(),
68 'labels': loader.dataset.targets
69 }
70
71
72 def load(root_path):
73 file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')
74 if os.path.exists(file_path):
75 data = np.load(file_path)
76 return data['x'], data['y']
77 else:
78 # Set RNG to known state.
79 rng_state = np.random.get_state()
80 np.random.seed(681307)
81 mnist = load_mnist(root_path)
82 print('Generating multi-MNIST dataset...')
83 x, y = mk_dataset(60000, mnist, 2, 50)
84 # Revert RNG state.
85 np.random.set_state(rng_state)
86 # Crude checksum.
87 # assert x.sum() == 883114919, 'Did not generate the expected data.'
88 with open(file_path, 'wb') as f:
89 np.savez_compressed(f, x=x, y=y)
90 print('Done!')
91 return x, y
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/contrib/examples/multi_mnist.py b/pyro/contrib/examples/multi_mnist.py
--- a/pyro/contrib/examples/multi_mnist.py
+++ b/pyro/contrib/examples/multi_mnist.py
@@ -72,7 +72,7 @@
def load(root_path):
file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')
if os.path.exists(file_path):
- data = np.load(file_path)
+ data = np.load(file_path, allow_pickle=True)
return data['x'], data['y']
else:
# Set RNG to known state.
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,6 @@
EXTRAS_REQUIRE = [
'jupyter>=1.0.0',
'matplotlib>=1.3',
- 'observations>=0.1.4',
'pillow',
'torchvision>=0.2.2',
'visdom>=0.1.4',
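A note on the diff (not from the source): the setup.py hunk only drops the unused `observations` dependency; the functional change is `allow_pickle=True`, since the cached archive stores the ragged label lists as an object array, which NumPy >= 1.16.3 refuses to load by default. A self-contained sketch with toy data and a made-up file name:

```python
import numpy as np

x = np.zeros((2, 50, 50), dtype=np.uint8)
y = np.array([[1, 2], [3]], dtype=object)  # ragged labels become an object array

np.savez_compressed("multi_mnist_demo.npz", x=x, y=y)

# Without allow_pickle=True, reading data["y"] raises
# "Object arrays cannot be loaded when allow_pickle=False".
data = np.load("multi_mnist_demo.npz", allow_pickle=True)
print(data["x"].shape, list(data["y"]))
```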
|
{"golden_diff": "diff --git a/pyro/contrib/examples/multi_mnist.py b/pyro/contrib/examples/multi_mnist.py\n--- a/pyro/contrib/examples/multi_mnist.py\n+++ b/pyro/contrib/examples/multi_mnist.py\n@@ -72,7 +72,7 @@\n def load(root_path):\n file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')\n if os.path.exists(file_path):\n- data = np.load(file_path)\n+ data = np.load(file_path, allow_pickle=True)\n return data['x'], data['y']\n else:\n # Set RNG to known state.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,6 @@\n EXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'matplotlib>=1.3',\n- 'observations>=0.1.4',\n 'pillow',\n 'torchvision>=0.2.2',\n 'visdom>=0.1.4',\n", "issue": "Incomplete download of MNIST data\n### Description\r\n\r\n`make test-examples` fails to fully download mnist data dependency for the air example\r\n\r\n### Details\r\n\r\nOn `osx==10.14.1` and `docker==18.09.2`\r\n\r\n```\r\ngit checkout 3ac5a02e0e6b0a11ae796707413c11df2c14ff6b\r\nmake build pyro_branch=dev pytorch_branch=release python_verion=3.6\r\nmake run pyro_branch=dev pytorch_branch=release python_verion=3.6\r\ncd pyro\r\npip install numpy==1.15\r\npip install scipy==1.2\r\nmake test-examples > output 2>&1\r\n```\r\n\r\n[Output](https://github.com/pyro-ppl/pyro/files/3204055/make_test_examples.txt)\r\n\r\nResulting files in `.data`\r\n```\r\nls -lh -rw-r--r-- 1 matt staff 19M May 21 15:03 train-images-idx3-ubyte.gz.part\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nVERSION = \"\"\"\n# This file is auto-generated with the version information during setup.py installation.\n\n__version__ = '{}'\n\"\"\"\n\n# Find pyro version.\nfor line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):\n if line.startswith('version_prefix = '):\n version = line.strip().split()[2][1:-1]\n\n# Append current commit sha to version\ncommit_sha = ''\ntry:\n current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n # only add sha if HEAD does not point to the release tag\n if not current_tag == version:\n commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n# catch all exception to be safe\nexcept Exception:\n pass # probably not a git repo\n\n# Write version to _version.py\nif commit_sha:\n version += '+{}'.format(commit_sha)\nwith open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:\n f.write(VERSION.format(version))\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\n print(long_description)\nexcept Exception as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\n# This assumes the first 10 lines contain badge info.\nlong_description = '\\n'.join([str(line) for line in long_description.split('\\n')[10:]])\n\n# examples/tutorials\nEXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'matplotlib>=1.3',\n 'observations>=0.1.4',\n 'pillow',\n 
'torchvision>=0.2.2',\n 'visdom>=0.1.4',\n 'pandas',\n 'seaborn',\n 'wget',\n]\n\nif sys.version_info[0] == 2:\n EXTRAS_REQUIRE.append('functools32')\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(include=['pyro', 'pyro.*']),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n # if you add any additional libraries, please also\n # add them to `docs/requirements.txt`\n 'contextlib2',\n 'graphviz>=0.8',\n # numpy is necessary for some functionality of PyTorch\n 'numpy>=1.7',\n 'opt_einsum>=2.3.2',\n 'six>=1.10.0',\n 'torch>=1.1.0',\n 'tqdm>=4.31',\n ],\n extras_require={\n 'extras': EXTRAS_REQUIRE,\n 'test': EXTRAS_REQUIRE + [\n 'nbval',\n 'pytest>=4.1',\n 'pytest-cov',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, <1.3',\n ],\n 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],\n 'dev': EXTRAS_REQUIRE + [\n 'flake8',\n 'isort',\n 'nbformat',\n 'nbsphinx>=0.3.2',\n 'nbstripout',\n 'nbval',\n 'pypandoc',\n 'pytest>=4.1',\n 'pytest-xdist',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, < 1.3',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n ],\n },\n tests_require=['flake8', 'pytest>=4.1'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n # yapf\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nThis script generates a dataset similar to the Multi-MNIST dataset\ndescribed in [1].\n\n[1] Eslami, SM Ali, et al. \"Attend, infer, repeat: Fast scene\nunderstanding with generative models.\" Advances in Neural Information\nProcessing Systems. 
2016.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\nfrom pyro.contrib.examples.util import get_data_loader\n\n\ndef imresize(arr, size):\n return np.array(Image.fromarray(arr).resize(size))\n\n\ndef sample_one(canvas_size, mnist):\n i = np.random.randint(mnist['digits'].shape[0])\n digit = mnist['digits'][i]\n label = mnist['labels'][i].item()\n scale = 0.1 * np.random.randn() + 1.3\n new_size = tuple(int(s / scale) for s in digit.shape)\n resized = imresize(digit, new_size)\n w = resized.shape[0]\n assert w == resized.shape[1]\n padding = canvas_size - w\n pad_l = np.random.randint(0, padding)\n pad_r = np.random.randint(0, padding)\n pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))\n positioned = np.pad(resized, pad_width, 'constant', constant_values=0)\n return positioned, label\n\n\ndef sample_multi(num_digits, canvas_size, mnist):\n canvas = np.zeros((canvas_size, canvas_size))\n labels = []\n for _ in range(num_digits):\n positioned_digit, label = sample_one(canvas_size, mnist)\n canvas += positioned_digit\n labels.append(label)\n # Crude check for overlapping digits.\n if np.max(canvas) > 255:\n return sample_multi(num_digits, canvas_size, mnist)\n else:\n return canvas, labels\n\n\ndef mk_dataset(n, mnist, max_digits, canvas_size):\n x = []\n y = []\n for _ in range(n):\n num_digits = np.random.randint(max_digits + 1)\n canvas, labels = sample_multi(num_digits, canvas_size, mnist)\n x.append(canvas)\n y.append(labels)\n return np.array(x, dtype=np.uint8), y\n\n\ndef load_mnist(root_path):\n loader = get_data_loader('MNIST', root_path)\n return {\n 'digits': loader.dataset.data.cpu().numpy(),\n 'labels': loader.dataset.targets\n }\n\n\ndef load(root_path):\n file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')\n if os.path.exists(file_path):\n data = np.load(file_path)\n return data['x'], data['y']\n else:\n # Set RNG to known state.\n rng_state = np.random.get_state()\n np.random.seed(681307)\n mnist = load_mnist(root_path)\n print('Generating multi-MNIST dataset...')\n x, y = mk_dataset(60000, mnist, 2, 50)\n # Revert RNG state.\n np.random.set_state(rng_state)\n # Crude checksum.\n # assert x.sum() == 883114919, 'Did not generate the expected data.'\n with open(file_path, 'wb') as f:\n np.savez_compressed(f, x=x, y=y)\n print('Done!')\n return x, y\n", "path": "pyro/contrib/examples/multi_mnist.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nVERSION = \"\"\"\n# This file is auto-generated with the version information during setup.py installation.\n\n__version__ = '{}'\n\"\"\"\n\n# Find pyro version.\nfor line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):\n if line.startswith('version_prefix = '):\n version = line.strip().split()[2][1:-1]\n\n# Append current commit sha to version\ncommit_sha = ''\ntry:\n current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n # only add sha if HEAD does not point to the release tag\n if not current_tag == version:\n commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n# catch all exception to be safe\nexcept Exception:\n pass # probably not a git repo\n\n# Write version to _version.py\nif commit_sha:\n version += '+{}'.format(commit_sha)\nwith 
open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:\n f.write(VERSION.format(version))\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\n print(long_description)\nexcept Exception as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\n# This assumes the first 10 lines contain badge info.\nlong_description = '\\n'.join([str(line) for line in long_description.split('\\n')[10:]])\n\n# examples/tutorials\nEXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'matplotlib>=1.3',\n 'pillow',\n 'torchvision>=0.2.2',\n 'visdom>=0.1.4',\n 'pandas',\n 'seaborn',\n 'wget',\n]\n\nif sys.version_info[0] == 2:\n EXTRAS_REQUIRE.append('functools32')\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(include=['pyro', 'pyro.*']),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n # if you add any additional libraries, please also\n # add them to `docs/requirements.txt`\n 'contextlib2',\n 'graphviz>=0.8',\n # numpy is necessary for some functionality of PyTorch\n 'numpy>=1.7',\n 'opt_einsum>=2.3.2',\n 'six>=1.10.0',\n 'torch>=1.1.0',\n 'tqdm>=4.31',\n ],\n extras_require={\n 'extras': EXTRAS_REQUIRE,\n 'test': EXTRAS_REQUIRE + [\n 'nbval',\n 'pytest>=4.1',\n 'pytest-cov',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, <1.3',\n ],\n 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],\n 'dev': EXTRAS_REQUIRE + [\n 'flake8',\n 'isort',\n 'nbformat',\n 'nbsphinx>=0.3.2',\n 'nbstripout',\n 'nbval',\n 'pypandoc',\n 'pytest>=4.1',\n 'pytest-xdist',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, < 1.3',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n ],\n },\n tests_require=['flake8', 'pytest>=4.1'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n # yapf\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nThis script generates a dataset similar to the Multi-MNIST dataset\ndescribed in [1].\n\n[1] Eslami, SM Ali, et al. \"Attend, infer, repeat: Fast scene\nunderstanding with generative models.\" Advances in Neural Information\nProcessing Systems. 
2016.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\nfrom pyro.contrib.examples.util import get_data_loader\n\n\ndef imresize(arr, size):\n return np.array(Image.fromarray(arr).resize(size))\n\n\ndef sample_one(canvas_size, mnist):\n i = np.random.randint(mnist['digits'].shape[0])\n digit = mnist['digits'][i]\n label = mnist['labels'][i].item()\n scale = 0.1 * np.random.randn() + 1.3\n new_size = tuple(int(s / scale) for s in digit.shape)\n resized = imresize(digit, new_size)\n w = resized.shape[0]\n assert w == resized.shape[1]\n padding = canvas_size - w\n pad_l = np.random.randint(0, padding)\n pad_r = np.random.randint(0, padding)\n pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))\n positioned = np.pad(resized, pad_width, 'constant', constant_values=0)\n return positioned, label\n\n\ndef sample_multi(num_digits, canvas_size, mnist):\n canvas = np.zeros((canvas_size, canvas_size))\n labels = []\n for _ in range(num_digits):\n positioned_digit, label = sample_one(canvas_size, mnist)\n canvas += positioned_digit\n labels.append(label)\n # Crude check for overlapping digits.\n if np.max(canvas) > 255:\n return sample_multi(num_digits, canvas_size, mnist)\n else:\n return canvas, labels\n\n\ndef mk_dataset(n, mnist, max_digits, canvas_size):\n x = []\n y = []\n for _ in range(n):\n num_digits = np.random.randint(max_digits + 1)\n canvas, labels = sample_multi(num_digits, canvas_size, mnist)\n x.append(canvas)\n y.append(labels)\n return np.array(x, dtype=np.uint8), y\n\n\ndef load_mnist(root_path):\n loader = get_data_loader('MNIST', root_path)\n return {\n 'digits': loader.dataset.data.cpu().numpy(),\n 'labels': loader.dataset.targets\n }\n\n\ndef load(root_path):\n file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')\n if os.path.exists(file_path):\n data = np.load(file_path, allow_pickle=True)\n return data['x'], data['y']\n else:\n # Set RNG to known state.\n rng_state = np.random.get_state()\n np.random.seed(681307)\n mnist = load_mnist(root_path)\n print('Generating multi-MNIST dataset...')\n x, y = mk_dataset(60000, mnist, 2, 50)\n # Revert RNG state.\n np.random.set_state(rng_state)\n # Crude checksum.\n # assert x.sum() == 883114919, 'Did not generate the expected data.'\n with open(file_path, 'wb') as f:\n np.savez_compressed(f, x=x, y=y)\n print('Done!')\n return x, y\n", "path": "pyro/contrib/examples/multi_mnist.py"}]}
| 2,875 | 234 |
gh_patches_debug_1395
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-5710
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Irreproducible results with `MultiRocketMultivariate`
`random_state` does not guarantee the same results for each run.
```python
rng = np.random.default_rng()
X = pd.DataFrame([
pd.Series([
pd.Series(rng.integers(0, 10, 100)).astype(float),
pd.Series(rng.integers(0, 10, 100)).astype(float),
]),
pd.Series([
pd.Series(rng.integers(0, 10, 100)).astype(float),
pd.Series(rng.integers(0, 10, 100)).astype(float),
]),
])
MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) - MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)
```
The output should always be a `DataFrame` of zeros, but this is not the case.
<details>
System:
python: 3.9.6 (default, Aug 11 2023, 19:44:49) [Clang 15.0.0 (clang-1500.0.40.1)]
executable: /Users/temp/sktime/venv/bin/python
machine: macOS-14.1.2-arm64-arm-64bit
Python dependencies:
pip: 23.3.1
sktime: 0.25.0
sklearn: 1.3.2
skbase: 0.4.6
numpy: 1.26.1
scipy: 1.11.3
pandas: 2.1.4
matplotlib: 3.8.1
joblib: 1.3.2
numba: 0.58.1
statsmodels: 0.14.0
pmdarima: None
statsforecast: None
tsfresh: 0.20.1
tslearn: 0.6.3
torch: 2.1.0
tensorflow: None
tensorflow_probability: None
</details>
--- END ISSUE ---
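As an aside (not part of the original report), the expectation can be phrased as a direct check; the seeded data generator, the helper, and the `np.allclose` call below are illustrative additions:

```python
import numpy as np
import pandas as pd
from sktime.transformations.panel.rocket import MultiRocketMultivariate

rng = np.random.default_rng(0)  # seed the data too, so only the transformer's RNG is in play

def make_cell():
    return pd.Series(rng.integers(0, 10, 100)).astype(float)

# Same nested (panel) layout as in the report: 2 instances, 2 channels, length 100.
X = pd.DataFrame([pd.Series([make_cell(), make_cell()]) for _ in range(2)])

t1 = MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)
t2 = MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)
print(np.allclose(t1.values, t2.values))  # reported False before the fix; expected True once random_state is respected
```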
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/transformations/panel/rocket/_multirocket_multivariate.py`
Content:
```
1 import multiprocessing
2
3 import numpy as np
4 import pandas as pd
5
6 from sktime.transformations.base import BaseTransformer
7
8
9 class MultiRocketMultivariate(BaseTransformer):
10 """Multi RandOm Convolutional KErnel Transform (MultiRocket).
11
12 MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw
13 series and the first order differenced series representation. It uses a different
14 set of dilations and used for each representation. In addition to percentage of
15 positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive
16 Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of
17 Positive Values (LSPV). This version is the multivariate version.
18
19 This transformer fits one set of paramereters per individual series,
20 and applies the transform with fitted parameter i to the i-th series in transform.
21 Vanilla use requires same number of series in fit and transform.
22
23 To fit and transform series at the same time,
24 without an identification of fit/transform instances,
25 wrap this transformer in ``FitInTransform``,
26 from ``sktime.transformations.compose``.
27
28 Parameters
29 ----------
30 num_kernels : int, default=6,250
31 number of random convolutional kernels. The calculated number of features is the
32 nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000
33 (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).
34 max_dilations_per_kernel : int, default=32
35 maximum number of dilations per kernel.
36 n_features_per_kernel : int, default =4
37 number of features per kernel.
38 normalise : bool, default False
39 n_jobs : int, default=1
40 The number of jobs to run in parallel for `transform`. ``-1`` means using all
41 processors.
42 random_state : None or int, default = None
43
44 Attributes
45 ----------
46 parameter : tuple
47 parameter (dilations, num_features_per_dilation, biases) for
48 transformation of input X
49 parameter1 : tuple
50 parameter (dilations, num_features_per_dilation, biases) for
51 transformation of input X1 = np.diff(X, 1)
52
53 See Also
54 --------
55 MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket
56
57 References
58 ----------
59 .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and
60 Webb, Geoffrey I, "MultiRocket: Multiple pooling operators and transformations
61 for fast and effective time series classification",2022,
62 https://link.springer.com/article/10.1007/s10618-022-00844-1
63 https://arxiv.org/abs/2102.00457
64
65 Examples
66 --------
67 >>> from sktime.transformations.panel.rocket import Rocket
68 >>> from sktime.datasets import load_basic_motions
69 >>> X_train, y_train = load_basic_motions(split="train") # doctest: +SKIP
70 >>> X_test, y_test = load_basic_motions(split="test") # doctest: +SKIP
71 >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP
72 >>> trf.fit(X_train) # doctest: +SKIP
73 MultiRocketMultivariate(...)
74 >>> X_train = trf.transform(X_train) # doctest: +SKIP
75 >>> X_test = trf.transform(X_test) # doctest: +SKIP
76 """
77
78 _tags = {
79 "univariate-only": False,
80 "fit_is_empty": False,
81 "scitype:transform-input": "Series",
82 # what is the scitype of X: Series, or Panel
83 "scitype:transform-output": "Primitives",
84 # what is the scitype of y: None (not needed), Primitives, Series, Panel
85 "scitype:instancewise": False, # is this an instance-wise transform?
86 "X_inner_mtype": "numpy3D", # which mtypes do _fit/_predict support for X?
87 "y_inner_mtype": "None", # which mtypes do _fit/_predict support for X?
88 "python_dependencies": "numba",
89 }
90
91 def __init__(
92 self,
93 num_kernels=6_250,
94 max_dilations_per_kernel=32,
95 n_features_per_kernel=4,
96 normalise=False,
97 n_jobs=1,
98 random_state=None,
99 ):
100 self.max_dilations_per_kernel = max_dilations_per_kernel
101 self.n_features_per_kernel = n_features_per_kernel
102 self.num_kernels = num_kernels
103 self.normalise = normalise
104 self.n_jobs = n_jobs
105 self.random_state = random_state if isinstance(random_state, int) else None
106
107 self.parameter = None
108 self.parameter1 = None
109
110 super().__init__()
111
112 def _fit(self, X, y=None):
113 """Fit dilations and biases to input time series.
114
115 Parameters
116 ----------
117 X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
118 panel of time series to transform
119 y : ignored argument for interface compatibility
120
121 Returns
122 -------
123 self
124 """
125 if self.normalise:
126 X = (X - X.mean(axis=-1, keepdims=True)) / (
127 X.std(axis=-1, keepdims=True) + 1e-8
128 )
129
130 if X.shape[2] < 10:
131 # handling very short series (like PensDigit from the MTSC archive)
132 # series have to be at least a length of 10 (including differencing)
133 _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)
134 _X1[:, :, : X.shape[2]] = X
135 X = _X1
136 del _X1
137
138 X = X.astype(np.float64)
139
140 self.parameter = self._get_parameter(X)
141 _X1 = np.diff(X, 1)
142
143 self.parameter1 = self._get_parameter(_X1)
144
145 return self
146
147 def _transform(self, X, y=None):
148 """Transform input time series using random convolutional kernels.
149
150 Parameters
151 ----------
152 X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
153 panel of time series to transform
154 y : ignored argument for interface compatibility
155
156 Returns
157 -------
158 pandas DataFrame, transformed features
159 """
160 from numba import get_num_threads, set_num_threads
161
162 from sktime.transformations.panel.rocket._multirocket_multi_numba import (
163 _transform,
164 )
165
166 if self.normalise:
167 X = (X - X.mean(axis=-1, keepdims=True)) / (
168 X.std(axis=-1, keepdims=True) + 1e-8
169 )
170
171 _X1 = np.diff(X, 1)
172
173 # change n_jobs depended on value and existing cores
174 prev_threads = get_num_threads()
175 if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():
176 n_jobs = multiprocessing.cpu_count()
177 else:
178 n_jobs = self.n_jobs
179 set_num_threads(n_jobs)
180
181 X = _transform(
182 X,
183 _X1,
184 self.parameter,
185 self.parameter1,
186 self.n_features_per_kernel,
187 )
188 X = np.nan_to_num(X)
189
190 set_num_threads(prev_threads)
191
192 return pd.DataFrame(X)
193
194 def _get_parameter(self, X):
195 from sktime.transformations.panel.rocket._multirocket_multi_numba import (
196 _fit_biases,
197 _fit_dilations,
198 _quantiles,
199 )
200
201 _, num_channels, input_length = X.shape
202
203 num_kernels = 84
204
205 dilations, num_features_per_dilation = _fit_dilations(
206 input_length, self.num_kernels, self.max_dilations_per_kernel
207 )
208
209 num_features_per_kernel = np.sum(num_features_per_dilation)
210
211 quantiles = _quantiles(num_kernels * num_features_per_kernel)
212
213 num_dilations = len(dilations)
214 num_combinations = num_kernels * num_dilations
215
216 max_num_channels = min(num_channels, 9)
217 max_exponent = np.log2(max_num_channels + 1)
218
219 num_channels_per_combination = (
220 2 ** np.random.uniform(0, max_exponent, num_combinations)
221 ).astype(np.int32)
222
223 channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)
224
225 num_channels_start = 0
226 for combination_index in range(num_combinations):
227 num_channels_this_combination = num_channels_per_combination[
228 combination_index
229 ]
230 num_channels_end = num_channels_start + num_channels_this_combination
231 channel_indices[num_channels_start:num_channels_end] = np.random.choice(
232 num_channels, num_channels_this_combination, replace=False
233 )
234
235 num_channels_start = num_channels_end
236
237 biases = _fit_biases(
238 X,
239 num_channels_per_combination,
240 channel_indices,
241 dilations,
242 num_features_per_dilation,
243 quantiles,
244 self.random_state,
245 )
246
247 return (
248 num_channels_per_combination,
249 channel_indices,
250 dilations,
251 num_features_per_dilation,
252 biases,
253 )
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/transformations/panel/rocket/_multirocket_multivariate.py b/sktime/transformations/panel/rocket/_multirocket_multivariate.py
--- a/sktime/transformations/panel/rocket/_multirocket_multivariate.py
+++ b/sktime/transformations/panel/rocket/_multirocket_multivariate.py
@@ -198,6 +198,9 @@
_quantiles,
)
+ if self.random_state is not None:
+ np.random.seed(self.random_state)
+
_, num_channels, input_length = X.shape
num_kernels = 84
|
{"golden_diff": "diff --git a/sktime/transformations/panel/rocket/_multirocket_multivariate.py b/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n--- a/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n+++ b/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n@@ -198,6 +198,9 @@\n _quantiles,\n )\n \n+ if self.random_state is not None:\n+ np.random.seed(self.random_state)\n+\n _, num_channels, input_length = X.shape\n \n num_kernels = 84\n", "issue": "[BUG] Irreproducible results with `MultiRocketMultivariate`\n`random_state` does guarantee the same results for each run.\r\n\r\n```python\r\nrng = np.random.default_rng()\r\n\r\nX = pd.DataFrame([\r\n pd.Series([\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n ]),\r\n pd.Series([\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n ]),\r\n])\r\n\r\nMultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) - MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)\r\n```\r\n\r\nThe output should always be a `DataFrame` of zeros, but this is not the case.\r\n\r\n<details>\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n\r\nfrom sktime import show_versions; show_versions()\r\n-->\r\n\r\nSystem:\r\n python: 3.9.6 (default, Aug 11 2023, 19:44:49) [Clang 15.0.0 (clang-1500.0.40.1)]\r\nexecutable: /Users/temp/sktime/venv/bin/python\r\n machine: macOS-14.1.2-arm64-arm-64bit\r\n\r\nPython dependencies:\r\n pip: 23.3.1\r\n sktime: 0.25.0\r\n sklearn: 1.3.2\r\n skbase: 0.4.6\r\n numpy: 1.26.1\r\n scipy: 1.11.3\r\n pandas: 2.1.4\r\n matplotlib: 3.8.1\r\n joblib: 1.3.2\r\n numba: 0.58.1\r\n statsmodels: 0.14.0\r\n pmdarima: None\r\nstatsforecast: None\r\n tsfresh: 0.20.1\r\n tslearn: 0.6.3\r\n torch: 2.1.0\r\n tensorflow: None\r\ntensorflow_probability: None\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! -->\r\n\n", "before_files": [{"content": "import multiprocessing\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n\nclass MultiRocketMultivariate(BaseTransformer):\n \"\"\"Multi RandOm Convolutional KErnel Transform (MultiRocket).\n\n MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw\n series and the first order differenced series representation. It uses a different\n set of dilations and used for each representation. In addition to percentage of\n positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive\n Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of\n Positive Values (LSPV). This version is the multivariate version.\n\n This transformer fits one set of paramereters per individual series,\n and applies the transform with fitted parameter i to the i-th series in transform.\n Vanilla use requires same number of series in fit and transform.\n\n To fit and transform series at the same time,\n without an identification of fit/transform instances,\n wrap this transformer in ``FitInTransform``,\n from ``sktime.transformations.compose``.\n\n Parameters\n ----------\n num_kernels : int, default=6,250\n number of random convolutional kernels. 
The calculated number of features is the\n nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000\n (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).\n max_dilations_per_kernel : int, default=32\n maximum number of dilations per kernel.\n n_features_per_kernel : int, default =4\n number of features per kernel.\n normalise : bool, default False\n n_jobs : int, default=1\n The number of jobs to run in parallel for `transform`. ``-1`` means using all\n processors.\n random_state : None or int, default = None\n\n Attributes\n ----------\n parameter : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X\n parameter1 : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X1 = np.diff(X, 1)\n\n See Also\n --------\n MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket\n\n References\n ----------\n .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and\n Webb, Geoffrey I, \"MultiRocket: Multiple pooling operators and transformations\n for fast and effective time series classification\",2022,\n https://link.springer.com/article/10.1007/s10618-022-00844-1\n https://arxiv.org/abs/2102.00457\n\n Examples\n --------\n >>> from sktime.transformations.panel.rocket import Rocket\n >>> from sktime.datasets import load_basic_motions\n >>> X_train, y_train = load_basic_motions(split=\"train\") # doctest: +SKIP\n >>> X_test, y_test = load_basic_motions(split=\"test\") # doctest: +SKIP\n >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP\n >>> trf.fit(X_train) # doctest: +SKIP\n MultiRocketMultivariate(...)\n >>> X_train = trf.transform(X_train) # doctest: +SKIP\n >>> X_test = trf.transform(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Primitives\",\n # what is the scitype of y: None (not needed), Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"numpy3D\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n num_kernels=6_250,\n max_dilations_per_kernel=32,\n n_features_per_kernel=4,\n normalise=False,\n n_jobs=1,\n random_state=None,\n ):\n self.max_dilations_per_kernel = max_dilations_per_kernel\n self.n_features_per_kernel = n_features_per_kernel\n self.num_kernels = num_kernels\n self.normalise = normalise\n self.n_jobs = n_jobs\n self.random_state = random_state if isinstance(random_state, int) else None\n\n self.parameter = None\n self.parameter1 = None\n\n super().__init__()\n\n def _fit(self, X, y=None):\n \"\"\"Fit dilations and biases to input time series.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n self\n \"\"\"\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n if X.shape[2] < 10:\n # handling very short series (like PensDigit from the MTSC archive)\n # series have to be at least a length of 10 (including differencing)\n _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)\n _X1[:, :, : X.shape[2]] = X\n X = _X1\n 
del _X1\n\n X = X.astype(np.float64)\n\n self.parameter = self._get_parameter(X)\n _X1 = np.diff(X, 1)\n\n self.parameter1 = self._get_parameter(_X1)\n\n return self\n\n def _transform(self, X, y=None):\n \"\"\"Transform input time series using random convolutional kernels.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n pandas DataFrame, transformed features\n \"\"\"\n from numba import get_num_threads, set_num_threads\n\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _transform,\n )\n\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n _X1 = np.diff(X, 1)\n\n # change n_jobs depended on value and existing cores\n prev_threads = get_num_threads()\n if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():\n n_jobs = multiprocessing.cpu_count()\n else:\n n_jobs = self.n_jobs\n set_num_threads(n_jobs)\n\n X = _transform(\n X,\n _X1,\n self.parameter,\n self.parameter1,\n self.n_features_per_kernel,\n )\n X = np.nan_to_num(X)\n\n set_num_threads(prev_threads)\n\n return pd.DataFrame(X)\n\n def _get_parameter(self, X):\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _fit_biases,\n _fit_dilations,\n _quantiles,\n )\n\n _, num_channels, input_length = X.shape\n\n num_kernels = 84\n\n dilations, num_features_per_dilation = _fit_dilations(\n input_length, self.num_kernels, self.max_dilations_per_kernel\n )\n\n num_features_per_kernel = np.sum(num_features_per_dilation)\n\n quantiles = _quantiles(num_kernels * num_features_per_kernel)\n\n num_dilations = len(dilations)\n num_combinations = num_kernels * num_dilations\n\n max_num_channels = min(num_channels, 9)\n max_exponent = np.log2(max_num_channels + 1)\n\n num_channels_per_combination = (\n 2 ** np.random.uniform(0, max_exponent, num_combinations)\n ).astype(np.int32)\n\n channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)\n\n num_channels_start = 0\n for combination_index in range(num_combinations):\n num_channels_this_combination = num_channels_per_combination[\n combination_index\n ]\n num_channels_end = num_channels_start + num_channels_this_combination\n channel_indices[num_channels_start:num_channels_end] = np.random.choice(\n num_channels, num_channels_this_combination, replace=False\n )\n\n num_channels_start = num_channels_end\n\n biases = _fit_biases(\n X,\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n quantiles,\n self.random_state,\n )\n\n return (\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n biases,\n )\n", "path": "sktime/transformations/panel/rocket/_multirocket_multivariate.py"}], "after_files": [{"content": "import multiprocessing\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n\nclass MultiRocketMultivariate(BaseTransformer):\n \"\"\"Multi RandOm Convolutional KErnel Transform (MultiRocket).\n\n MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw\n series and the first order differenced series representation. It uses a different\n set of dilations and used for each representation. 
In addition to percentage of\n positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive\n Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of\n Positive Values (LSPV). This version is the multivariate version.\n\n This transformer fits one set of paramereters per individual series,\n and applies the transform with fitted parameter i to the i-th series in transform.\n Vanilla use requires same number of series in fit and transform.\n\n To fit and transform series at the same time,\n without an identification of fit/transform instances,\n wrap this transformer in ``FitInTransform``,\n from ``sktime.transformations.compose``.\n\n Parameters\n ----------\n num_kernels : int, default=6,250\n number of random convolutional kernels. The calculated number of features is the\n nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000\n (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).\n max_dilations_per_kernel : int, default=32\n maximum number of dilations per kernel.\n n_features_per_kernel : int, default =4\n number of features per kernel.\n normalise : bool, default False\n n_jobs : int, default=1\n The number of jobs to run in parallel for `transform`. ``-1`` means using all\n processors.\n random_state : None or int, default = None\n\n Attributes\n ----------\n parameter : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X\n parameter1 : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X1 = np.diff(X, 1)\n\n See Also\n --------\n MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket\n\n References\n ----------\n .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and\n Webb, Geoffrey I, \"MultiRocket: Multiple pooling operators and transformations\n for fast and effective time series classification\",2022,\n https://link.springer.com/article/10.1007/s10618-022-00844-1\n https://arxiv.org/abs/2102.00457\n\n Examples\n --------\n >>> from sktime.transformations.panel.rocket import Rocket\n >>> from sktime.datasets import load_basic_motions\n >>> X_train, y_train = load_basic_motions(split=\"train\") # doctest: +SKIP\n >>> X_test, y_test = load_basic_motions(split=\"test\") # doctest: +SKIP\n >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP\n >>> trf.fit(X_train) # doctest: +SKIP\n MultiRocketMultivariate(...)\n >>> X_train = trf.transform(X_train) # doctest: +SKIP\n >>> X_test = trf.transform(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Primitives\",\n # what is the scitype of y: None (not needed), Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"numpy3D\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n num_kernels=6_250,\n max_dilations_per_kernel=32,\n n_features_per_kernel=4,\n normalise=False,\n n_jobs=1,\n random_state=None,\n ):\n self.max_dilations_per_kernel = max_dilations_per_kernel\n self.n_features_per_kernel = n_features_per_kernel\n self.num_kernels = num_kernels\n self.normalise = normalise\n self.n_jobs = n_jobs\n self.random_state = random_state if 
isinstance(random_state, int) else None\n\n self.parameter = None\n self.parameter1 = None\n\n super().__init__()\n\n def _fit(self, X, y=None):\n \"\"\"Fit dilations and biases to input time series.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n self\n \"\"\"\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n if X.shape[2] < 10:\n # handling very short series (like PensDigit from the MTSC archive)\n # series have to be at least a length of 10 (including differencing)\n _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)\n _X1[:, :, : X.shape[2]] = X\n X = _X1\n del _X1\n\n X = X.astype(np.float64)\n\n self.parameter = self._get_parameter(X)\n _X1 = np.diff(X, 1)\n\n self.parameter1 = self._get_parameter(_X1)\n\n return self\n\n def _transform(self, X, y=None):\n \"\"\"Transform input time series using random convolutional kernels.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n pandas DataFrame, transformed features\n \"\"\"\n from numba import get_num_threads, set_num_threads\n\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _transform,\n )\n\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n _X1 = np.diff(X, 1)\n\n # change n_jobs depended on value and existing cores\n prev_threads = get_num_threads()\n if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():\n n_jobs = multiprocessing.cpu_count()\n else:\n n_jobs = self.n_jobs\n set_num_threads(n_jobs)\n\n X = _transform(\n X,\n _X1,\n self.parameter,\n self.parameter1,\n self.n_features_per_kernel,\n )\n X = np.nan_to_num(X)\n\n set_num_threads(prev_threads)\n\n return pd.DataFrame(X)\n\n def _get_parameter(self, X):\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _fit_biases,\n _fit_dilations,\n _quantiles,\n )\n\n if self.random_state is not None:\n np.random.seed(self.random_state)\n\n _, num_channels, input_length = X.shape\n\n num_kernels = 84\n\n dilations, num_features_per_dilation = _fit_dilations(\n input_length, self.num_kernels, self.max_dilations_per_kernel\n )\n\n num_features_per_kernel = np.sum(num_features_per_dilation)\n\n quantiles = _quantiles(num_kernels * num_features_per_kernel)\n\n num_dilations = len(dilations)\n num_combinations = num_kernels * num_dilations\n\n max_num_channels = min(num_channels, 9)\n max_exponent = np.log2(max_num_channels + 1)\n\n num_channels_per_combination = (\n 2 ** np.random.uniform(0, max_exponent, num_combinations)\n ).astype(np.int32)\n\n channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)\n\n num_channels_start = 0\n for combination_index in range(num_combinations):\n num_channels_this_combination = num_channels_per_combination[\n combination_index\n ]\n num_channels_end = num_channels_start + num_channels_this_combination\n channel_indices[num_channels_start:num_channels_end] = np.random.choice(\n num_channels, num_channels_this_combination, replace=False\n )\n\n num_channels_start = num_channels_end\n\n biases = _fit_biases(\n X,\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n 
quantiles,\n self.random_state,\n )\n\n return (\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n biases,\n )\n", "path": "sktime/transformations/panel/rocket/_multirocket_multivariate.py"}]}
| 3,554 | 138 |
gh_patches_debug_39703
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-1594
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to create geographic feature resource
The files are from this resource https://www.hydroshare.org/resource/822fe5a7a83a49668c77a85179c3a7ee/.
The error is shown in the screenshot below. Thanks for looking into this issue.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_geographic_feature_resource/parse_lib.py`
Content:
```
1 import os
2 import xmltodict
3 import re
4 from osgeo import ogr, osr
5 try:
6 # Python 2.6-2.7
7 from HTMLParser import HTMLParser
8 except ImportError:
9 # Python 3
10 from html.parser import HTMLParser
11
12 UNKNOWN_STR = "unknown"
13
14
15 def parse_shp(file_path):
16 # output dictionary format
17 # shp_metadata_dict["origin_projection_string"]: original projection string
18 # shp_metadata_dict["origin_projection_name"]: origin_projection_name
19 # shp_metadata_dict["origin_datum"]: origin_datum
20 # shp_metadata_dict["origin_unit"]: origin_unit
21 # shp_metadata_dict["field_meta_dict"]["field_list"]: list [fieldname1, fieldname2...]
22 # shp_metadata_dict["field_meta_dict"]["field_attr_dic"]:
23 # dict {"fieldname": dict {
24 # "fieldName":fieldName,
25 # "fieldTypeCode":fieldTypeCode,
26 # "fieldType":fieldType,
27 # "fieldWidth:fieldWidth,
28 # "fieldPrecision:fieldPrecision"
29 # }
30 # }
31 # shp_metadata_dict["feature_count"]: feature count
32 # shp_metadata_dict["geometry_type"]: geometry_type
33 # shp_metadata_dict["origin_extent_dict"]:
34 # dict{"west": east, "north":north, "east":east, "south":south}
35 # shp_metadata_dict["wgs84_extent_dict"]:
36 # dict{"west": east, "north":north, "east":east, "south":south}
37
38 shp_metadata_dict = {}
39 # read shapefile
40 driver = ogr.GetDriverByName('ESRI Shapefile')
41 dataset = driver.Open(file_path)
42
43 # get layer
44 layer = dataset.GetLayer()
45 # get spatialRef from layer
46 spatialRef_from_layer = layer.GetSpatialRef()
47
48 if spatialRef_from_layer is not None:
49 shp_metadata_dict["origin_projection_string"] = str(spatialRef_from_layer)
50 prj_name = spatialRef_from_layer.GetAttrValue('projcs')
51 if prj_name is None:
52 prj_name = spatialRef_from_layer.GetAttrValue('geogcs')
53 shp_metadata_dict["origin_projection_name"] = prj_name
54
55 shp_metadata_dict["origin_datum"] = spatialRef_from_layer.GetAttrValue('datum')
56 shp_metadata_dict["origin_unit"] = spatialRef_from_layer.GetAttrValue('unit')
57 else:
58 shp_metadata_dict["origin_projection_string"] = UNKNOWN_STR
59 shp_metadata_dict["origin_projection_name"] = UNKNOWN_STR
60 shp_metadata_dict["origin_datum"] = UNKNOWN_STR
61 shp_metadata_dict["origin_unit"] = UNKNOWN_STR
62
63 field_list = []
64 filed_attr_dic = {}
65 field_meta_dict = {"field_list": field_list, "field_attr_dict": filed_attr_dic}
66 shp_metadata_dict["field_meta_dict"] = field_meta_dict
67 # get Attributes
68 layerDefinition = layer.GetLayerDefn()
69 for i in range(layerDefinition.GetFieldCount()):
70 fieldName = layerDefinition.GetFieldDefn(i).GetName()
71 field_list.append(fieldName)
72 attr_dict = {}
73 field_meta_dict["field_attr_dict"][fieldName] = attr_dict
74
75 attr_dict["fieldName"] = fieldName
76 fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
77 attr_dict["fieldTypeCode"] = fieldTypeCode
78 fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
79 attr_dict["fieldType"] = fieldType
80 fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
81 attr_dict["fieldWidth"] = fieldWidth
82 fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
83 attr_dict["fieldPrecision"] = fieldPrecision
84
85 # get layer extent
86 layer_extent = layer.GetExtent()
87
88 # get feature count
89 featureCount = layer.GetFeatureCount()
90 shp_metadata_dict["feature_count"] = featureCount
91
92 # get a feature from layer
93 feature = layer.GetNextFeature()
94
95 # get geometry from feature
96 geom = feature.GetGeometryRef()
97
98 # get geometry name
99 shp_metadata_dict["geometry_type"] = geom.GetGeometryName()
100
101 # reproject layer extent
102 # source SpatialReference
103 source = spatialRef_from_layer
104 # target SpatialReference
105 target = osr.SpatialReference()
106 target.ImportFromEPSG(4326)
107
108 # create two key points from layer extent
109 left_upper_point = ogr.Geometry(ogr.wkbPoint)
110 left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper
111 right_lower_point = ogr.Geometry(ogr.wkbPoint)
112 right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower
113
114 # source map always has extent, even projection is unknown
115 shp_metadata_dict["origin_extent_dict"] = {}
116 shp_metadata_dict["origin_extent_dict"]["westlimit"] = layer_extent[0]
117 shp_metadata_dict["origin_extent_dict"]["northlimit"] = layer_extent[3]
118 shp_metadata_dict["origin_extent_dict"]["eastlimit"] = layer_extent[1]
119 shp_metadata_dict["origin_extent_dict"]["southlimit"] = layer_extent[2]
120
121 # reproject to WGS84
122 shp_metadata_dict["wgs84_extent_dict"] = {}
123
124 if source is not None:
125 # define CoordinateTransformation obj
126 transform = osr.CoordinateTransformation(source, target)
127 # project two key points
128 left_upper_point.Transform(transform)
129 right_lower_point.Transform(transform)
130 shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = left_upper_point.GetX()
131 shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = left_upper_point.GetY()
132 shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = right_lower_point.GetX()
133 shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = right_lower_point.GetY()
134 shp_metadata_dict["wgs84_extent_dict"]["projection"] = "WGS 84 EPSG:4326"
135 shp_metadata_dict["wgs84_extent_dict"]["units"] = "Decimal degrees"
136 else:
137 shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = UNKNOWN_STR
138 shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = UNKNOWN_STR
139 shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = UNKNOWN_STR
140 shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = UNKNOWN_STR
141 shp_metadata_dict["wgs84_extent_dict"]["projection"] = UNKNOWN_STR
142 shp_metadata_dict["wgs84_extent_dict"]["units"] = UNKNOWN_STR
143
144 return shp_metadata_dict
145
146
147 def parse_shp_xml(shp_xml_full_path):
148 """
149 Parse ArcGIS 10.X ESRI Shapefile Metadata XML.
150 :param shp_xml_full_path: Expected fullpath to the .shp.xml file
151 :return: a list of metadata dict
152 """
153 metadata = []
154
155 try:
156 if os.path.isfile(shp_xml_full_path):
157 with open(shp_xml_full_path) as fd:
158 xml_dict = xmltodict.parse(fd.read())
159 if 'metadata' in xml_dict:
160 if 'dataIdInfo' in xml_dict['metadata']:
161 dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
162 if 'idAbs' in dataIdInfo_dict:
163 description_value = clean_text(dataIdInfo_dict['idAbs'])
164 description = {'description': {'abstract': description_value}}
165 metadata.append(description)
166 if 'idPurp' in xml_dict['metadata']['dataIdInfo']:
167 title_value = clean_text(dataIdInfo_dict['idPurp'])
168 title = {'title': {'value': title_value}}
169 metadata.append(title)
170 if 'searchKeys' in dataIdInfo_dict:
171 searchKeys_dict = dataIdInfo_dict['searchKeys']
172 if 'keyword' in searchKeys_dict:
173 keyword_list = []
174 if type(searchKeys_dict["keyword"]) is list:
175 keyword_list += searchKeys_dict["keyword"]
176 else:
177 keyword_list.append(searchKeys_dict["keyword"])
178 for k in keyword_list:
179 metadata.append({'subject': {'value': k}})
180
181 except Exception:
182 # Catch any exception silently and return an empty list
183 # Due to the variant format of ESRI Shapefile Metadata XML
184 # among different ArcGIS versions, an empty list will be returned
185 # if any exception occurs
186 metadata = []
187 finally:
188 return metadata
189
190
191 def clean_text(text):
192 # Decode html
193
194 h = HTMLParser()
195 return h.unescape(clean_html(text))
196
197
198 def clean_html(raw_html):
199 # Remove html tag from raw_html
200
201 cleanr = re.compile('<.*?>')
202 cleantext = re.sub(cleanr, '', raw_html)
203 return cleantext
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hs_geographic_feature_resource/parse_lib.py b/hs_geographic_feature_resource/parse_lib.py
--- a/hs_geographic_feature_resource/parse_lib.py
+++ b/hs_geographic_feature_resource/parse_lib.py
@@ -9,6 +9,9 @@
# Python 3
from html.parser import HTMLParser
+from hs_core.models import Title
+
+
UNKNOWN_STR = "unknown"
@@ -156,27 +159,36 @@
if os.path.isfile(shp_xml_full_path):
with open(shp_xml_full_path) as fd:
xml_dict = xmltodict.parse(fd.read())
- if 'metadata' in xml_dict:
- if 'dataIdInfo' in xml_dict['metadata']:
- dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
- if 'idAbs' in dataIdInfo_dict:
- description_value = clean_text(dataIdInfo_dict['idAbs'])
- description = {'description': {'abstract': description_value}}
- metadata.append(description)
- if 'idPurp' in xml_dict['metadata']['dataIdInfo']:
- title_value = clean_text(dataIdInfo_dict['idPurp'])
- title = {'title': {'value': title_value}}
- metadata.append(title)
- if 'searchKeys' in dataIdInfo_dict:
- searchKeys_dict = dataIdInfo_dict['searchKeys']
- if 'keyword' in searchKeys_dict:
- keyword_list = []
- if type(searchKeys_dict["keyword"]) is list:
- keyword_list += searchKeys_dict["keyword"]
- else:
- keyword_list.append(searchKeys_dict["keyword"])
- for k in keyword_list:
- metadata.append({'subject': {'value': k}})
+
+ dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
+ if 'idCitation' in dataIdInfo_dict:
+ if 'resTitle' in dataIdInfo_dict['idCitation']:
+ if '#text' in dataIdInfo_dict['idCitation']['resTitle']:
+ title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']
+ else:
+ title_value = dataIdInfo_dict['idCitation']['resTitle']
+
+ title_max_length = Title._meta.get_field('value').max_length
+ if len(title_value) > title_max_length:
+ title_value = title_value[:title_max_length-1]
+ title = {'title': {'value': title_value}}
+ metadata.append(title)
+
+ if 'idAbs' in dataIdInfo_dict:
+ description_value = clean_text(dataIdInfo_dict['idAbs'])
+ description = {'description': {'abstract': description_value}}
+ metadata.append(description)
+
+ if 'searchKeys' in dataIdInfo_dict:
+ searchKeys_dict = dataIdInfo_dict['searchKeys']
+ if 'keyword' in searchKeys_dict:
+ keyword_list = []
+ if type(searchKeys_dict["keyword"]) is list:
+ keyword_list += searchKeys_dict["keyword"]
+ else:
+ keyword_list.append(searchKeys_dict["keyword"])
+ for k in keyword_list:
+ metadata.append({'subject': {'value': k}})
except Exception:
# Catch any exception silently and return an empty list
|
{"golden_diff": "diff --git a/hs_geographic_feature_resource/parse_lib.py b/hs_geographic_feature_resource/parse_lib.py\n--- a/hs_geographic_feature_resource/parse_lib.py\n+++ b/hs_geographic_feature_resource/parse_lib.py\n@@ -9,6 +9,9 @@\n # Python 3\n from html.parser import HTMLParser\n \n+from hs_core.models import Title\n+\n+\n UNKNOWN_STR = \"unknown\"\n \n \n@@ -156,27 +159,36 @@\n if os.path.isfile(shp_xml_full_path):\n with open(shp_xml_full_path) as fd:\n xml_dict = xmltodict.parse(fd.read())\n- if 'metadata' in xml_dict:\n- if 'dataIdInfo' in xml_dict['metadata']:\n- dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n- if 'idAbs' in dataIdInfo_dict:\n- description_value = clean_text(dataIdInfo_dict['idAbs'])\n- description = {'description': {'abstract': description_value}}\n- metadata.append(description)\n- if 'idPurp' in xml_dict['metadata']['dataIdInfo']:\n- title_value = clean_text(dataIdInfo_dict['idPurp'])\n- title = {'title': {'value': title_value}}\n- metadata.append(title)\n- if 'searchKeys' in dataIdInfo_dict:\n- searchKeys_dict = dataIdInfo_dict['searchKeys']\n- if 'keyword' in searchKeys_dict:\n- keyword_list = []\n- if type(searchKeys_dict[\"keyword\"]) is list:\n- keyword_list += searchKeys_dict[\"keyword\"]\n- else:\n- keyword_list.append(searchKeys_dict[\"keyword\"])\n- for k in keyword_list:\n- metadata.append({'subject': {'value': k}})\n+\n+ dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n+ if 'idCitation' in dataIdInfo_dict:\n+ if 'resTitle' in dataIdInfo_dict['idCitation']:\n+ if '#text' in dataIdInfo_dict['idCitation']['resTitle']:\n+ title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']\n+ else:\n+ title_value = dataIdInfo_dict['idCitation']['resTitle']\n+\n+ title_max_length = Title._meta.get_field('value').max_length\n+ if len(title_value) > title_max_length:\n+ title_value = title_value[:title_max_length-1]\n+ title = {'title': {'value': title_value}}\n+ metadata.append(title)\n+\n+ if 'idAbs' in dataIdInfo_dict:\n+ description_value = clean_text(dataIdInfo_dict['idAbs'])\n+ description = {'description': {'abstract': description_value}}\n+ metadata.append(description)\n+\n+ if 'searchKeys' in dataIdInfo_dict:\n+ searchKeys_dict = dataIdInfo_dict['searchKeys']\n+ if 'keyword' in searchKeys_dict:\n+ keyword_list = []\n+ if type(searchKeys_dict[\"keyword\"]) is list:\n+ keyword_list += searchKeys_dict[\"keyword\"]\n+ else:\n+ keyword_list.append(searchKeys_dict[\"keyword\"])\n+ for k in keyword_list:\n+ metadata.append({'subject': {'value': k}})\n \n except Exception:\n # Catch any exception silently and return an empty list\n", "issue": "Failed to create geographic feature resource\nThe files are from this resource https://www.hydroshare.org/resource/822fe5a7a83a49668c77a85179c3a7ee/. \r\n\r\nThe error is shown as below. 
Thanks for looking into this issue \r\n\r\n\n", "before_files": [{"content": "import os\nimport xmltodict\nimport re\nfrom osgeo import ogr, osr\ntry:\n # Python 2.6-2.7\n from HTMLParser import HTMLParser\nexcept ImportError:\n # Python 3\n from html.parser import HTMLParser\n\nUNKNOWN_STR = \"unknown\"\n\n\ndef parse_shp(file_path):\n # output dictionary format\n # shp_metadata_dict[\"origin_projection_string\"]: original projection string\n # shp_metadata_dict[\"origin_projection_name\"]: origin_projection_name\n # shp_metadata_dict[\"origin_datum\"]: origin_datum\n # shp_metadata_dict[\"origin_unit\"]: origin_unit\n # shp_metadata_dict[\"field_meta_dict\"][\"field_list\"]: list [fieldname1, fieldname2...]\n # shp_metadata_dict[\"field_meta_dict\"][\"field_attr_dic\"]:\n # dict {\"fieldname\": dict {\n # \"fieldName\":fieldName,\n # \"fieldTypeCode\":fieldTypeCode,\n # \"fieldType\":fieldType,\n # \"fieldWidth:fieldWidth,\n # \"fieldPrecision:fieldPrecision\"\n # }\n # }\n # shp_metadata_dict[\"feature_count\"]: feature count\n # shp_metadata_dict[\"geometry_type\"]: geometry_type\n # shp_metadata_dict[\"origin_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n # shp_metadata_dict[\"wgs84_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n\n shp_metadata_dict = {}\n # read shapefile\n driver = ogr.GetDriverByName('ESRI Shapefile')\n dataset = driver.Open(file_path)\n\n # get layer\n layer = dataset.GetLayer()\n # get spatialRef from layer\n spatialRef_from_layer = layer.GetSpatialRef()\n\n if spatialRef_from_layer is not None:\n shp_metadata_dict[\"origin_projection_string\"] = str(spatialRef_from_layer)\n prj_name = spatialRef_from_layer.GetAttrValue('projcs')\n if prj_name is None:\n prj_name = spatialRef_from_layer.GetAttrValue('geogcs')\n shp_metadata_dict[\"origin_projection_name\"] = prj_name\n\n shp_metadata_dict[\"origin_datum\"] = spatialRef_from_layer.GetAttrValue('datum')\n shp_metadata_dict[\"origin_unit\"] = spatialRef_from_layer.GetAttrValue('unit')\n else:\n shp_metadata_dict[\"origin_projection_string\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_projection_name\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_datum\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_unit\"] = UNKNOWN_STR\n\n field_list = []\n filed_attr_dic = {}\n field_meta_dict = {\"field_list\": field_list, \"field_attr_dict\": filed_attr_dic}\n shp_metadata_dict[\"field_meta_dict\"] = field_meta_dict\n # get Attributes\n layerDefinition = layer.GetLayerDefn()\n for i in range(layerDefinition.GetFieldCount()):\n fieldName = layerDefinition.GetFieldDefn(i).GetName()\n field_list.append(fieldName)\n attr_dict = {}\n field_meta_dict[\"field_attr_dict\"][fieldName] = attr_dict\n\n attr_dict[\"fieldName\"] = fieldName\n fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()\n attr_dict[\"fieldTypeCode\"] = fieldTypeCode\n fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)\n attr_dict[\"fieldType\"] = fieldType\n fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()\n attr_dict[\"fieldWidth\"] = fieldWidth\n fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()\n attr_dict[\"fieldPrecision\"] = fieldPrecision\n\n # get layer extent\n layer_extent = layer.GetExtent()\n\n # get feature count\n featureCount = layer.GetFeatureCount()\n shp_metadata_dict[\"feature_count\"] = featureCount\n\n # get a feature from layer\n feature = layer.GetNextFeature()\n\n # get geometry from feature\n geom = 
feature.GetGeometryRef()\n\n # get geometry name\n shp_metadata_dict[\"geometry_type\"] = geom.GetGeometryName()\n\n # reproject layer extent\n # source SpatialReference\n source = spatialRef_from_layer\n # target SpatialReference\n target = osr.SpatialReference()\n target.ImportFromEPSG(4326)\n\n # create two key points from layer extent\n left_upper_point = ogr.Geometry(ogr.wkbPoint)\n left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper\n right_lower_point = ogr.Geometry(ogr.wkbPoint)\n right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower\n\n # source map always has extent, even projection is unknown\n shp_metadata_dict[\"origin_extent_dict\"] = {}\n shp_metadata_dict[\"origin_extent_dict\"][\"westlimit\"] = layer_extent[0]\n shp_metadata_dict[\"origin_extent_dict\"][\"northlimit\"] = layer_extent[3]\n shp_metadata_dict[\"origin_extent_dict\"][\"eastlimit\"] = layer_extent[1]\n shp_metadata_dict[\"origin_extent_dict\"][\"southlimit\"] = layer_extent[2]\n\n # reproject to WGS84\n shp_metadata_dict[\"wgs84_extent_dict\"] = {}\n\n if source is not None:\n # define CoordinateTransformation obj\n transform = osr.CoordinateTransformation(source, target)\n # project two key points\n left_upper_point.Transform(transform)\n right_lower_point.Transform(transform)\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = left_upper_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = left_upper_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = right_lower_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = right_lower_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = \"WGS 84 EPSG:4326\"\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = \"Decimal degrees\"\n else:\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = UNKNOWN_STR\n\n return shp_metadata_dict\n\n\ndef parse_shp_xml(shp_xml_full_path):\n \"\"\"\n Parse ArcGIS 10.X ESRI Shapefile Metadata XML.\n :param shp_xml_full_path: Expected fullpath to the .shp.xml file\n :return: a list of metadata dict\n \"\"\"\n metadata = []\n\n try:\n if os.path.isfile(shp_xml_full_path):\n with open(shp_xml_full_path) as fd:\n xml_dict = xmltodict.parse(fd.read())\n if 'metadata' in xml_dict:\n if 'dataIdInfo' in xml_dict['metadata']:\n dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n if 'idAbs' in dataIdInfo_dict:\n description_value = clean_text(dataIdInfo_dict['idAbs'])\n description = {'description': {'abstract': description_value}}\n metadata.append(description)\n if 'idPurp' in xml_dict['metadata']['dataIdInfo']:\n title_value = clean_text(dataIdInfo_dict['idPurp'])\n title = {'title': {'value': title_value}}\n metadata.append(title)\n if 'searchKeys' in dataIdInfo_dict:\n searchKeys_dict = dataIdInfo_dict['searchKeys']\n if 'keyword' in searchKeys_dict:\n keyword_list = []\n if type(searchKeys_dict[\"keyword\"]) is list:\n keyword_list += searchKeys_dict[\"keyword\"]\n else:\n keyword_list.append(searchKeys_dict[\"keyword\"])\n for k in keyword_list:\n metadata.append({'subject': {'value': k}})\n\n except Exception:\n # Catch any exception silently 
and return an empty list\n # Due to the variant format of ESRI Shapefile Metadata XML\n # among different ArcGIS versions, an empty list will be returned\n # if any exception occurs\n metadata = []\n finally:\n return metadata\n\n\ndef clean_text(text):\n # Decode html\n\n h = HTMLParser()\n return h.unescape(clean_html(text))\n\n\ndef clean_html(raw_html):\n # Remove html tag from raw_html\n\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n", "path": "hs_geographic_feature_resource/parse_lib.py"}], "after_files": [{"content": "import os\nimport xmltodict\nimport re\nfrom osgeo import ogr, osr\ntry:\n # Python 2.6-2.7\n from HTMLParser import HTMLParser\nexcept ImportError:\n # Python 3\n from html.parser import HTMLParser\n\nfrom hs_core.models import Title\n\n\nUNKNOWN_STR = \"unknown\"\n\n\ndef parse_shp(file_path):\n # output dictionary format\n # shp_metadata_dict[\"origin_projection_string\"]: original projection string\n # shp_metadata_dict[\"origin_projection_name\"]: origin_projection_name\n # shp_metadata_dict[\"origin_datum\"]: origin_datum\n # shp_metadata_dict[\"origin_unit\"]: origin_unit\n # shp_metadata_dict[\"field_meta_dict\"][\"field_list\"]: list [fieldname1, fieldname2...]\n # shp_metadata_dict[\"field_meta_dict\"][\"field_attr_dic\"]:\n # dict {\"fieldname\": dict {\n # \"fieldName\":fieldName,\n # \"fieldTypeCode\":fieldTypeCode,\n # \"fieldType\":fieldType,\n # \"fieldWidth:fieldWidth,\n # \"fieldPrecision:fieldPrecision\"\n # }\n # }\n # shp_metadata_dict[\"feature_count\"]: feature count\n # shp_metadata_dict[\"geometry_type\"]: geometry_type\n # shp_metadata_dict[\"origin_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n # shp_metadata_dict[\"wgs84_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n\n shp_metadata_dict = {}\n # read shapefile\n driver = ogr.GetDriverByName('ESRI Shapefile')\n dataset = driver.Open(file_path)\n\n # get layer\n layer = dataset.GetLayer()\n # get spatialRef from layer\n spatialRef_from_layer = layer.GetSpatialRef()\n\n if spatialRef_from_layer is not None:\n shp_metadata_dict[\"origin_projection_string\"] = str(spatialRef_from_layer)\n prj_name = spatialRef_from_layer.GetAttrValue('projcs')\n if prj_name is None:\n prj_name = spatialRef_from_layer.GetAttrValue('geogcs')\n shp_metadata_dict[\"origin_projection_name\"] = prj_name\n\n shp_metadata_dict[\"origin_datum\"] = spatialRef_from_layer.GetAttrValue('datum')\n shp_metadata_dict[\"origin_unit\"] = spatialRef_from_layer.GetAttrValue('unit')\n else:\n shp_metadata_dict[\"origin_projection_string\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_projection_name\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_datum\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_unit\"] = UNKNOWN_STR\n\n field_list = []\n filed_attr_dic = {}\n field_meta_dict = {\"field_list\": field_list, \"field_attr_dict\": filed_attr_dic}\n shp_metadata_dict[\"field_meta_dict\"] = field_meta_dict\n # get Attributes\n layerDefinition = layer.GetLayerDefn()\n for i in range(layerDefinition.GetFieldCount()):\n fieldName = layerDefinition.GetFieldDefn(i).GetName()\n field_list.append(fieldName)\n attr_dict = {}\n field_meta_dict[\"field_attr_dict\"][fieldName] = attr_dict\n\n attr_dict[\"fieldName\"] = fieldName\n fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()\n attr_dict[\"fieldTypeCode\"] = fieldTypeCode\n fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)\n 
attr_dict[\"fieldType\"] = fieldType\n fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()\n attr_dict[\"fieldWidth\"] = fieldWidth\n fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()\n attr_dict[\"fieldPrecision\"] = fieldPrecision\n\n # get layer extent\n layer_extent = layer.GetExtent()\n\n # get feature count\n featureCount = layer.GetFeatureCount()\n shp_metadata_dict[\"feature_count\"] = featureCount\n\n # get a feature from layer\n feature = layer.GetNextFeature()\n\n # get geometry from feature\n geom = feature.GetGeometryRef()\n\n # get geometry name\n shp_metadata_dict[\"geometry_type\"] = geom.GetGeometryName()\n\n # reproject layer extent\n # source SpatialReference\n source = spatialRef_from_layer\n # target SpatialReference\n target = osr.SpatialReference()\n target.ImportFromEPSG(4326)\n\n # create two key points from layer extent\n left_upper_point = ogr.Geometry(ogr.wkbPoint)\n left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper\n right_lower_point = ogr.Geometry(ogr.wkbPoint)\n right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower\n\n # source map always has extent, even projection is unknown\n shp_metadata_dict[\"origin_extent_dict\"] = {}\n shp_metadata_dict[\"origin_extent_dict\"][\"westlimit\"] = layer_extent[0]\n shp_metadata_dict[\"origin_extent_dict\"][\"northlimit\"] = layer_extent[3]\n shp_metadata_dict[\"origin_extent_dict\"][\"eastlimit\"] = layer_extent[1]\n shp_metadata_dict[\"origin_extent_dict\"][\"southlimit\"] = layer_extent[2]\n\n # reproject to WGS84\n shp_metadata_dict[\"wgs84_extent_dict\"] = {}\n\n if source is not None:\n # define CoordinateTransformation obj\n transform = osr.CoordinateTransformation(source, target)\n # project two key points\n left_upper_point.Transform(transform)\n right_lower_point.Transform(transform)\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = left_upper_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = left_upper_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = right_lower_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = right_lower_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = \"WGS 84 EPSG:4326\"\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = \"Decimal degrees\"\n else:\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = UNKNOWN_STR\n\n return shp_metadata_dict\n\n\ndef parse_shp_xml(shp_xml_full_path):\n \"\"\"\n Parse ArcGIS 10.X ESRI Shapefile Metadata XML.\n :param shp_xml_full_path: Expected fullpath to the .shp.xml file\n :return: a list of metadata dict\n \"\"\"\n metadata = []\n\n try:\n if os.path.isfile(shp_xml_full_path):\n with open(shp_xml_full_path) as fd:\n xml_dict = xmltodict.parse(fd.read())\n\n dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n if 'idCitation' in dataIdInfo_dict:\n if 'resTitle' in dataIdInfo_dict['idCitation']:\n if '#text' in dataIdInfo_dict['idCitation']['resTitle']:\n title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']\n else:\n title_value = dataIdInfo_dict['idCitation']['resTitle']\n\n title_max_length = 
Title._meta.get_field('value').max_length\n if len(title_value) > title_max_length:\n title_value = title_value[:title_max_length-1]\n title = {'title': {'value': title_value}}\n metadata.append(title)\n\n if 'idAbs' in dataIdInfo_dict:\n description_value = clean_text(dataIdInfo_dict['idAbs'])\n description = {'description': {'abstract': description_value}}\n metadata.append(description)\n\n if 'searchKeys' in dataIdInfo_dict:\n searchKeys_dict = dataIdInfo_dict['searchKeys']\n if 'keyword' in searchKeys_dict:\n keyword_list = []\n if type(searchKeys_dict[\"keyword\"]) is list:\n keyword_list += searchKeys_dict[\"keyword\"]\n else:\n keyword_list.append(searchKeys_dict[\"keyword\"])\n for k in keyword_list:\n metadata.append({'subject': {'value': k}})\n\n except Exception:\n # Catch any exception silently and return an empty list\n # Due to the variant format of ESRI Shapefile Metadata XML\n # among different ArcGIS versions, an empty list will be returned\n # if any exception occurs\n metadata = []\n finally:\n return metadata\n\n\ndef clean_text(text):\n # Decode html\n\n h = HTMLParser()\n return h.unescape(clean_html(text))\n\n\ndef clean_html(raw_html):\n # Remove html tag from raw_html\n\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n", "path": "hs_geographic_feature_resource/parse_lib.py"}]}
| 2,882 | 729 |
gh_patches_debug_34408
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-12008
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Quizzes - The 'missing resource' alert is not displayed at the Home page
## Observed behavior
Observed while testing https://github.com/learningequality/kolibri/pull/11426 - quizzes with missing resources are not showing an alert on the home page.
## Expected behavior
To be further discussed.
## Steps to reproduce the issue
1. Install the latest build from https://github.com/learningequality/kolibri/pull/11426 and set up a full facility and a learn-only device.
2. Assign a quiz to the learner on the LOD.
3. Sign in as the learner and delete some of the quiz resources by going to Device > Channel
4. Go to the quiz and observe that it correctly alerts for missing resources.
5. Go to the learner's home page and observe that the alert 'Some resources are missing or not supported...' is missing there.
## More information
Video with replication steps - the original was more than 10 minutes long, so I had to shorten it:
https://github.com/learningequality/kolibri/assets/79847249/8fb9dcb4-8419-4e01-8f8f-180d39a3ad91
## Usage Details
Kolibri 0.16beta6
Ubuntu - Chrome, Firefox
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/learn/viewsets.py`
Content:
```
1 from django.db.models import Count
2 from django.db.models import OuterRef
3 from django.db.models import Q
4 from django.db.models import Subquery
5 from django.db.models import Sum
6 from django.db.models.fields import IntegerField
7 from rest_framework.permissions import IsAuthenticated
8 from rest_framework.response import Response
9 from rest_framework.views import APIView
10
11 from kolibri.core.api import ReadOnlyValuesViewset
12 from kolibri.core.auth.models import Classroom
13 from kolibri.core.auth.models import Facility
14 from kolibri.core.content.api import ContentNodeProgressViewset
15 from kolibri.core.content.api import ContentNodeViewset
16 from kolibri.core.content.api import UserContentNodeViewset
17 from kolibri.core.content.models import ContentNode
18 from kolibri.core.exams.models import Exam
19 from kolibri.core.lessons.models import Lesson
20 from kolibri.core.logger.models import AttemptLog
21 from kolibri.core.logger.models import MasteryLog
22
23
24 contentnode_progress_viewset = ContentNodeProgressViewset()
25 contentnode_viewset = ContentNodeViewset()
26 user_contentnode_viewset = UserContentNodeViewset()
27
28
29 class LearnStateView(APIView):
30 def get(self, request, format=None):
31 """
32 Returns some configuration variables applicable to users navigating learn.
33 - in_classes: Whether the user is in any classes
34 - can_download_externally: Whether the user can download content externally from Kolibri
35 """
36 if request.user.is_anonymous:
37 default_facility = Facility.get_default_facility()
38 can_download_externally = (
39 default_facility.dataset.show_download_button_in_learn
40 if default_facility
41 else True
42 )
43 return Response(
44 {
45 "in_classes": False,
46 "can_download_externally": can_download_externally,
47 }
48 )
49 return Response(
50 {
51 "in_classes": request.user.memberships.exists(),
52 "can_download_externally": request.user.dataset.show_download_button_in_learn,
53 }
54 )
55
56
57 def _consolidate_lessons_data(request, lessons):
58 lesson_contentnode_ids = set()
59 for lesson in lessons:
60 lesson_contentnode_ids |= {
61 resource["contentnode_id"] for resource in lesson["resources"]
62 }
63
64 contentnode_progress = (
65 contentnode_progress_viewset.serialize_list(
66 request, {"ids": lesson_contentnode_ids}
67 )
68 if lesson_contentnode_ids
69 else []
70 )
71
72 contentnodes = (
73 contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids})
74 if lesson_contentnode_ids
75 else []
76 )
77
78 progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress}
79
80 contentnode_map = {c["id"]: c for c in contentnodes}
81
82 for lesson in lessons:
83 lesson["progress"] = {
84 "resource_progress": sum(
85 (
86 progress_map[resource["content_id"]]
87 for resource in lesson["resources"]
88 if resource["content_id"] in progress_map
89 )
90 ),
91 "total_resources": len(lesson["resources"]),
92 }
93 missing_resource = False
94 for resource in lesson["resources"]:
95 resource["progress"] = progress_map.get(resource["content_id"], 0)
96 resource["contentnode"] = contentnode_map.get(
97 resource["contentnode_id"], None
98 )
99 missing_resource = missing_resource or not resource["contentnode"]
100 lesson["missing_resource"] = missing_resource
101
102
103 class LearnerClassroomViewset(ReadOnlyValuesViewset):
104 """
105 Returns all Classrooms for which the requesting User is a member,
106 along with all associated assignments.
107 """
108
109 permission_classes = (IsAuthenticated,)
110
111 values = ("id", "name")
112
113 def get_queryset(self):
114 if self.request.user.is_anonymous:
115 return Classroom.objects.none()
116 return Classroom.objects.filter(membership__user=self.request.user)
117
118 def consolidate(self, items, queryset):
119 if not items:
120 return items
121 lessons = (
122 Lesson.objects.filter(
123 lesson_assignments__collection__membership__user=self.request.user,
124 is_active=True,
125 collection__in=(c["id"] for c in items),
126 )
127 .distinct()
128 .values(
129 "description", "id", "is_active", "title", "resources", "collection"
130 )
131 )
132 _consolidate_lessons_data(self.request, lessons)
133
134 user_masterylog_content_ids = MasteryLog.objects.filter(
135 user=self.request.user
136 ).values("summarylog__content_id")
137
138 exams = (
139 Exam.objects.filter(
140 assignments__collection__membership__user=self.request.user,
141 collection__in=(c["id"] for c in items),
142 )
143 .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))
144 .annotate(
145 closed=Subquery(
146 MasteryLog.objects.filter(
147 summarylog__content_id=OuterRef("id"), user=self.request.user
148 ).values("complete")[:1]
149 ),
150 score=Subquery(
151 AttemptLog.objects.filter(
152 sessionlog__content_id=OuterRef("id"), user=self.request.user
153 )
154 .order_by()
155 .values_list("item")
156 .distinct()
157 .values("masterylog")
158 .annotate(total_correct=Sum("correct"))
159 .values("total_correct"),
160 output_field=IntegerField(),
161 ),
162 answer_count=Subquery(
163 AttemptLog.objects.filter(
164 sessionlog__content_id=OuterRef("id"), user=self.request.user
165 )
166 .order_by()
167 .values_list("item")
168 .distinct()
169 .values("masterylog")
170 .annotate(total_complete=Count("id"))
171 .values("total_complete"),
172 output_field=IntegerField(),
173 ),
174 )
175 .distinct()
176 .values(
177 "collection",
178 "active",
179 "archive",
180 "id",
181 "question_count",
182 "title",
183 "closed",
184 "answer_count",
185 "score",
186 "question_sources",
187 )
188 )
189 exam_node_ids = set()
190 for exam in exams:
191 exam_node_ids |= {
192 question["exercise_id"] for question in exam.get("question_sources")
193 }
194
195 available_exam_ids = set(
196 ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(
197 "id", flat=True
198 )
199 )
200
201 for exam in exams:
202 closed = exam.pop("closed")
203 score = exam.pop("score")
204 answer_count = exam.pop("answer_count")
205 if closed is not None:
206 exam["progress"] = {
207 "closed": closed,
208 "score": score,
209 "answer_count": answer_count,
210 "started": True,
211 }
212 else:
213 exam["progress"] = {
214 "score": None,
215 "answer_count": None,
216 "closed": None,
217 "started": False,
218 }
219 exam["missing_resource"] = any(
220 question["exercise_id"] not in available_exam_ids
221 for question in exam.get("question_sources")
222 )
223 out_items = []
224 for item in items:
225 item["assignments"] = {
226 "exams": [exam for exam in exams if exam["collection"] == item["id"]],
227 "lessons": [
228 lesson for lesson in lessons if lesson["collection"] == item["id"]
229 ],
230 }
231 out_items.append(item)
232 return out_items
233
234
235 learner_classroom_viewset = LearnerClassroomViewset()
236
237
238 def _resumable_resources(classrooms):
239 for classroom in classrooms:
240 for lesson in classroom["assignments"]["lessons"]:
241 for resource in lesson["resources"]:
242 yield 0 < resource["progress"] < 1
243
244
245 class LearnHomePageHydrationView(APIView):
246 def get(self, request, format=None):
247 classrooms = []
248 resumable_resources = []
249 resumable_resources_progress = []
250 if not request.user.is_anonymous:
251 classrooms = learner_classroom_viewset.serialize_list(request)
252 if not classrooms or not any(_resumable_resources(classrooms)):
253 resumable_resources = user_contentnode_viewset.serialize_list(
254 request,
255 {"resume": True, "max_results": 12, "ordering": "-last_interacted"},
256 )
257 resumable_resources_progress = (
258 contentnode_progress_viewset.serialize_list(
259 request,
260 {
261 "resume": True,
262 "max_results": 12,
263 "ordering": "-last_interacted",
264 },
265 )
266 )
267
268 return Response(
269 {
270 "classrooms": classrooms,
271 "resumable_resources": resumable_resources,
272 "resumable_resources_progress": resumable_resources_progress,
273 }
274 )
275
276
277 def _map_lesson_classroom(item):
278 return {
279 "id": item.pop("collection__id"),
280 "name": item.pop("collection__name"),
281 "parent": item.pop("collection__parent_id"),
282 }
283
284
285 class LearnerLessonViewset(ReadOnlyValuesViewset):
286 """
287 Special Viewset for Learners to view Lessons to which they are assigned.
288 The core Lesson Viewset is locked down to Admin users only.
289 """
290
291 permission_classes = (IsAuthenticated,)
292
293 values = (
294 "id",
295 "title",
296 "description",
297 "resources",
298 "is_active",
299 "collection",
300 "collection__id",
301 "collection__name",
302 "collection__parent_id",
303 )
304
305 field_map = {"classroom": _map_lesson_classroom}
306
307 def get_queryset(self):
308 if self.request.user.is_anonymous:
309 return Lesson.objects.none()
310 return Lesson.objects.filter(
311 lesson_assignments__collection__membership__user=self.request.user,
312 is_active=True,
313 )
314
315 def consolidate(self, items, queryset):
316 if not items:
317 return items
318
319 _consolidate_lessons_data(self.request, items)
320
321 return items
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py
--- a/kolibri/plugins/learn/viewsets.py
+++ b/kolibri/plugins/learn/viewsets.py
@@ -54,6 +54,16 @@
)
+def _map_contentnodes(request, content_ids):
+ contentnodes = (
+ contentnode_viewset.serialize_list(request, {"ids": content_ids})
+ if content_ids
+ else []
+ )
+ contentnode_map = {c["id"]: c for c in contentnodes}
+ return contentnode_map
+
+
def _consolidate_lessons_data(request, lessons):
lesson_contentnode_ids = set()
for lesson in lessons:
@@ -69,16 +79,10 @@
else []
)
- contentnodes = (
- contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids})
- if lesson_contentnode_ids
- else []
- )
+ contentnode_map = _map_contentnodes(request, lesson_contentnode_ids)
progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress}
- contentnode_map = {c["id"]: c for c in contentnodes}
-
for lesson in lessons:
lesson["progress"] = {
"resource_progress": sum(
@@ -198,6 +202,8 @@
)
)
+ contentnode_map = _map_contentnodes(self.request, available_exam_ids)
+
for exam in exams:
closed = exam.pop("closed")
score = exam.pop("score")
@@ -216,10 +222,12 @@
"closed": None,
"started": False,
}
- exam["missing_resource"] = any(
- question["exercise_id"] not in available_exam_ids
- for question in exam.get("question_sources")
- )
+ missing_resource = False
+ for question_source in exam["question_sources"]:
+ if question_source["exercise_id"] not in contentnode_map:
+ missing_resource = True
+ break
+ exam["missing_resource"] = missing_resource
out_items = []
for item in items:
item["assignments"] = {
|
{"golden_diff": "diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py\n--- a/kolibri/plugins/learn/viewsets.py\n+++ b/kolibri/plugins/learn/viewsets.py\n@@ -54,6 +54,16 @@\n )\n \n \n+def _map_contentnodes(request, content_ids):\n+ contentnodes = (\n+ contentnode_viewset.serialize_list(request, {\"ids\": content_ids})\n+ if content_ids\n+ else []\n+ )\n+ contentnode_map = {c[\"id\"]: c for c in contentnodes}\n+ return contentnode_map\n+\n+\n def _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n@@ -69,16 +79,10 @@\n else []\n )\n \n- contentnodes = (\n- contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n- if lesson_contentnode_ids\n- else []\n- )\n+ contentnode_map = _map_contentnodes(request, lesson_contentnode_ids)\n \n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n \n- contentnode_map = {c[\"id\"]: c for c in contentnodes}\n-\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n@@ -198,6 +202,8 @@\n )\n )\n \n+ contentnode_map = _map_contentnodes(self.request, available_exam_ids)\n+\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n@@ -216,10 +222,12 @@\n \"closed\": None,\n \"started\": False,\n }\n- exam[\"missing_resource\"] = any(\n- question[\"exercise_id\"] not in available_exam_ids\n- for question in exam.get(\"question_sources\")\n- )\n+ missing_resource = False\n+ for question_source in exam[\"question_sources\"]:\n+ if question_source[\"exercise_id\"] not in contentnode_map:\n+ missing_resource = True\n+ break\n+ exam[\"missing_resource\"] = missing_resource\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n", "issue": "Quizzes - The 'missing resource' alert is not displayed at the Home page\n## Observed behavior\r\n\r\nObserved while testing https://github.com/learningequality/kolibri/pull/11426 - quizzes with missing resources are not showing an alert on the home page.\r\n\r\n## Expected behavior\r\nTo be further discussed.\r\n\r\n## Steps to reproduce the issue\r\n1. Install the latest build from https://github.com/learningequality/kolibri/pull/11426 and setup a full facility and a learn-only device.\r\n2. Assign a quiz to the learner on the LOD.\r\n3. Sign in as the learner and delete some of the quiz resources by going to Device > Channel\r\n4. Go to the quiz an observe that it correctly alerts for missing resources.\r\n5. Go to the learner's home page and observe that there the alert 'Some resources are missing or not supported..' 
is missing.\r\n\r\n## More information\r\n\r\nVideo with replications steps - the original was more than 10 minutes long, so I had to shorten it:\r\n\r\nhttps://github.com/learningequality/kolibri/assets/79847249/8fb9dcb4-8419-4e01-8f8f-180d39a3ad91\r\n\r\n## Usage Details\r\nKolibri 0.16beta6\r\nUbuntu - Chrome, Firefox\n", "before_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n if lesson_contentnode_ids\n else []\n )\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n 
lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n for exam in exams:\n exam_node_ids |= {\n question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n question[\"exercise_id\"] not in available_exam_ids\n for question in exam.get(\"question_sources\")\n )\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n 
yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n )\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}], "after_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n 
can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _map_contentnodes(request, content_ids):\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": content_ids})\n if content_ids\n else []\n )\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n return contentnode_map\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnode_map = _map_contentnodes(request, lesson_contentnode_ids)\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n 
sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n for exam in exams:\n exam_node_ids |= {\n question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n contentnode_map = _map_contentnodes(self.request, available_exam_ids)\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n missing_resource = False\n for question_source in exam[\"question_sources\"]:\n if question_source[\"exercise_id\"] not in contentnode_map:\n missing_resource = True\n break\n exam[\"missing_resource\"] = missing_resource\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n )\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def 
get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}]}
| 3,511 | 500 |
gh_patches_debug_13661
|
rasdani/github-patches
|
git_diff
|
jupyterhub__zero-to-jupyterhub-k8s-2769
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move changelog to docs website
I suggest we make the changelog become part of the rendered documentation instead of a dedicated markdown file to be read from GitHub, and that we let CHANGELOG.md point to this rendered view.
Note for reference that jupyterhub/jupyterhub repo only has rendered website documentation currently, [see it here](https://jupyterhub.readthedocs.io/en/stable/changelog.html).
---
I think there are two key benefits of this:
1. We can get navigation assistance out of the box
2. We can make use of myst formatting, such as note/warning callouts
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for Sphinx to build our documentation to HTML.
2 #
3 # Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html
4 #
5
6 # -- Path setup --------------------------------------------------------------
7
8 # If extensions (or modules to document with autodoc) are in another directory,
9 # add these directories to sys.path here. If the directory is relative to the
10 # documentation root, use os.path.abspath to make it absolute, like shown here.
11 #
12 # import os
13 # import sys
14 # sys.path.insert(0, os.path.abspath('.'))
15
16
17 # -- Project specific imports ------------------------------------------------
18
19 import datetime
20 import os
21 import re
22 import subprocess
23
24 import yaml
25
26 # -- Sphinx setup function ---------------------------------------------------
27 # ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function
28
29
30 def setup(app):
31 app.add_css_file("custom.css")
32
33
34 # -- Referencable variables --------------------------------------------------
35
36
37 def _get_git_ref_from_chartpress_based_version(version):
38 """
39 Get a git ref from a chartpress set version of format like
40 1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.
41 """
42 tag_hash_split = re.split(r"[\.|-]n\d\d\d\.h", version)
43 if len(tag_hash_split) == 2:
44 return tag_hash_split[1]
45 else:
46 return tag_hash_split[0]
47
48
49 # FIXME: Stop relying on chartpress to modify Chart.yaml (and values.yaml) by
50 # creating a new feature of chartpress that allows us to directly acquire
51 # the dynamically set chart version from Chart.yaml. This would be
52 # similar to the --list-images feature of chartpress.
53 subprocess.run(["chartpress", "--skip-build"], cwd=os.path.abspath("../.."))
54 with open("../../jupyterhub/Chart.yaml") as f:
55 chart = yaml.safe_load(f)
56 subprocess.run(["chartpress", "--reset"], cwd=os.path.abspath("../.."))
57
58 # These substitution variables only work in markdown contexts, and does not work
59 # within links etc. Reference using {{ variable_name }}
60 #
61 # myst_substitutions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#substitutions-with-jinja2
62 myst_substitutions = {
63 "chart_version": chart["version"],
64 "jupyterhub_version": chart["appVersion"],
65 # FIXME: kubeVersion contain >=, but by having > in the string we substitute
66 # we run into this issue:
67 # https://github.com/executablebooks/MyST-Parser/issues/282
68 "kube_version": chart["kubeVersion"].split("-", 1)[0][2:],
69 "helm_version": "3.5",
70 "requirements": f"[hub/images/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{_get_git_ref_from_chartpress_based_version(chart['version'])}/images/hub/requirements.txt)",
71 }
72
73
74 # -- General MyST configuration -----------------------------------------------------
75
76 # myst_enable_extensions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html
77 myst_enable_extensions = [
78 "substitution",
79 ]
80
81
82 # -- Project information -----------------------------------------------------
83 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
84
85 project = "Zero to JupyterHub with Kubernetes"
86 copyright = f"{datetime.date.today().year}, Project Jupyter Contributors"
87 author = "Project Jupyter Contributors"
88
89
90 # -- General Sphinx configuration ---------------------------------------------------
91 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
92
93 # Set the default role so we can use `foo` instead of ``foo``
94 default_role = "literal"
95
96 # Add any Sphinx extension module names here, as strings. They can be
97 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
98 # ones.
99 extensions = [
100 "sphinx.ext.mathjax",
101 "sphinx_copybutton",
102 "myst_parser",
103 "sphinxext.rediraffe",
104 "sphinxext.opengraph",
105 ]
106
107 # List of patterns, relative to source directory, that match files and
108 # directories to ignore when looking for source files.
109 # This pattern also affects html_static_path and html_extra_path.
110 exclude_patterns = []
111
112 # The root toctree document.
113 root_doc = master_doc = "index"
114
115 # The suffix(es) of source filenames.
116 source_suffix = [".md", ".rst"]
117
118 # Rediraffe redirects to ensure proper redirection
119 rediraffe_redirects = {
120 "customizing/user-management": "jupyterhub/customizing/user-management",
121 "customizing/user-storage": "jupyterhub/customizing/user-storage",
122 "customizing/user-resources": "jupyterhub/customizing/user-resources",
123 "customizing/user-environment": "jupyterhub/customizing/user-environment",
124 "customizing/extending-jupyterhub": "jupyterhub/customizing/extending-jupyterhub",
125 "reference/glossary": "resources/glossary",
126 "reference/tools": "resources/tools",
127 "reference/reference-docs": "resources/reference-docs",
128 "reference/reference": "resources/reference",
129 "community/additional-resources": "resources/community",
130 "community/users-list": "resources/community",
131 "community/tips": "resources/community",
132 "setup-jupyterhub/turn-off": "jupyterhub/uninstall",
133 "setup-jupyterhub/setup-jupyterhub": "jupyterhub/installation",
134 "setup-jupyterhub/setup-helm": "kubernetes/setup-helm",
135 "ovh/step-zero-ovh": "kubernetes/ovh/step-zero-ovh",
136 "digital-ocean/step-zero-digital-ocean": "kubernetes/digital-ocean/step-zero-digital-ocean",
137 "ibm/step-zero-ibm": "kubernetes/ibm/step-zero-ibm",
138 "redhat/step-zero-openshift": "kubernetes/redhat/step-zero-openshift",
139 "amazon/step-zero-aws-eks": "kubernetes/amazon/step-zero-aws-eks",
140 "amazon/step-zero-aws": "kubernetes/amazon/step-zero-aws",
141 "microsoft/step-zero-azure-autoscale": "kubernetes/microsoft/step-zero-azure-autoscale",
142 "microsoft/step-zero-azure": "kubernetes/microsoft/step-zero-azure",
143 "google/step-zero-gcp": "kubernetes/google/step-zero-gcp",
144 "create-k8s-cluster": "kubernetes/setup-kubernetes",
145 "turn-off": "jupyterhub/uninstall",
146 "setup-jupyterhub": "jupyterhub/index",
147 "setup-helm": "kubernetes/setup-helm",
148 "index-setup-jupyterhub": "jupyterhub/index",
149 "tools": "reference/tools",
150 "reference-docs": "reference/reference-docs",
151 "index-reference": "resources/reference",
152 "glossary": "reference/glossary",
153 "user-storage": "customizing/user-storage",
154 "user-resources": "customizing/user-resources",
155 "user-management": "customizing/user-management",
156 "user-environment": "customizing/user-environment",
157 "index-customization-guide": "jupyterhub/customization",
158 "extending-jupyterhub": "customizing/extending-jupyterhub",
159 "users-list": "community/users-list",
160 "tips": "community/tips",
161 "index-community-resources": "resources/community",
162 "additional-resources": "resources/community",
163 "upgrading": "administrator/upgrading",
164 "troubleshooting": "administrator/troubleshooting",
165 "security": "administrator/security",
166 "optimization": "administrator/optimization",
167 "index-administrator-guide": "administrator/index",
168 "debug": "administrator/debug",
169 "cost": "administrator/cost",
170 "authentication": "administrator/authentication",
171 "architecture": "administrator/architecture",
172 "advanced": "administrator/advanced",
173 }
174
175 # opengraph configuration
176 # ogp_site_url/prefix is set automatically by RTD
177 ogp_image = "_static/logo.png"
178 ogp_use_first_image = True
179
180 # -- Generate the Helm chart configuration reference from a schema file ------
181
182 # header
183 with open("resources/reference.txt") as f:
184 header_md = f.readlines()
185 header_md = header_md[1:]
186 header_md = [ln.strip("\n") for ln in header_md]
187
188 # schema
189 with open("../../jupyterhub/schema.yaml") as f:
190 data = yaml.safe_load(f)
191
192
193 def parse_schema(d, md=[], depth=0, pre=""):
194 """
195 Generate markdown headers from a passed python dictionary created by
196 parsing a schema.yaml file.
197 """
198 if "then" in d:
199 d = d["then"]
200
201 if "properties" in d:
202 depth += 1
203 # Create markdown headers for each schema level
204 for key, val in d["properties"].items():
205 md.append(f"(schema_{pre}{key})=")
206 md.append("#" * (depth + 1) + f" {pre}{key}")
207 md.append("")
208 if "description" in val:
209 for ln in val["description"].split("\n"):
210 md.append(ln)
211 md.append("")
212
213 parse_schema(val, md, depth, f"{pre}{key}.")
214 depth -= 1
215 return md
216
217
218 schema_md = parse_schema(data)
219
220 # reference = header + schema
221 reference_md = header_md + schema_md
222 with open("resources/reference.md", "w") as f:
223 f.write("\n".join(reference_md))
224
225
226 # -- Options for linkcheck builder -------------------------------------------
227 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
228 linkcheck_ignore = [
229 r"(.*)github\.com(.*)#", # javascript based anchors
230 r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case
231 "https://your-domain.com", # example
232 "https://your-domain-name.com", # example
233 "https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works
234 "https://cloud.ibm.com/kubernetes/catalog/create", # works
235 "https://portal.azure.com", # sign-in redirect noise
236 "https://console.cloud.google.com", # sign-in redirect noise
237 "https://console.developers.google.com", # sign-in redirect noise
238 ]
239 linkcheck_anchors_ignore = [
240 "/#!",
241 "/#%21",
242 ]
243
244
245 # -- Options for HTML output -------------------------------------------------
246 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
247
248 # The theme to use for HTML and HTML Help pages. See the documentation for
249 # a list of builtin themes.
250 #
251
252 html_theme = "pydata_sphinx_theme"
253 html_theme_options = {
254 "github_url": "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/",
255 "use_edit_page_button": True,
256 }
257 html_context = {
258 "github_user": "jupyterhub",
259 "github_repo": "zero-to-jupyterhub-k8s",
260 "github_version": "main",
261 "doc_path": "docs/source",
262 }
263
264 html_favicon = "_static/images/logo/favicon.ico"
265 html_logo = "_static/images/logo/logo.png"
266
267 # Add any paths that contain custom static files (such as style sheets) here,
268 # relative to this directory. They are copied after the builtin static files,
269 # so a file named "default.css" will overwrite the builtin "default.css".
270 html_static_path = ["_static"]
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -228,6 +228,9 @@
linkcheck_ignore = [
r"(.*)github\.com(.*)#", # javascript based anchors
r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case
+ r"https://github.com/[^/]*$", # too many github usernames / searches in changelog
+ "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/", # too many PRs in changelog
+ "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/", # too many comparisons in changelog
"https://your-domain.com", # example
"https://your-domain-name.com", # example
"https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -228,6 +228,9 @@\n linkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n+ r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n+ \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/\", # too many PRs in changelog\n+ \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/\", # too many comparisons in changelog\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n", "issue": "Move changelog to docs website\nI suggest we make the changelog become part of the rendered documentation instead of a dedicated markdown file to be read from GitHub, and that we let CHANGELOG.md point to this rendered view.\r\n\r\nNote for reference that jupyterhub/jupyterhub repo only has rendered website documentation currently, [see it here](https://jupyterhub.readthedocs.io/en/stable/changelog.html).\r\n\r\n\r\n\r\n---\r\n\r\nI think there are two key benefits of this:\r\n1. We can get navigation assistance out of the box\r\n2. We can make use of myst formatting, such as note/warning callouts\n", "before_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nimport datetime\nimport os\nimport re\nimport subprocess\n\nimport yaml\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n\n\n# -- Referencable variables --------------------------------------------------\n\n\ndef _get_git_ref_from_chartpress_based_version(version):\n \"\"\"\n Get a git ref from a chartpress set version of format like\n 1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.\n \"\"\"\n tag_hash_split = re.split(r\"[\\.|-]n\\d\\d\\d\\.h\", version)\n if len(tag_hash_split) == 2:\n return tag_hash_split[1]\n else:\n return tag_hash_split[0]\n\n\n# FIXME: Stop relying on chartpress to modify Chart.yaml (and values.yaml) by\n# creating a new feature of chartpress that allows us to directly acquire\n# the dynamically set chart version from Chart.yaml. This would be\n# similar to the --list-images feature of chartpress.\nsubprocess.run([\"chartpress\", \"--skip-build\"], cwd=os.path.abspath(\"../..\"))\nwith open(\"../../jupyterhub/Chart.yaml\") as f:\n chart = yaml.safe_load(f)\nsubprocess.run([\"chartpress\", \"--reset\"], cwd=os.path.abspath(\"../..\"))\n\n# These substitution variables only work in markdown contexts, and does not work\n# within links etc. 
Reference using {{ variable_name }}\n#\n# myst_substitutions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#substitutions-with-jinja2\nmyst_substitutions = {\n \"chart_version\": chart[\"version\"],\n \"jupyterhub_version\": chart[\"appVersion\"],\n # FIXME: kubeVersion contain >=, but by having > in the string we substitute\n # we run into this issue:\n # https://github.com/executablebooks/MyST-Parser/issues/282\n \"kube_version\": chart[\"kubeVersion\"].split(\"-\", 1)[0][2:],\n \"helm_version\": \"3.5\",\n \"requirements\": f\"[hub/images/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{_get_git_ref_from_chartpress_based_version(chart['version'])}/images/hub/requirements.txt)\",\n}\n\n\n# -- General MyST configuration -----------------------------------------------------\n\n# myst_enable_extensions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html\nmyst_enable_extensions = [\n \"substitution\",\n]\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"Zero to JupyterHub with Kubernetes\"\ncopyright = f\"{datetime.date.today().year}, Project Jupyter Contributors\"\nauthor = \"Project Jupyter Contributors\"\n\n\n# -- General Sphinx configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = \"literal\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx_copybutton\",\n \"myst_parser\",\n \"sphinxext.rediraffe\",\n \"sphinxext.opengraph\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The root toctree document.\nroot_doc = master_doc = \"index\"\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".md\", \".rst\"]\n\n# Rediraffe redirects to ensure proper redirection\nrediraffe_redirects = {\n \"customizing/user-management\": \"jupyterhub/customizing/user-management\",\n \"customizing/user-storage\": \"jupyterhub/customizing/user-storage\",\n \"customizing/user-resources\": \"jupyterhub/customizing/user-resources\",\n \"customizing/user-environment\": \"jupyterhub/customizing/user-environment\",\n \"customizing/extending-jupyterhub\": \"jupyterhub/customizing/extending-jupyterhub\",\n \"reference/glossary\": \"resources/glossary\",\n \"reference/tools\": \"resources/tools\",\n \"reference/reference-docs\": \"resources/reference-docs\",\n \"reference/reference\": \"resources/reference\",\n \"community/additional-resources\": \"resources/community\",\n \"community/users-list\": \"resources/community\",\n \"community/tips\": \"resources/community\",\n \"setup-jupyterhub/turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub/setup-jupyterhub\": \"jupyterhub/installation\",\n \"setup-jupyterhub/setup-helm\": \"kubernetes/setup-helm\",\n \"ovh/step-zero-ovh\": \"kubernetes/ovh/step-zero-ovh\",\n \"digital-ocean/step-zero-digital-ocean\": \"kubernetes/digital-ocean/step-zero-digital-ocean\",\n \"ibm/step-zero-ibm\": \"kubernetes/ibm/step-zero-ibm\",\n 
\"redhat/step-zero-openshift\": \"kubernetes/redhat/step-zero-openshift\",\n \"amazon/step-zero-aws-eks\": \"kubernetes/amazon/step-zero-aws-eks\",\n \"amazon/step-zero-aws\": \"kubernetes/amazon/step-zero-aws\",\n \"microsoft/step-zero-azure-autoscale\": \"kubernetes/microsoft/step-zero-azure-autoscale\",\n \"microsoft/step-zero-azure\": \"kubernetes/microsoft/step-zero-azure\",\n \"google/step-zero-gcp\": \"kubernetes/google/step-zero-gcp\",\n \"create-k8s-cluster\": \"kubernetes/setup-kubernetes\",\n \"turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub\": \"jupyterhub/index\",\n \"setup-helm\": \"kubernetes/setup-helm\",\n \"index-setup-jupyterhub\": \"jupyterhub/index\",\n \"tools\": \"reference/tools\",\n \"reference-docs\": \"reference/reference-docs\",\n \"index-reference\": \"resources/reference\",\n \"glossary\": \"reference/glossary\",\n \"user-storage\": \"customizing/user-storage\",\n \"user-resources\": \"customizing/user-resources\",\n \"user-management\": \"customizing/user-management\",\n \"user-environment\": \"customizing/user-environment\",\n \"index-customization-guide\": \"jupyterhub/customization\",\n \"extending-jupyterhub\": \"customizing/extending-jupyterhub\",\n \"users-list\": \"community/users-list\",\n \"tips\": \"community/tips\",\n \"index-community-resources\": \"resources/community\",\n \"additional-resources\": \"resources/community\",\n \"upgrading\": \"administrator/upgrading\",\n \"troubleshooting\": \"administrator/troubleshooting\",\n \"security\": \"administrator/security\",\n \"optimization\": \"administrator/optimization\",\n \"index-administrator-guide\": \"administrator/index\",\n \"debug\": \"administrator/debug\",\n \"cost\": \"administrator/cost\",\n \"authentication\": \"administrator/authentication\",\n \"architecture\": \"administrator/architecture\",\n \"advanced\": \"administrator/advanced\",\n}\n\n# opengraph configuration\n# ogp_site_url/prefix is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open(\"resources/reference.txt\") as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip(\"\\n\") for ln in header_md]\n\n# schema\nwith open(\"../../jupyterhub/schema.yaml\") as f:\n data = yaml.safe_load(f)\n\n\ndef parse_schema(d, md=[], depth=0, pre=\"\"):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if \"then\" in d:\n d = d[\"then\"]\n\n if \"properties\" in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d[\"properties\"].items():\n md.append(f\"(schema_{pre}{key})=\")\n md.append(\"#\" * (depth + 1) + f\" {pre}{key}\")\n md.append(\"\")\n if \"description\" in val:\n for ln in val[\"description\"].split(\"\\n\"):\n md.append(ln)\n md.append(\"\")\n\n parse_schema(val, md, depth, f\"{pre}{key}.\")\n depth -= 1\n return md\n\n\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open(\"resources/reference.md\", \"w\") as f:\n f.write(\"\\n\".join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n 
\"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n \"https://portal.azure.com\", # sign-in redirect noise\n \"https://console.cloud.google.com\", # sign-in redirect noise\n \"https://console.developers.google.com\", # sign-in redirect noise\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_theme_options = {\n \"github_url\": \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/\",\n \"use_edit_page_button\": True,\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"zero-to-jupyterhub-k8s\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_logo = \"_static/images/logo/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nimport datetime\nimport os\nimport re\nimport subprocess\n\nimport yaml\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n\n\n# -- Referencable variables --------------------------------------------------\n\n\ndef _get_git_ref_from_chartpress_based_version(version):\n \"\"\"\n Get a git ref from a chartpress set version of format like\n 1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.\n \"\"\"\n tag_hash_split = re.split(r\"[\\.|-]n\\d\\d\\d\\.h\", version)\n if len(tag_hash_split) == 2:\n return tag_hash_split[1]\n else:\n return tag_hash_split[0]\n\n\n# FIXME: Stop relying on chartpress to modify Chart.yaml (and values.yaml) by\n# creating a new feature of chartpress that allows us to directly acquire\n# the dynamically set chart version from Chart.yaml. 
This would be\n# similar to the --list-images feature of chartpress.\nsubprocess.run([\"chartpress\", \"--skip-build\"], cwd=os.path.abspath(\"../..\"))\nwith open(\"../../jupyterhub/Chart.yaml\") as f:\n chart = yaml.safe_load(f)\nsubprocess.run([\"chartpress\", \"--reset\"], cwd=os.path.abspath(\"../..\"))\n\n# These substitution variables only work in markdown contexts, and does not work\n# within links etc. Reference using {{ variable_name }}\n#\n# myst_substitutions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#substitutions-with-jinja2\nmyst_substitutions = {\n \"chart_version\": chart[\"version\"],\n \"jupyterhub_version\": chart[\"appVersion\"],\n # FIXME: kubeVersion contain >=, but by having > in the string we substitute\n # we run into this issue:\n # https://github.com/executablebooks/MyST-Parser/issues/282\n \"kube_version\": chart[\"kubeVersion\"].split(\"-\", 1)[0][2:],\n \"helm_version\": \"3.5\",\n \"requirements\": f\"[hub/images/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{_get_git_ref_from_chartpress_based_version(chart['version'])}/images/hub/requirements.txt)\",\n}\n\n\n# -- General MyST configuration -----------------------------------------------------\n\n# myst_enable_extensions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html\nmyst_enable_extensions = [\n \"substitution\",\n]\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"Zero to JupyterHub with Kubernetes\"\ncopyright = f\"{datetime.date.today().year}, Project Jupyter Contributors\"\nauthor = \"Project Jupyter Contributors\"\n\n\n# -- General Sphinx configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = \"literal\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx_copybutton\",\n \"myst_parser\",\n \"sphinxext.rediraffe\",\n \"sphinxext.opengraph\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The root toctree document.\nroot_doc = master_doc = \"index\"\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".md\", \".rst\"]\n\n# Rediraffe redirects to ensure proper redirection\nrediraffe_redirects = {\n \"customizing/user-management\": \"jupyterhub/customizing/user-management\",\n \"customizing/user-storage\": \"jupyterhub/customizing/user-storage\",\n \"customizing/user-resources\": \"jupyterhub/customizing/user-resources\",\n \"customizing/user-environment\": \"jupyterhub/customizing/user-environment\",\n \"customizing/extending-jupyterhub\": \"jupyterhub/customizing/extending-jupyterhub\",\n \"reference/glossary\": \"resources/glossary\",\n \"reference/tools\": \"resources/tools\",\n \"reference/reference-docs\": \"resources/reference-docs\",\n \"reference/reference\": \"resources/reference\",\n \"community/additional-resources\": \"resources/community\",\n \"community/users-list\": \"resources/community\",\n \"community/tips\": \"resources/community\",\n \"setup-jupyterhub/turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub/setup-jupyterhub\": \"jupyterhub/installation\",\n \"setup-jupyterhub/setup-helm\": \"kubernetes/setup-helm\",\n \"ovh/step-zero-ovh\": \"kubernetes/ovh/step-zero-ovh\",\n \"digital-ocean/step-zero-digital-ocean\": \"kubernetes/digital-ocean/step-zero-digital-ocean\",\n \"ibm/step-zero-ibm\": \"kubernetes/ibm/step-zero-ibm\",\n \"redhat/step-zero-openshift\": \"kubernetes/redhat/step-zero-openshift\",\n \"amazon/step-zero-aws-eks\": \"kubernetes/amazon/step-zero-aws-eks\",\n \"amazon/step-zero-aws\": \"kubernetes/amazon/step-zero-aws\",\n \"microsoft/step-zero-azure-autoscale\": \"kubernetes/microsoft/step-zero-azure-autoscale\",\n \"microsoft/step-zero-azure\": \"kubernetes/microsoft/step-zero-azure\",\n \"google/step-zero-gcp\": \"kubernetes/google/step-zero-gcp\",\n \"create-k8s-cluster\": \"kubernetes/setup-kubernetes\",\n \"turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub\": \"jupyterhub/index\",\n \"setup-helm\": \"kubernetes/setup-helm\",\n \"index-setup-jupyterhub\": \"jupyterhub/index\",\n \"tools\": \"reference/tools\",\n \"reference-docs\": \"reference/reference-docs\",\n \"index-reference\": \"resources/reference\",\n \"glossary\": \"reference/glossary\",\n \"user-storage\": \"customizing/user-storage\",\n \"user-resources\": \"customizing/user-resources\",\n \"user-management\": \"customizing/user-management\",\n \"user-environment\": \"customizing/user-environment\",\n \"index-customization-guide\": \"jupyterhub/customization\",\n \"extending-jupyterhub\": \"customizing/extending-jupyterhub\",\n \"users-list\": \"community/users-list\",\n \"tips\": \"community/tips\",\n \"index-community-resources\": \"resources/community\",\n \"additional-resources\": \"resources/community\",\n \"upgrading\": \"administrator/upgrading\",\n \"troubleshooting\": \"administrator/troubleshooting\",\n \"security\": \"administrator/security\",\n \"optimization\": \"administrator/optimization\",\n \"index-administrator-guide\": \"administrator/index\",\n \"debug\": 
\"administrator/debug\",\n \"cost\": \"administrator/cost\",\n \"authentication\": \"administrator/authentication\",\n \"architecture\": \"administrator/architecture\",\n \"advanced\": \"administrator/advanced\",\n}\n\n# opengraph configuration\n# ogp_site_url/prefix is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open(\"resources/reference.txt\") as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip(\"\\n\") for ln in header_md]\n\n# schema\nwith open(\"../../jupyterhub/schema.yaml\") as f:\n data = yaml.safe_load(f)\n\n\ndef parse_schema(d, md=[], depth=0, pre=\"\"):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if \"then\" in d:\n d = d[\"then\"]\n\n if \"properties\" in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d[\"properties\"].items():\n md.append(f\"(schema_{pre}{key})=\")\n md.append(\"#\" * (depth + 1) + f\" {pre}{key}\")\n md.append(\"\")\n if \"description\" in val:\n for ln in val[\"description\"].split(\"\\n\"):\n md.append(ln)\n md.append(\"\")\n\n parse_schema(val, md, depth, f\"{pre}{key}.\")\n depth -= 1\n return md\n\n\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open(\"resources/reference.md\", \"w\") as f:\n f.write(\"\\n\".join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/\", # too many PRs in changelog\n \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/\", # too many comparisons in changelog\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n \"https://portal.azure.com\", # sign-in redirect noise\n \"https://console.cloud.google.com\", # sign-in redirect noise\n \"https://console.developers.google.com\", # sign-in redirect noise\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_theme_options = {\n \"github_url\": \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/\",\n \"use_edit_page_button\": True,\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"zero-to-jupyterhub-k8s\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_logo = \"_static/images/logo/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n", "path": "docs/source/conf.py"}]}
| 3,698 | 221 |
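The before/after files shown in this record both carry the same schema-to-markdown generator: conf.py loads jupyterhub/schema.yaml and recursively walks its nested `properties`, emitting a MyST anchor plus a heading for every key so that resources/reference.md always mirrors the chart schema. A minimal standalone sketch of that walk, assuming a tiny inline schema dict in place of schema.yaml (the dict contents and the `md=None` default are illustrative choices, not taken from the record):

```python
def parse_schema(d, md=None, depth=0, pre=""):
    """Walk a schema-like dict, emitting a MyST anchor and heading per property."""
    if md is None:  # fresh list per top-level call (illustrative deviation)
        md = []
    if "then" in d:
        d = d["then"]
    if "properties" in d:
        depth += 1
        for key, val in d["properties"].items():
            md.append(f"(schema_{pre}{key})=")
            md.append("#" * (depth + 1) + f" {pre}{key}")
            md.append("")
            if "description" in val:
                md.extend(val["description"].split("\n"))
                md.append("")
            parse_schema(val, md, depth, f"{pre}{key}.")
    return md

# Tiny stand-in for jupyterhub/schema.yaml.
schema = {
    "properties": {
        "hub": {
            "description": "Settings for the hub pod.",
            "properties": {
                "image": {"description": "Container image to use."},
            },
        },
    },
}

print("\n".join(parse_schema(schema)))
```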
gh_patches_debug_10581
|
rasdani/github-patches
|
git_diff
|
pytorch__rl-598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] timeit profiling class does not correctly track how many times a function has been called.
## Describe the bug
In file **pytorch/rl/torchrl/_utils.py**, in the class **timeit**, starting at line 31, the code is the following
```
count = self._REG[self.name][1]
self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
self._REG[self.name][1] = self._REG[self.name][1] + t
self._REG[self.name][2] = count + 1
```
I understand self._REG[self.name][2] tracks the number of times a function has been called.
In that case, shouldn't the first line be changed to
```
count = self._REG[self.name][2]
```
## Checklist
- [X] I have checked that there is no similar issue in the repo (**required**)
- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)
- [ ] I have provided a minimal working example to reproduce the bug (**required**)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchrl/_utils.py`
Content:
```
1 import collections
2 import math
3 import time
4
5 import numpy as np
6
7
8 class timeit:
9 """A dirty but easy to use decorator for profiling code."""
10
11 _REG = {}
12
13 def __init__(self, name):
14 self.name = name
15
16 def __call__(self, fn):
17 def decorated_fn(*args, **kwargs):
18 with self:
19 out = fn(*args, **kwargs)
20 return out
21
22 return decorated_fn
23
24 def __enter__(self):
25 self.t0 = time.time()
26
27 def __exit__(self, exc_type, exc_val, exc_tb):
28 t = time.time() - self.t0
29 self._REG.setdefault(self.name, [0.0, 0.0, 0])
30
31 count = self._REG[self.name][1]
32 self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
33 self._REG[self.name][1] = self._REG[self.name][1] + t
34 self._REG[self.name][2] = count + 1
35
36 @staticmethod
37 def print(prefix=None):
38 keys = list(timeit._REG)
39 keys.sort()
40 for name in keys:
41 strings = []
42 if prefix:
43 strings.append(prefix)
44 strings.append(
45 f"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)"
46 )
47 print(" -- ".join(strings))
48
49 @staticmethod
50 def erase():
51 for k in timeit._REG:
52 timeit._REG[k] = [0.0, 0.0, 0]
53
54
55 def _check_for_faulty_process(processes):
56 terminate = False
57 for p in processes:
58 if not p.is_alive():
59 terminate = True
60 for _p in processes:
61 if _p.is_alive():
62 _p.terminate()
63 if terminate:
64 break
65 if terminate:
66 raise RuntimeError(
67 "At least one process failed. Check for more infos in the log."
68 )
69
70
71 def seed_generator(seed):
72 """A seed generator function.
73
74 Given a seeding integer, generates a deterministic next seed to be used in a
75 seeding sequence.
76
77 Args:
78 seed (int): initial seed.
79
80 Returns: Next seed of the chain.
81
82 """
83 max_seed_val = (
84 2 ** 32 - 1
85 ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688
86 rng = np.random.default_rng(seed)
87 seed = int.from_bytes(rng.bytes(8), "big")
88 return seed % max_seed_val
89
90
91 class KeyDependentDefaultDict(collections.defaultdict):
92 """A key-dependent default dict.
93
94 Examples:
95 >>> my_dict = KeyDependentDefaultDict(lambda key: "foo_" + key)
96 >>> print(my_dict["bar"])
97 foo_bar
98 """
99
100 def __init__(self, fun):
101 self.fun = fun
102 super().__init__()
103
104 def __missing__(self, key):
105 value = self.fun(key)
106 self[key] = value
107 return value
108
109
110 def prod(sequence):
111 """General prod function, that generalised usage across math and np.
112
113 Created for multiple python versions compatibility).
114
115 """
116 if hasattr(math, "prod"):
117 return math.prod(sequence)
118 else:
119 return int(np.prod(sequence))
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchrl/_utils.py b/torchrl/_utils.py
--- a/torchrl/_utils.py
+++ b/torchrl/_utils.py
@@ -26,12 +26,13 @@
def __exit__(self, exc_type, exc_val, exc_tb):
t = time.time() - self.t0
- self._REG.setdefault(self.name, [0.0, 0.0, 0])
+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])
- count = self._REG[self.name][1]
- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
- self._REG[self.name][1] = self._REG[self.name][1] + t
- self._REG[self.name][2] = count + 1
+ count = val[2]
+ N = count + 1
+ val[0] = val[0] * (count / N) + t / N
+ val[1] += t
+ val[2] = N
@staticmethod
def print(prefix=None):
|
{"golden_diff": "diff --git a/torchrl/_utils.py b/torchrl/_utils.py\n--- a/torchrl/_utils.py\n+++ b/torchrl/_utils.py\n@@ -26,12 +26,13 @@\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n- self._REG.setdefault(self.name, [0.0, 0.0, 0])\n+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])\n \n- count = self._REG[self.name][1]\n- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n- self._REG[self.name][1] = self._REG[self.name][1] + t\n- self._REG[self.name][2] = count + 1\n+ count = val[2]\n+ N = count + 1\n+ val[0] = val[0] * (count / N) + t / N\n+ val[1] += t\n+ val[2] = N\n \n @staticmethod\n def print(prefix=None):\n", "issue": "[BUG] timeit profiling class does not correctly track how many times a function has been called.\n## Describe the bug\r\n\r\nIn file **pytorch/rl/torchrl/_utils.py**, in the class **timeit**, starting at line 31, the code is the following\r\n\r\n```\r\ncount = self._REG[self.name][1]\r\nself._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\r\nself._REG[self.name][1] = self._REG[self.name][1] + t\r\nself._REG[self.name][2] = count + 1\r\n```\r\n\r\nI understand self._REG[self.name][2] tracks the number of times a function has been called.\r\nIn that case, shouldn't the first line be changed to \r\n\r\n```\r\ncount = self._REG[self.name][2]\r\n```\r\n\r\n## Checklist\r\n\r\n- [X] I have checked that there is no similar issue in the repo (**required**)\r\n- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)\r\n- [ ] I have provided a minimal working example to reproduce the bug (**required**)\r\n\n", "before_files": [{"content": "import collections\nimport math\nimport time\n\nimport numpy as np\n\n\nclass timeit:\n \"\"\"A dirty but easy to use decorator for profiling code.\"\"\"\n\n _REG = {}\n\n def __init__(self, name):\n self.name = name\n\n def __call__(self, fn):\n def decorated_fn(*args, **kwargs):\n with self:\n out = fn(*args, **kwargs)\n return out\n\n return decorated_fn\n\n def __enter__(self):\n self.t0 = time.time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n self._REG.setdefault(self.name, [0.0, 0.0, 0])\n\n count = self._REG[self.name][1]\n self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n self._REG[self.name][1] = self._REG[self.name][1] + t\n self._REG[self.name][2] = count + 1\n\n @staticmethod\n def print(prefix=None):\n keys = list(timeit._REG)\n keys.sort()\n for name in keys:\n strings = []\n if prefix:\n strings.append(prefix)\n strings.append(\n f\"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)\"\n )\n print(\" -- \".join(strings))\n\n @staticmethod\n def erase():\n for k in timeit._REG:\n timeit._REG[k] = [0.0, 0.0, 0]\n\n\ndef _check_for_faulty_process(processes):\n terminate = False\n for p in processes:\n if not p.is_alive():\n terminate = True\n for _p in processes:\n if _p.is_alive():\n _p.terminate()\n if terminate:\n break\n if terminate:\n raise RuntimeError(\n \"At least one process failed. 
Check for more infos in the log.\"\n )\n\n\ndef seed_generator(seed):\n \"\"\"A seed generator function.\n\n Given a seeding integer, generates a deterministic next seed to be used in a\n seeding sequence.\n\n Args:\n seed (int): initial seed.\n\n Returns: Next seed of the chain.\n\n \"\"\"\n max_seed_val = (\n 2 ** 32 - 1\n ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688\n rng = np.random.default_rng(seed)\n seed = int.from_bytes(rng.bytes(8), \"big\")\n return seed % max_seed_val\n\n\nclass KeyDependentDefaultDict(collections.defaultdict):\n \"\"\"A key-dependent default dict.\n\n Examples:\n >>> my_dict = KeyDependentDefaultDict(lambda key: \"foo_\" + key)\n >>> print(my_dict[\"bar\"])\n foo_bar\n \"\"\"\n\n def __init__(self, fun):\n self.fun = fun\n super().__init__()\n\n def __missing__(self, key):\n value = self.fun(key)\n self[key] = value\n return value\n\n\ndef prod(sequence):\n \"\"\"General prod function, that generalised usage across math and np.\n\n Created for multiple python versions compatibility).\n\n \"\"\"\n if hasattr(math, \"prod\"):\n return math.prod(sequence)\n else:\n return int(np.prod(sequence))\n", "path": "torchrl/_utils.py"}], "after_files": [{"content": "import collections\nimport math\nimport time\n\nimport numpy as np\n\n\nclass timeit:\n \"\"\"A dirty but easy to use decorator for profiling code.\"\"\"\n\n _REG = {}\n\n def __init__(self, name):\n self.name = name\n\n def __call__(self, fn):\n def decorated_fn(*args, **kwargs):\n with self:\n out = fn(*args, **kwargs)\n return out\n\n return decorated_fn\n\n def __enter__(self):\n self.t0 = time.time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n val = self._REG.setdefault(self.name, [0.0, 0.0, 0])\n\n count = val[2]\n N = count + 1\n val[0] = val[0] * (count / N) + t / N\n val[1] += t\n val[2] = N\n\n @staticmethod\n def print(prefix=None):\n keys = list(timeit._REG)\n keys.sort()\n for name in keys:\n strings = []\n if prefix:\n strings.append(prefix)\n strings.append(\n f\"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)\"\n )\n print(\" -- \".join(strings))\n\n @staticmethod\n def erase():\n for k in timeit._REG:\n timeit._REG[k] = [0.0, 0.0, 0]\n\n\ndef _check_for_faulty_process(processes):\n terminate = False\n for p in processes:\n if not p.is_alive():\n terminate = True\n for _p in processes:\n if _p.is_alive():\n _p.terminate()\n if terminate:\n break\n if terminate:\n raise RuntimeError(\n \"At least one process failed. 
Check for more infos in the log.\"\n )\n\n\ndef seed_generator(seed):\n \"\"\"A seed generator function.\n\n Given a seeding integer, generates a deterministic next seed to be used in a\n seeding sequence.\n\n Args:\n seed (int): initial seed.\n\n Returns: Next seed of the chain.\n\n \"\"\"\n max_seed_val = (\n 2 ** 32 - 1\n ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688\n rng = np.random.default_rng(seed)\n seed = int.from_bytes(rng.bytes(8), \"big\")\n return seed % max_seed_val\n\n\nclass KeyDependentDefaultDict(collections.defaultdict):\n \"\"\"A key-dependent default dict.\n\n Examples:\n >>> my_dict = KeyDependentDefaultDict(lambda key: \"foo_\" + key)\n >>> print(my_dict[\"bar\"])\n foo_bar\n \"\"\"\n\n def __init__(self, fun):\n self.fun = fun\n super().__init__()\n\n def __missing__(self, key):\n value = self.fun(key)\n self[key] = value\n return value\n\n\ndef prod(sequence):\n \"\"\"General prod function, that generalised usage across math and np.\n\n Created for multiple python versions compatibility).\n\n \"\"\"\n if hasattr(math, \"prod\"):\n return math.prod(sequence)\n else:\n return int(np.prod(sequence))\n", "path": "torchrl/_utils.py"}]}
| 1,538 | 274 |
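The fix in this record reads the call count from index 2 of the per-name registry entry and folds each new timing into a running mean, so both the average and the call counter stay correct after any number of calls. A minimal standalone sketch of that update rule, assuming a plain module-level registry (the `record` helper and the demo loop are illustrative, not taken from torchrl):

```python
import time

# registry maps a label to [mean_seconds, total_seconds, call_count]
_REG = {}

def record(name, elapsed):
    """Fold one elapsed time into the running statistics for `name`."""
    mean, total, count = _REG.setdefault(name, [0.0, 0.0, 0])
    new_count = count + 1
    _REG[name] = [
        mean * (count / new_count) + elapsed / new_count,  # incremental mean
        total + elapsed,
        new_count,
    ]

if __name__ == "__main__":
    for _ in range(3):
        t0 = time.time()
        time.sleep(0.01)
        record("sleep", time.time() - t0)
    mean, total, count = _REG["sleep"]
    print(f"sleep: mean={mean:.4f}s total={total:.4f}s calls={count}")
```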
gh_patches_debug_6725
|
rasdani/github-patches
|
git_diff
|
weni-ai__bothub-engine-197
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rest API just generate HTTP urls
next links (List Views) and absolute urls are generated to HTTP protocol because the all requests are make in port 80.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3
4 setup(
5 name='bothub',
6 version='1.15.0',
7 description='bothub',
8 packages=find_packages(),
9 install_requires=[
10 'python-decouple',
11 'requests',
12 'django==2.0.6',
13 'djangorestframework==3.7.7',
14 'whitenoise',
15 'dj-database-url',
16 'django-cors-headers',
17 'django-filter',
18 'coreapi',
19 ],
20 python_requires='>=3.6',
21 )
22
```
Path: `bothub/settings.py`
Content:
```
1 import os
2 import dj_database_url
3
4 from decouple import config
5 from django.utils.log import DEFAULT_LOGGING
6
7 from .utils import cast_supported_languages
8 from .utils import cast_empty_str_to_none
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14
15 # SECURITY WARNING: keep the secret key used in production secret!
16 SECRET_KEY = config('SECRET_KEY')
17
18 # SECURITY WARNING: don't run with debug turned on in production!
19 DEBUG = config('DEBUG', default=False, cast=bool)
20
21 ALLOWED_HOSTS = config(
22 'ALLOWED_HOSTS',
23 default='*',
24 cast=lambda v: [s.strip() for s in v.split(',')])
25
26
27 # Application definition
28
29 INSTALLED_APPS = [
30 'django.contrib.admin',
31 'django.contrib.auth',
32 'django.contrib.contenttypes',
33 'django.contrib.sessions',
34 'django.contrib.messages',
35 'django.contrib.staticfiles',
36 'rest_framework',
37 'rest_framework.authtoken',
38 'django_filters',
39 'corsheaders',
40 'bothub.authentication',
41 'bothub.common',
42 'bothub.api',
43 ]
44
45 MIDDLEWARE = [
46 'django.middleware.security.SecurityMiddleware',
47 'whitenoise.middleware.WhiteNoiseMiddleware',
48 'django.contrib.sessions.middleware.SessionMiddleware',
49 'corsheaders.middleware.CorsMiddleware',
50 'django.middleware.common.CommonMiddleware',
51 'django.middleware.csrf.CsrfViewMiddleware',
52 'django.contrib.auth.middleware.AuthenticationMiddleware',
53 'django.contrib.messages.middleware.MessageMiddleware',
54 'django.middleware.clickjacking.XFrameOptionsMiddleware',
55 ]
56
57 ROOT_URLCONF = 'bothub.urls'
58
59 TEMPLATES = [
60 {
61 'BACKEND': 'django.template.backends.django.DjangoTemplates',
62 'DIRS': [],
63 'APP_DIRS': True,
64 'OPTIONS': {
65 'context_processors': [
66 'django.template.context_processors.debug',
67 'django.template.context_processors.request',
68 'django.contrib.auth.context_processors.auth',
69 'django.contrib.messages.context_processors.messages',
70 ],
71 },
72 },
73 ]
74
75 WSGI_APPLICATION = 'bothub.wsgi.application'
76
77
78 # Database
79
80 DATABASES = {}
81 DATABASES['default'] = dj_database_url.parse(
82 config(
83 'DEFAULT_DATABASE',
84 default='sqlite:///db.sqlite3'))
85
86
87 # Auth
88
89 AUTH_USER_MODEL = 'authentication.User'
90
91
92 # Password validation
93
94 AUTH_PASSWORD_VALIDATORS = [
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'UserAttributeSimilarityValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'MinimumLengthValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'CommonPasswordValidator',
106 },
107 {
108 'NAME': 'django.contrib.auth.password_validation.' +
109 'NumericPasswordValidator',
110 },
111 ]
112
113
114 # Internationalization
115
116 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
117
118 TIME_ZONE = config('TIME_ZONE', default='UTC')
119
120 USE_I18N = True
121
122 USE_L10N = True
123
124 USE_TZ = True
125
126
127 # Static files (CSS, JavaScript, Images)
128
129 STATIC_URL = config('STATIC_URL', default='/static/')
130
131 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
132
133 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
134
135
136 # rest framework
137
138 REST_FRAMEWORK = {
139 'DEFAULT_AUTHENTICATION_CLASSES': [
140 'rest_framework.authentication.TokenAuthentication',
141 ],
142 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
143 'LimitOffsetPagination',
144 'PAGE_SIZE': 20,
145 'DEFAULT_FILTER_BACKENDS': [
146 'django_filters.rest_framework.DjangoFilterBackend',
147 ],
148 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
149 }
150
151
152 # cors headers
153
154 CORS_ORIGIN_ALLOW_ALL = True
155
156
157 # mail
158
159 envvar_EMAIL_HOST = config(
160 'EMAIL_HOST',
161 default=None,
162 cast=cast_empty_str_to_none)
163
164 ADMINS = config(
165 'ADMINS',
166 default='',
167 cast=lambda v: [
168 (
169 s.strip().split('|')[0],
170 s.strip().split('|')[1],
171 ) for s in v.split(',')] if v else [])
172 EMAIL_SUBJECT_PREFIX = '[bothub] '
173 DEFAULT_FROM_EMAIL = config(
174 'DEFAULT_FROM_EMAIL',
175 default='webmaster@localhost')
176 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
177
178 if envvar_EMAIL_HOST:
179 EMAIL_HOST = envvar_EMAIL_HOST
180 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
181 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
182 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
183 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
184 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
185 else:
186 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
187
188 SEND_EMAILS = config('SEND_EMAILS', default=True, cast=bool)
189
190
191 # webapp
192
193 BOTHUB_WEBAPP_BASE_URL = config(
194 'BOTHUB_WEBAPP_BASE_URL',
195 default='http://localhost:8080/')
196
197
198 # NLP
199
200 BOTHUB_NLP_BASE_URL = config(
201 'BOTHUB_NLP_BASE_URL',
202 default='http://localhost:2657/')
203
204
205 # CSRF
206
207 CSRF_COOKIE_DOMAIN = config(
208 'CSRF_COOKIE_DOMAIN',
209 default=None,
210 cast=cast_empty_str_to_none)
211
212 CSRF_COOKIE_SECURE = config(
213 'CSRF_COOKIE_SECURE',
214 default=False,
215 cast=bool)
216
217
218 # Logging
219
220 LOGGING = DEFAULT_LOGGING
221 LOGGING['formatters']['bothub.health'] = {
222 'format': '[bothub.health] {message}',
223 'style': '{',
224 }
225 LOGGING['handlers']['bothub.health'] = {
226 'level': 'DEBUG',
227 'class': 'logging.StreamHandler',
228 'formatter': 'bothub.health',
229 }
230 LOGGING['loggers']['bothub.health.checks'] = {
231 'handlers': ['bothub.health'],
232 'level': 'DEBUG',
233 }
234
235
236 # Supported Languages
237
238 SUPPORTED_LANGUAGES = config(
239 'SUPPORTED_LANGUAGES',
240 default='en|pt',
241 cast=cast_supported_languages)
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -239,3 +239,8 @@
'SUPPORTED_LANGUAGES',
default='en|pt',
cast=cast_supported_languages)
+
+
+# SECURE PROXY SSL HEADER
+
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
setup(
name='bothub',
- version='1.15.0',
+ version='1.15.1',
description='bothub',
packages=find_packages(),
install_requires=[
|
{"golden_diff": "diff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -239,3 +239,8 @@\n 'SUPPORTED_LANGUAGES',\n default='en|pt',\n cast=cast_supported_languages)\n+\n+\n+# SECURE PROXY SSL HEADER\n+\n+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 @@\n \n setup(\n name='bothub',\n- version='1.15.0',\n+ version='1.15.1',\n description='bothub',\n packages=find_packages(),\n install_requires=[\n", "issue": "Rest API just generate HTTP urls\nnext links (List Views) and absolute urls are generated to HTTP protocol because the all requests are make in port 80.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\nsetup(\n name='bothub',\n version='1.15.0',\n description='bothub',\n packages=find_packages(),\n install_requires=[\n 'python-decouple',\n 'requests',\n 'django==2.0.6',\n 'djangorestframework==3.7.7',\n 'whitenoise',\n 'dj-database-url',\n 'django-cors-headers',\n 'django-filter',\n 'coreapi',\n ],\n python_requires='>=3.6',\n)\n", "path": "setup.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\nfrom django.utils.log import DEFAULT_LOGGING\n\nfrom .utils import cast_supported_languages\nfrom .utils import cast_empty_str_to_none\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n\n# mail\n\nenvvar_EMAIL_HOST = config(\n 'EMAIL_HOST',\n default=None,\n cast=cast_empty_str_to_none)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nSEND_EMAILS = config('SEND_EMAILS', default=True, cast=bool)\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:2657/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None,\n cast=cast_empty_str_to_none)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n\n\n# Logging\n\nLOGGING = DEFAULT_LOGGING\nLOGGING['formatters']['bothub.health'] = {\n 'format': '[bothub.health] {message}',\n 'style': '{',\n}\nLOGGING['handlers']['bothub.health'] = {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'bothub.health',\n}\nLOGGING['loggers']['bothub.health.checks'] = {\n 'handlers': ['bothub.health'],\n 'level': 'DEBUG',\n}\n\n\n# Supported Languages\n\nSUPPORTED_LANGUAGES = config(\n 'SUPPORTED_LANGUAGES',\n default='en|pt',\n cast=cast_supported_languages)\n", "path": "bothub/settings.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\n\nsetup(\n name='bothub',\n version='1.15.1',\n description='bothub',\n packages=find_packages(),\n install_requires=[\n 'python-decouple',\n 'requests',\n 'django==2.0.6',\n 'djangorestframework==3.7.7',\n 'whitenoise',\n 'dj-database-url',\n 'django-cors-headers',\n 'django-filter',\n 'coreapi',\n ],\n 
python_requires='>=3.6',\n)\n", "path": "setup.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\nfrom django.utils.log import DEFAULT_LOGGING\n\nfrom .utils import cast_supported_languages\nfrom .utils import cast_empty_str_to_none\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' 
+\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n\n# mail\n\nenvvar_EMAIL_HOST = config(\n 'EMAIL_HOST',\n default=None,\n cast=cast_empty_str_to_none)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nSEND_EMAILS = config('SEND_EMAILS', default=True, cast=bool)\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:2657/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None,\n cast=cast_empty_str_to_none)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n\n\n# Logging\n\nLOGGING = DEFAULT_LOGGING\nLOGGING['formatters']['bothub.health'] = {\n 'format': '[bothub.health] {message}',\n 'style': '{',\n}\nLOGGING['handlers']['bothub.health'] = {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'bothub.health',\n}\nLOGGING['loggers']['bothub.health.checks'] = {\n 'handlers': ['bothub.health'],\n 'level': 'DEBUG',\n}\n\n\n# Supported Languages\n\nSUPPORTED_LANGUAGES = config(\n 'SUPPORTED_LANGUAGES',\n default='en|pt',\n cast=cast_supported_languages)\n\n\n# SECURE PROXY SSL HEADER\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n", "path": "bothub/settings.py"}]}
| 2,447 | 172 |
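The one-line settings addition in this record works because Django treats a request as HTTPS only when `request.is_secure()` is true; behind a TLS-terminating proxy that forwards plain HTTP on port 80, `SECURE_PROXY_SSL_HEADER` tells Django to trust the `X-Forwarded-Proto` header instead, so DRF then builds `https://` pagination links and absolute URLs. A short sketch of how the effect surfaces, assuming the standard Django request API (the `whoami` view and the example path are illustrative, not part of bothub):

```python
# settings.py -- the line added by the fix: trust the scheme the proxy reports.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")

# views.py -- illustrative only: once the proxy sends "X-Forwarded-Proto: https",
# is_secure() returns True and absolute URLs (and DRF "next" links) use https://.
from django.http import JsonResponse

def whoami(request):
    return JsonResponse({
        "is_secure": request.is_secure(),
        "absolute_url": request.build_absolute_uri("/api/"),
    })
```

This is only safe when the proxy strips or overwrites any `X-Forwarded-Proto` header sent by clients; otherwise a client could spoof the scheme.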
gh_patches_debug_4394
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3371
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError from contract errback
When running a contract with a URL that returns non-200 response, I get the following:
```
2018-08-09 14:40:23 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.bureauxlocaux.com/annonce/a-louer-bureaux-a-louer-a-nantes--1289-358662> (referer: None)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/site-packages/scrapy/contracts/__init__.py", line 89, in eb_wrapper
results.addError(case, exc_info)
File "/usr/local/lib/python3.6/unittest/runner.py", line 67, in addError
super(TextTestResult, self).addError(test, err)
File "/usr/local/lib/python3.6/unittest/result.py", line 17, in inner
return method(self, *args, **kw)
File "/usr/local/lib/python3.6/unittest/result.py", line 115, in addError
self.errors.append((test, self._exc_info_to_string(err, test)))
File "/usr/local/lib/python3.6/unittest/result.py", line 186, in _exc_info_to_string
exctype, value, tb, limit=length, capture_locals=self.tb_locals)
File "/usr/local/lib/python3.6/traceback.py", line 470, in __init__
exc_value.__cause__.__traceback__,
AttributeError: 'getset_descriptor' object has no attribute '__traceback__'
```
Here is how `exc_info` looks like:
```
(HttpError('Ignoring non-200 response',), <class 'scrapy.spidermiddlewares.httperror.HttpError'>, <traceback object at 0x7f4bdca1d948>)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/contracts/__init__.py`
Content:
```
1 import sys
2 import re
3 from functools import wraps
4 from unittest import TestCase
5
6 from scrapy.http import Request
7 from scrapy.utils.spider import iterate_spider_output
8 from scrapy.utils.python import get_spec
9
10
11 class ContractsManager(object):
12 contracts = {}
13
14 def __init__(self, contracts):
15 for contract in contracts:
16 self.contracts[contract.name] = contract
17
18 def tested_methods_from_spidercls(self, spidercls):
19 methods = []
20 for key, value in vars(spidercls).items():
21 if (callable(value) and value.__doc__ and
22 re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
23 methods.append(key)
24
25 return methods
26
27 def extract_contracts(self, method):
28 contracts = []
29 for line in method.__doc__.split('\n'):
30 line = line.strip()
31
32 if line.startswith('@'):
33 name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
34 args = re.split(r'\s+', args)
35
36 contracts.append(self.contracts[name](method, *args))
37
38 return contracts
39
40 def from_spider(self, spider, results):
41 requests = []
42 for method in self.tested_methods_from_spidercls(type(spider)):
43 bound_method = spider.__getattribute__(method)
44 requests.append(self.from_method(bound_method, results))
45
46 return requests
47
48 def from_method(self, method, results):
49 contracts = self.extract_contracts(method)
50 if contracts:
51 # calculate request args
52 args, kwargs = get_spec(Request.__init__)
53 kwargs['callback'] = method
54 for contract in contracts:
55 kwargs = contract.adjust_request_args(kwargs)
56
57 # create and prepare request
58 args.remove('self')
59 if set(args).issubset(set(kwargs)):
60 request = Request(**kwargs)
61
62 # execute pre and post hooks in order
63 for contract in reversed(contracts):
64 request = contract.add_pre_hook(request, results)
65 for contract in contracts:
66 request = contract.add_post_hook(request, results)
67
68 self._clean_req(request, method, results)
69 return request
70
71 def _clean_req(self, request, method, results):
72 """ stop the request from returning objects and records any errors """
73
74 cb = request.callback
75
76 @wraps(cb)
77 def cb_wrapper(response):
78 try:
79 output = cb(response)
80 output = list(iterate_spider_output(output))
81 except:
82 case = _create_testcase(method, 'callback')
83 results.addError(case, sys.exc_info())
84
85 def eb_wrapper(failure):
86 case = _create_testcase(method, 'errback')
87 exc_info = failure.value, failure.type, failure.getTracebackObject()
88 results.addError(case, exc_info)
89
90 request.callback = cb_wrapper
91 request.errback = eb_wrapper
92
93
94 class Contract(object):
95 """ Abstract class for contracts """
96
97 def __init__(self, method, *args):
98 self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
99 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
100 self.args = args
101
102 def add_pre_hook(self, request, results):
103 if hasattr(self, 'pre_process'):
104 cb = request.callback
105
106 @wraps(cb)
107 def wrapper(response):
108 try:
109 results.startTest(self.testcase_pre)
110 self.pre_process(response)
111 results.stopTest(self.testcase_pre)
112 except AssertionError:
113 results.addFailure(self.testcase_pre, sys.exc_info())
114 except Exception:
115 results.addError(self.testcase_pre, sys.exc_info())
116 else:
117 results.addSuccess(self.testcase_pre)
118 finally:
119 return list(iterate_spider_output(cb(response)))
120
121 request.callback = wrapper
122
123 return request
124
125 def add_post_hook(self, request, results):
126 if hasattr(self, 'post_process'):
127 cb = request.callback
128
129 @wraps(cb)
130 def wrapper(response):
131 output = list(iterate_spider_output(cb(response)))
132 try:
133 results.startTest(self.testcase_post)
134 self.post_process(output)
135 results.stopTest(self.testcase_post)
136 except AssertionError:
137 results.addFailure(self.testcase_post, sys.exc_info())
138 except Exception:
139 results.addError(self.testcase_post, sys.exc_info())
140 else:
141 results.addSuccess(self.testcase_post)
142 finally:
143 return output
144
145 request.callback = wrapper
146
147 return request
148
149 def adjust_request_args(self, args):
150 return args
151
152
153 def _create_testcase(method, desc):
154 spider = method.__self__.name
155
156 class ContractTestCase(TestCase):
157 def __str__(_self):
158 return "[%s] %s (%s)" % (spider, method.__name__, desc)
159
160 name = '%s_%s' % (spider, method.__name__)
161 setattr(ContractTestCase, name, lambda x: x)
162 return ContractTestCase(name)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py
--- a/scrapy/contracts/__init__.py
+++ b/scrapy/contracts/__init__.py
@@ -84,7 +84,7 @@
def eb_wrapper(failure):
case = _create_testcase(method, 'errback')
- exc_info = failure.value, failure.type, failure.getTracebackObject()
+ exc_info = failure.type, failure.value, failure.getTracebackObject()
results.addError(case, exc_info)
request.callback = cb_wrapper
|
{"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -84,7 +84,7 @@\n \n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n- exc_info = failure.value, failure.type, failure.getTracebackObject()\n+ exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n \n request.callback = cb_wrapper\n", "issue": "AttributeError from contract errback\nWhen running a contract with a URL that returns non-200 response, I get the following:\r\n```\r\n2018-08-09 14:40:23 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.bureauxlocaux.com/annonce/a-louer-bureaux-a-louer-a-nantes--1289-358662> (referer: None)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/usr/local/lib/python3.6/site-packages/scrapy/contracts/__init__.py\", line 89, in eb_wrapper\r\n results.addError(case, exc_info)\r\n File \"/usr/local/lib/python3.6/unittest/runner.py\", line 67, in addError\r\n super(TextTestResult, self).addError(test, err)\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 17, in inner\r\n return method(self, *args, **kw)\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 115, in addError\r\n self.errors.append((test, self._exc_info_to_string(err, test)))\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 186, in _exc_info_to_string\r\n exctype, value, tb, limit=length, capture_locals=self.tb_locals)\r\n File \"/usr/local/lib/python3.6/traceback.py\", line 470, in __init__\r\n exc_value.__cause__.__traceback__,\r\nAttributeError: 'getset_descriptor' object has no attribute '__traceback__'\r\n```\r\n\r\nHere is how `exc_info` looks like:\r\n```\r\n(HttpError('Ignoring non-200 response',), <class 'scrapy.spidermiddlewares.httperror.HttpError'>, <traceback object at 0x7f4bdca1d948>)\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in vars(spidercls).items():\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n # calculate request args\n args, kwargs = get_spec(Request.__init__)\n kwargs['callback'] = method\n for contract 
in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n # create and prepare request\n args.remove('self')\n if set(args).issubset(set(kwargs)):\n request = Request(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.value, failure.type, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}], "after_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in vars(spidercls).items():\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', 
value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n # calculate request args\n args, kwargs = get_spec(Request.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n # create and prepare request\n args.remove('self')\n if set(args).issubset(set(kwargs)):\n request = Request(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class 
ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]}
| 2,195 | 128 |
gh_patches_debug_39579
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-2071
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Infinite loop from multidimensional array in calldata
### Version Information
* vyper Version (output of `vyper --version`): latest master
* OS: linux
* Python Version: `3.8.2`
### What's your issue about?
Using a multidimensional array in the function inputs, where the length of the second or any later dimension is 6 or more, causes an infinite loop when calling the function.
For example, each of the following methods will compile but attempting to call them fails with out of gas:
```python
@public
def foo(a: uint256[1][6]):
pass
@public
def bar(a: uint256[1][1][6]):
pass
```
For comparison, these methods execute as expected:
```python
@public
def foo(a: uint256[6][1]):
pass
@public
def bar(a: uint256[100][5][5][5]):
pass
```
#### Some observations:
* The length of the first array element has no effect.
* The data type has no effect.
* The location of the array within calldata, and total number of arguments, has no effect.
* The number of dimensions, or which dimension exceeds a length of 5, has no effect.
### How can it be fixed?
Unsure at this time.
--- END ISSUE ---
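For intuition about the hang, here is a rough Python rendering (a sketch, not compiler output) of the loop structure `make_arg_clamper` generates for `uint256[1][6]`: both the outer loop and the nested list's loop keep their counter in the same scratch slot (offset 288), so every inner pass overwrites the outer counter and the outer exit condition can never be reached.
```python
# Sketch only: models the generated nested clamp loops for `uint256[1][6]`,
# where both loops share memory offset 288 as their counter.
MEM = {288: 0}

def clamp_inner_list():
    # inner type uint256[1]: mem_to = 32 * (1 - 1) = 0, so a single pass
    MEM[288] = 0
    while True:
        MEM[288] += 32
        if not MEM[288] < 0:
            break
    # the shared slot is now 32, whatever the outer loop had stored

def clamp_outer_list(max_iterations=10):
    # outer dimension of length 6: mem_to = 32 * (6 - 1) = 160
    MEM[288] = 0
    for _ in range(max_iterations):   # guard added for the demo; the real
        clamp_inner_list()            # bytecode has no such safety net
        MEM[288] += 32                # always lands back on 64...
        if not MEM[288] < 160:        # ...so the exit test never succeeds
            return "terminated"
    return f"still looping, counter stuck at {MEM[288]}"

print(clamp_outer_list())  # -> "still looping, counter stuck at 64"
```
The accepted patch further below avoids the clash by clamping the flattened array in a single loop over its ultimate base type, so only one counter is ever live.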
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/parser/arg_clamps.py`
Content:
```
1 import functools
2 import uuid
3
4 from vyper.parser.lll_node import LLLnode
5 from vyper.types.types import (
6 ByteArrayLike,
7 ListType,
8 get_size_of_type,
9 is_base_type,
10 )
11 from vyper.utils import MemoryPositions
12
13
14 def _mk_calldatacopy_copier(pos, sz, mempos):
15 return ["calldatacopy", mempos, ["add", 4, pos], sz]
16
17
18 def _mk_codecopy_copier(pos, sz, mempos):
19 return ["codecopy", mempos, ["add", "~codelen", pos], sz]
20
21
22 def make_arg_clamper(datapos, mempos, typ, is_init=False):
23 """
24 Clamps argument to type limits.
25 """
26
27 if not is_init:
28 data_decl = ["calldataload", ["add", 4, datapos]]
29 copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)
30 else:
31 data_decl = ["codeload", ["add", "~codelen", datapos]]
32 copier = functools.partial(_mk_codecopy_copier, mempos=mempos)
33 # Numbers: make sure they're in range
34 if is_base_type(typ, "int128"):
35 return LLLnode.from_list(
36 [
37 "clamp",
38 ["mload", MemoryPositions.MINNUM],
39 data_decl,
40 ["mload", MemoryPositions.MAXNUM],
41 ],
42 typ=typ,
43 annotation="checking int128 input",
44 )
45 # Booleans: make sure they're zero or one
46 elif is_base_type(typ, "bool"):
47 return LLLnode.from_list(
48 ["uclamplt", data_decl, 2], typ=typ, annotation="checking bool input",
49 )
50 # Addresses: make sure they're in range
51 elif is_base_type(typ, "address"):
52 return LLLnode.from_list(
53 ["uclamplt", data_decl, ["mload", MemoryPositions.ADDRSIZE]],
54 typ=typ,
55 annotation="checking address input",
56 )
57 # Bytes: make sure they have the right size
58 elif isinstance(typ, ByteArrayLike):
59 return LLLnode.from_list(
60 [
61 "seq",
62 copier(data_decl, 32 + typ.maxlen),
63 ["assert", ["le", ["calldataload", ["add", 4, data_decl]], typ.maxlen]],
64 ],
65 typ=None,
66 annotation="checking bytearray input",
67 )
68 # Lists: recurse
69 elif isinstance(typ, ListType):
70 if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
71 subtype_size = get_size_of_type(typ.subtype)
72 i_incr = subtype_size * 32
73
74 mem_to = subtype_size * 32 * (typ.count - 1)
75 loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
76
77 offset = 288
78 o = [
79 ["mstore", offset, 0], # init loop
80 ["label", loop_label],
81 make_arg_clamper(
82 ["add", datapos, ["mload", offset]],
83 ["add", mempos, ["mload", offset]],
84 typ.subtype,
85 is_init,
86 ),
87 ["mstore", offset, ["add", ["mload", offset], i_incr]],
88 ["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
89 ]
90 else:
91 o = []
92 for i in range(typ.count):
93 offset = get_size_of_type(typ.subtype) * 32 * i
94 o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
95 return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
96 # Otherwise don't make any checks
97 else:
98 return LLLnode.from_list("pass")
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py
--- a/vyper/parser/arg_clamps.py
+++ b/vyper/parser/arg_clamps.py
@@ -22,6 +22,22 @@
def make_arg_clamper(datapos, mempos, typ, is_init=False):
"""
Clamps argument to type limits.
+
+ Arguments
+ ---------
+ datapos : int | LLLnode
+ Calldata offset of the value being clamped
+ mempos : int | LLLnode
+ Memory offset that the value is stored at during clamping
+ typ : vyper.types.types.BaseType
+ Type of the value
+ is_init : bool, optional
+ Boolean indicating if we are generating init bytecode
+
+ Returns
+ -------
+ LLLnode
+ Arg clamper LLL
"""
if not is_init:
@@ -68,31 +84,45 @@
# Lists: recurse
elif isinstance(typ, ListType):
if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
- subtype_size = get_size_of_type(typ.subtype)
- i_incr = subtype_size * 32
+ # find ultimate base type
+ subtype = typ.subtype
+ while hasattr(subtype, "subtype"):
+ subtype = subtype.subtype
+
+ # make arg clamper for the base type
+ offset = MemoryPositions.FREE_LOOP_INDEX
+ clamper = make_arg_clamper(
+ ["add", datapos, ["mload", offset]],
+ ["add", mempos, ["mload", offset]],
+ subtype,
+ is_init,
+ )
+ if clamper.value == "pass":
+ # no point looping if the base type doesn't require clamping
+ return clamper
+
+ # loop the entire array at once, even if it's multidimensional
+ type_size = get_size_of_type(typ)
+ i_incr = get_size_of_type(subtype) * 32
- mem_to = subtype_size * 32 * (typ.count - 1)
+ mem_to = type_size * 32
loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
- offset = 288
- o = [
+ lll_node = [
["mstore", offset, 0], # init loop
["label", loop_label],
- make_arg_clamper(
- ["add", datapos, ["mload", offset]],
- ["add", mempos, ["mload", offset]],
- typ.subtype,
- is_init,
- ),
+ clamper,
["mstore", offset, ["add", ["mload", offset], i_incr]],
["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
]
else:
- o = []
+ lll_node = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
- return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
+ lll_node.append(
+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)
+ )
+ return LLLnode.from_list(["seq"] + lll_node, typ=None, annotation="checking list input")
# Otherwise don't make any checks
else:
return LLLnode.from_list("pass")
|
{"golden_diff": "diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py\n--- a/vyper/parser/arg_clamps.py\n+++ b/vyper/parser/arg_clamps.py\n@@ -22,6 +22,22 @@\n def make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n+\n+ Arguments\n+ ---------\n+ datapos : int | LLLnode\n+ Calldata offset of the value being clamped\n+ mempos : int | LLLnode\n+ Memory offset that the value is stored at during clamping\n+ typ : vyper.types.types.BaseType\n+ Type of the value\n+ is_init : bool, optional\n+ Boolean indicating if we are generating init bytecode\n+\n+ Returns\n+ -------\n+ LLLnode\n+ Arg clamper LLL\n \"\"\"\n \n if not is_init:\n@@ -68,31 +84,45 @@\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n- subtype_size = get_size_of_type(typ.subtype)\n- i_incr = subtype_size * 32\n+ # find ultimate base type\n+ subtype = typ.subtype\n+ while hasattr(subtype, \"subtype\"):\n+ subtype = subtype.subtype\n+\n+ # make arg clamper for the base type\n+ offset = MemoryPositions.FREE_LOOP_INDEX\n+ clamper = make_arg_clamper(\n+ [\"add\", datapos, [\"mload\", offset]],\n+ [\"add\", mempos, [\"mload\", offset]],\n+ subtype,\n+ is_init,\n+ )\n+ if clamper.value == \"pass\":\n+ # no point looping if the base type doesn't require clamping\n+ return clamper\n+\n+ # loop the entire array at once, even if it's multidimensional\n+ type_size = get_size_of_type(typ)\n+ i_incr = get_size_of_type(subtype) * 32\n \n- mem_to = subtype_size * 32 * (typ.count - 1)\n+ mem_to = type_size * 32\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n \n- offset = 288\n- o = [\n+ lll_node = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n- make_arg_clamper(\n- [\"add\", datapos, [\"mload\", offset]],\n- [\"add\", mempos, [\"mload\", offset]],\n- typ.subtype,\n- is_init,\n- ),\n+ clamper,\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n- o = []\n+ lll_node = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n- return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n+ lll_node.append(\n+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)\n+ )\n+ return LLLnode.from_list([\"seq\"] + lll_node, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "issue": "Infinite loop from multidimensional array in calldata\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master\r\n* OS: linux\r\n* Python Version: `3.8.2`\r\n\r\n### What's your issue about?\r\n\r\nUsing a multidimensional array in the function inputs, where the length of the >=2nd dimension is 6 or more, causes an infinite loop when calling the function.\r\n\r\nFor example, each of the following methods will compile but attempting to call them fails with out of gas:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[1][6]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[1][1][6]):\r\n pass\r\n```\r\n\r\nFor comparison, these methods execute as expected:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[6][1]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[100][5][5][5]):\r\n pass\r\n```\r\n\r\n#### Some observations:\r\n\r\n* The length of the 
first array element has no effect.\r\n* The data type has no effect.\r\n* The location of the array within calldata, and total number of arguments, has no effect.\r\n* The number of dimensions, or dimension that exceeds a length of 5, has no effect.\r\n\r\n### How can it be fixed?\r\nUnsure at this time.\n", "before_files": [{"content": "import functools\nimport uuid\n\nfrom vyper.parser.lll_node import LLLnode\nfrom vyper.types.types import (\n ByteArrayLike,\n ListType,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.utils import MemoryPositions\n\n\ndef _mk_calldatacopy_copier(pos, sz, mempos):\n return [\"calldatacopy\", mempos, [\"add\", 4, pos], sz]\n\n\ndef _mk_codecopy_copier(pos, sz, mempos):\n return [\"codecopy\", mempos, [\"add\", \"~codelen\", pos], sz]\n\n\ndef make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n \"\"\"\n\n if not is_init:\n data_decl = [\"calldataload\", [\"add\", 4, datapos]]\n copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)\n else:\n data_decl = [\"codeload\", [\"add\", \"~codelen\", datapos]]\n copier = functools.partial(_mk_codecopy_copier, mempos=mempos)\n # Numbers: make sure they're in range\n if is_base_type(typ, \"int128\"):\n return LLLnode.from_list(\n [\n \"clamp\",\n [\"mload\", MemoryPositions.MINNUM],\n data_decl,\n [\"mload\", MemoryPositions.MAXNUM],\n ],\n typ=typ,\n annotation=\"checking int128 input\",\n )\n # Booleans: make sure they're zero or one\n elif is_base_type(typ, \"bool\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, 2], typ=typ, annotation=\"checking bool input\",\n )\n # Addresses: make sure they're in range\n elif is_base_type(typ, \"address\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, [\"mload\", MemoryPositions.ADDRSIZE]],\n typ=typ,\n annotation=\"checking address input\",\n )\n # Bytes: make sure they have the right size\n elif isinstance(typ, ByteArrayLike):\n return LLLnode.from_list(\n [\n \"seq\",\n copier(data_decl, 32 + typ.maxlen),\n [\"assert\", [\"le\", [\"calldataload\", [\"add\", 4, data_decl]], typ.maxlen]],\n ],\n typ=None,\n annotation=\"checking bytearray input\",\n )\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n subtype_size = get_size_of_type(typ.subtype)\n i_incr = subtype_size * 32\n\n mem_to = subtype_size * 32 * (typ.count - 1)\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n\n offset = 288\n o = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n make_arg_clamper(\n [\"add\", datapos, [\"mload\", offset]],\n [\"add\", mempos, [\"mload\", offset]],\n typ.subtype,\n is_init,\n ),\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n o = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "path": "vyper/parser/arg_clamps.py"}], "after_files": [{"content": "import functools\nimport uuid\n\nfrom vyper.parser.lll_node import LLLnode\nfrom vyper.types.types import (\n ByteArrayLike,\n ListType,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.utils import MemoryPositions\n\n\ndef _mk_calldatacopy_copier(pos, sz, mempos):\n 
return [\"calldatacopy\", mempos, [\"add\", 4, pos], sz]\n\n\ndef _mk_codecopy_copier(pos, sz, mempos):\n return [\"codecopy\", mempos, [\"add\", \"~codelen\", pos], sz]\n\n\ndef make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n\n Arguments\n ---------\n datapos : int | LLLnode\n Calldata offset of the value being clamped\n mempos : int | LLLnode\n Memory offset that the value is stored at during clamping\n typ : vyper.types.types.BaseType\n Type of the value\n is_init : bool, optional\n Boolean indicating if we are generating init bytecode\n\n Returns\n -------\n LLLnode\n Arg clamper LLL\n \"\"\"\n\n if not is_init:\n data_decl = [\"calldataload\", [\"add\", 4, datapos]]\n copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)\n else:\n data_decl = [\"codeload\", [\"add\", \"~codelen\", datapos]]\n copier = functools.partial(_mk_codecopy_copier, mempos=mempos)\n # Numbers: make sure they're in range\n if is_base_type(typ, \"int128\"):\n return LLLnode.from_list(\n [\n \"clamp\",\n [\"mload\", MemoryPositions.MINNUM],\n data_decl,\n [\"mload\", MemoryPositions.MAXNUM],\n ],\n typ=typ,\n annotation=\"checking int128 input\",\n )\n # Booleans: make sure they're zero or one\n elif is_base_type(typ, \"bool\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, 2], typ=typ, annotation=\"checking bool input\",\n )\n # Addresses: make sure they're in range\n elif is_base_type(typ, \"address\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, [\"mload\", MemoryPositions.ADDRSIZE]],\n typ=typ,\n annotation=\"checking address input\",\n )\n # Bytes: make sure they have the right size\n elif isinstance(typ, ByteArrayLike):\n return LLLnode.from_list(\n [\n \"seq\",\n copier(data_decl, 32 + typ.maxlen),\n [\"assert\", [\"le\", [\"calldataload\", [\"add\", 4, data_decl]], typ.maxlen]],\n ],\n typ=None,\n annotation=\"checking bytearray input\",\n )\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n # find ultimate base type\n subtype = typ.subtype\n while hasattr(subtype, \"subtype\"):\n subtype = subtype.subtype\n\n # make arg clamper for the base type\n offset = MemoryPositions.FREE_LOOP_INDEX\n clamper = make_arg_clamper(\n [\"add\", datapos, [\"mload\", offset]],\n [\"add\", mempos, [\"mload\", offset]],\n subtype,\n is_init,\n )\n if clamper.value == \"pass\":\n # no point looping if the base type doesn't require clamping\n return clamper\n\n # loop the entire array at once, even if it's multidimensional\n type_size = get_size_of_type(typ)\n i_incr = get_size_of_type(subtype) * 32\n\n mem_to = type_size * 32\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n\n lll_node = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n clamper,\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n lll_node = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n lll_node.append(\n make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)\n )\n return LLLnode.from_list([\"seq\"] + lll_node, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "path": "vyper/parser/arg_clamps.py"}]}
| 1,605 | 837 |
gh_patches_debug_1907
|
rasdani/github-patches
|
git_diff
|
google__flax-628
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'
After updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now-deprecated flax.nn package, in case backward compatibility is the reason for this issue.
The issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.
--- END ISSUE ---
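One way to read the eventual fix (a plain dependency-floor bump) is that flax 0.2.2 uses a `jax.core` attribute that only newer jax releases ship, so nothing in the install step stopped an old jax from being kept around. A hypothetical guard along these lines, not part of flax and assuming the third-party `packaging` module plus an installed jax, would surface the mismatch at import time rather than deep inside `init_by_shape`:
```python
# Hypothetical version guard, not flax code: fail fast when the installed jax
# is older than what the library actually needs.
from importlib import metadata  # Python 3.8+

from packaging.version import Version

MINIMUM_JAX = "0.1.77"  # the floor the patch below raises setup.py to

def ensure_jax_floor() -> None:
    installed = Version(metadata.version("jax"))
    if installed < Version(MINIMUM_JAX):
        raise RuntimeError(
            f"jax {installed} is too old (need >= {MINIMUM_JAX}); "
            "upgrade jax or pin flax==0.2.0"
        )

ensure_jax_floor()
```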
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding='utf-8').read()
24 except IOError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.1.59",
30 "matplotlib", # only needed for tensorboard export
31 "dataclasses;python_version<'3.7'", # will only install on py3.6
32 "msgpack",
33 ]
34
35 tests_require = [
36 "atari-py",
37 "clu", # All examples.
38 "gym",
39 "jaxlib",
40 "ml-collections",
41 "opencv-python",
42 "pytest",
43 "pytest-cov",
44 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
45 "sentencepiece", # WMT example.
46 "svn",
47 "tensorflow",
48 "tensorflow_text", # WMT example.
49 "tensorflow_datasets",
50 ]
51
52 __version__ = None
53
54 with open('flax/version.py') as f:
55 exec(f.read(), globals())
56
57 setup(
58 name="flax",
59 version=__version__,
60 description="Flax: A neural network library for JAX designed for flexibility",
61 long_description="\n\n".join([README]),
62 long_description_content_type='text/markdown',
63 classifiers=[
64 "Development Status :: 3 - Alpha",
65 "Intended Audience :: Developers",
66 "Intended Audience :: Science/Research",
67 "License :: OSI Approved :: Apache Software License",
68 "Programming Language :: Python :: 3.7",
69 "Topic :: Scientific/Engineering :: Artificial Intelligence",
70 ],
71 keywords="",
72 author="Flax team",
73 author_email="[email protected]",
74 url="https://github.com/google/flax",
75 packages=find_packages(),
76 include_package_data=False,
77 zip_safe=False,
78 install_requires=install_requires,
79 extras_require={
80 "testing": tests_require,
81 },
82 )
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
install_requires = [
"numpy>=1.12",
- "jax>=0.1.59",
+ "jax>=0.1.77",
"matplotlib", # only needed for tensorboard export
"dataclasses;python_version<'3.7'", # will only install on py3.6
"msgpack",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \n install_requires = [\n \"numpy>=1.12\",\n- \"jax>=0.1.59\",\n+ \"jax>=0.1.77\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n", "issue": "After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'\nAfter updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now deprecated flax.nn package if backward-compatibility might be the reason for this issue.\r\nThe Issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"atari-py\",\n \"clu\", # All examples.\n \"gym\",\n \"jaxlib\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow\",\n \"tensorflow_text\", # WMT example.\n \"tensorflow_datasets\",\n]\n\n__version__ = None\n\nwith open('flax/version.py') as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.77\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"atari-py\",\n \"clu\", # All examples.\n \"gym\",\n \"jaxlib\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow\",\n \"tensorflow_text\", # WMT example.\n \"tensorflow_datasets\",\n]\n\n__version__ = None\n\nwith open('flax/version.py') as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]}
| 1,129 | 110 |
gh_patches_debug_5356
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-1093
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
threading.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10
Ref: https://github.com/python/cpython/pull/25174
https://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69
--- END ISSUE ---
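For reference, the deprecation only changes the spelling, not the behaviour; a minimal standalone sketch (unrelated to sentry-sdk internals) of the old and new forms:
```python
# Minimal demo of the API change flagged above.
import threading

def start_background_worker(target):
    thread = threading.Thread(target=target, name="demo.BackgroundWorker")
    # Deprecated since Python 3.10 (emits DeprecationWarning):
    #   thread.setDaemon(True)
    thread.daemon = True  # preferred spelling, works on all supported versions
    thread.start()
    return thread

worker = start_background_worker(lambda: None)
worker.join()
```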
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/worker.py`
Content:
```
1 import os
2 import threading
3
4 from time import sleep, time
5 from sentry_sdk._compat import check_thread_support
6 from sentry_sdk._queue import Queue, Full
7 from sentry_sdk.utils import logger
8 from sentry_sdk.consts import DEFAULT_QUEUE_SIZE
9
10 from sentry_sdk._types import MYPY
11
12 if MYPY:
13 from typing import Any
14 from typing import Optional
15 from typing import Callable
16
17
18 _TERMINATOR = object()
19
20
21 class BackgroundWorker(object):
22 def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
23 # type: (int) -> None
24 check_thread_support()
25 self._queue = Queue(queue_size) # type: Queue
26 self._lock = threading.Lock()
27 self._thread = None # type: Optional[threading.Thread]
28 self._thread_for_pid = None # type: Optional[int]
29
30 @property
31 def is_alive(self):
32 # type: () -> bool
33 if self._thread_for_pid != os.getpid():
34 return False
35 if not self._thread:
36 return False
37 return self._thread.is_alive()
38
39 def _ensure_thread(self):
40 # type: () -> None
41 if not self.is_alive:
42 self.start()
43
44 def _timed_queue_join(self, timeout):
45 # type: (float) -> bool
46 deadline = time() + timeout
47 queue = self._queue
48
49 queue.all_tasks_done.acquire()
50
51 try:
52 while queue.unfinished_tasks:
53 delay = deadline - time()
54 if delay <= 0:
55 return False
56 queue.all_tasks_done.wait(timeout=delay)
57
58 return True
59 finally:
60 queue.all_tasks_done.release()
61
62 def start(self):
63 # type: () -> None
64 with self._lock:
65 if not self.is_alive:
66 self._thread = threading.Thread(
67 target=self._target, name="raven-sentry.BackgroundWorker"
68 )
69 self._thread.setDaemon(True)
70 self._thread.start()
71 self._thread_for_pid = os.getpid()
72
73 def kill(self):
74 # type: () -> None
75 """
76 Kill worker thread. Returns immediately. Not useful for
77 waiting on shutdown for events, use `flush` for that.
78 """
79 logger.debug("background worker got kill request")
80 with self._lock:
81 if self._thread:
82 try:
83 self._queue.put_nowait(_TERMINATOR)
84 except Full:
85 logger.debug("background worker queue full, kill failed")
86
87 self._thread = None
88 self._thread_for_pid = None
89
90 def flush(self, timeout, callback=None):
91 # type: (float, Optional[Any]) -> None
92 logger.debug("background worker got flush request")
93 with self._lock:
94 if self.is_alive and timeout > 0.0:
95 self._wait_flush(timeout, callback)
96 logger.debug("background worker flushed")
97
98 def _wait_flush(self, timeout, callback):
99 # type: (float, Optional[Any]) -> None
100 initial_timeout = min(0.1, timeout)
101 if not self._timed_queue_join(initial_timeout):
102 pending = self._queue.qsize() + 1
103 logger.debug("%d event(s) pending on flush", pending)
104 if callback is not None:
105 callback(pending, timeout)
106
107 if not self._timed_queue_join(timeout - initial_timeout):
108 pending = self._queue.qsize() + 1
109 logger.error("flush timed out, dropped %s events", pending)
110
111 def submit(self, callback):
112 # type: (Callable[[], None]) -> bool
113 self._ensure_thread()
114 try:
115 self._queue.put_nowait(callback)
116 return True
117 except Full:
118 return False
119
120 def _target(self):
121 # type: () -> None
122 while True:
123 callback = self._queue.get()
124 try:
125 if callback is _TERMINATOR:
126 break
127 try:
128 callback()
129 except Exception:
130 logger.error("Failed processing job", exc_info=True)
131 finally:
132 self._queue.task_done()
133 sleep(0)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py
--- a/sentry_sdk/worker.py
+++ b/sentry_sdk/worker.py
@@ -66,7 +66,7 @@
self._thread = threading.Thread(
target=self._target, name="raven-sentry.BackgroundWorker"
)
- self._thread.setDaemon(True)
+ self._thread.daemon = True
self._thread.start()
self._thread_for_pid = os.getpid()
|
{"golden_diff": "diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py\n--- a/sentry_sdk/worker.py\n+++ b/sentry_sdk/worker.py\n@@ -66,7 +66,7 @@\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n- self._thread.setDaemon(True)\n+ self._thread.daemon = True\n self._thread.start()\n self._thread_for_pid = os.getpid()\n", "issue": "threading.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10\nRef : https://github.com/python/cpython/pull/25174\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69\n", "before_files": [{"content": "import os\nimport threading\n\nfrom time import sleep, time\nfrom sentry_sdk._compat import check_thread_support\nfrom sentry_sdk._queue import Queue, Full\nfrom sentry_sdk.utils import logger\nfrom sentry_sdk.consts import DEFAULT_QUEUE_SIZE\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n\n_TERMINATOR = object()\n\n\nclass BackgroundWorker(object):\n def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):\n # type: (int) -> None\n check_thread_support()\n self._queue = Queue(queue_size) # type: Queue\n self._lock = threading.Lock()\n self._thread = None # type: Optional[threading.Thread]\n self._thread_for_pid = None # type: Optional[int]\n\n @property\n def is_alive(self):\n # type: () -> bool\n if self._thread_for_pid != os.getpid():\n return False\n if not self._thread:\n return False\n return self._thread.is_alive()\n\n def _ensure_thread(self):\n # type: () -> None\n if not self.is_alive:\n self.start()\n\n def _timed_queue_join(self, timeout):\n # type: (float) -> bool\n deadline = time() + timeout\n queue = self._queue\n\n queue.all_tasks_done.acquire()\n\n try:\n while queue.unfinished_tasks:\n delay = deadline - time()\n if delay <= 0:\n return False\n queue.all_tasks_done.wait(timeout=delay)\n\n return True\n finally:\n queue.all_tasks_done.release()\n\n def start(self):\n # type: () -> None\n with self._lock:\n if not self.is_alive:\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n self._thread.setDaemon(True)\n self._thread.start()\n self._thread_for_pid = os.getpid()\n\n def kill(self):\n # type: () -> None\n \"\"\"\n Kill worker thread. Returns immediately. 
Not useful for\n waiting on shutdown for events, use `flush` for that.\n \"\"\"\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n try:\n self._queue.put_nowait(_TERMINATOR)\n except Full:\n logger.debug(\"background worker queue full, kill failed\")\n\n self._thread = None\n self._thread_for_pid = None\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"background worker got flush request\")\n with self._lock:\n if self.is_alive and timeout > 0.0:\n self._wait_flush(timeout, callback)\n logger.debug(\"background worker flushed\")\n\n def _wait_flush(self, timeout, callback):\n # type: (float, Optional[Any]) -> None\n initial_timeout = min(0.1, timeout)\n if not self._timed_queue_join(initial_timeout):\n pending = self._queue.qsize() + 1\n logger.debug(\"%d event(s) pending on flush\", pending)\n if callback is not None:\n callback(pending, timeout)\n\n if not self._timed_queue_join(timeout - initial_timeout):\n pending = self._queue.qsize() + 1\n logger.error(\"flush timed out, dropped %s events\", pending)\n\n def submit(self, callback):\n # type: (Callable[[], None]) -> bool\n self._ensure_thread()\n try:\n self._queue.put_nowait(callback)\n return True\n except Full:\n return False\n\n def _target(self):\n # type: () -> None\n while True:\n callback = self._queue.get()\n try:\n if callback is _TERMINATOR:\n break\n try:\n callback()\n except Exception:\n logger.error(\"Failed processing job\", exc_info=True)\n finally:\n self._queue.task_done()\n sleep(0)\n", "path": "sentry_sdk/worker.py"}], "after_files": [{"content": "import os\nimport threading\n\nfrom time import sleep, time\nfrom sentry_sdk._compat import check_thread_support\nfrom sentry_sdk._queue import Queue, Full\nfrom sentry_sdk.utils import logger\nfrom sentry_sdk.consts import DEFAULT_QUEUE_SIZE\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n\n_TERMINATOR = object()\n\n\nclass BackgroundWorker(object):\n def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):\n # type: (int) -> None\n check_thread_support()\n self._queue = Queue(queue_size) # type: Queue\n self._lock = threading.Lock()\n self._thread = None # type: Optional[threading.Thread]\n self._thread_for_pid = None # type: Optional[int]\n\n @property\n def is_alive(self):\n # type: () -> bool\n if self._thread_for_pid != os.getpid():\n return False\n if not self._thread:\n return False\n return self._thread.is_alive()\n\n def _ensure_thread(self):\n # type: () -> None\n if not self.is_alive:\n self.start()\n\n def _timed_queue_join(self, timeout):\n # type: (float) -> bool\n deadline = time() + timeout\n queue = self._queue\n\n queue.all_tasks_done.acquire()\n\n try:\n while queue.unfinished_tasks:\n delay = deadline - time()\n if delay <= 0:\n return False\n queue.all_tasks_done.wait(timeout=delay)\n\n return True\n finally:\n queue.all_tasks_done.release()\n\n def start(self):\n # type: () -> None\n with self._lock:\n if not self.is_alive:\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n self._thread.daemon = True\n self._thread.start()\n self._thread_for_pid = os.getpid()\n\n def kill(self):\n # type: () -> None\n \"\"\"\n Kill worker thread. Returns immediately. 
Not useful for\n waiting on shutdown for events, use `flush` for that.\n \"\"\"\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n try:\n self._queue.put_nowait(_TERMINATOR)\n except Full:\n logger.debug(\"background worker queue full, kill failed\")\n\n self._thread = None\n self._thread_for_pid = None\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"background worker got flush request\")\n with self._lock:\n if self.is_alive and timeout > 0.0:\n self._wait_flush(timeout, callback)\n logger.debug(\"background worker flushed\")\n\n def _wait_flush(self, timeout, callback):\n # type: (float, Optional[Any]) -> None\n initial_timeout = min(0.1, timeout)\n if not self._timed_queue_join(initial_timeout):\n pending = self._queue.qsize() + 1\n logger.debug(\"%d event(s) pending on flush\", pending)\n if callback is not None:\n callback(pending, timeout)\n\n if not self._timed_queue_join(timeout - initial_timeout):\n pending = self._queue.qsize() + 1\n logger.error(\"flush timed out, dropped %s events\", pending)\n\n def submit(self, callback):\n # type: (Callable[[], None]) -> None\n self._ensure_thread()\n try:\n self._queue.put_nowait(callback)\n except Full:\n self.on_full_queue(callback)\n\n def on_full_queue(self, callback):\n # type: (Optional[Any]) -> None\n logger.error(\"background worker queue full, dropping event\")\n\n def _target(self):\n # type: () -> None\n while True:\n callback = self._queue.get()\n try:\n if callback is _TERMINATOR:\n break\n try:\n callback()\n except Exception:\n logger.error(\"Failed processing job\", exc_info=True)\n finally:\n self._queue.task_done()\n sleep(0)\n", "path": "sentry_sdk/worker.py"}]}
| 1,550 | 109 |
gh_patches_debug_13158
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-2850
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_144 false negative after updating to AWS Provider 4.0
**Describe the issue**
After updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_replication_configuration` block.
**Examples**
Sample code:
```hcl
provider "aws" {
alias = "aws-primary"
region = "us-east-1"
}
provider "aws" {
alias = "aws-dr"
region = "us-west-2"
}
resource "aws_s3_bucket" "test_bucket" {
bucket = var.bucket_name
}
resource "aws_s3_bucket" "test_dr_bucket" {
provider = aws.aws-dr
bucket = "${var.bucket_name}-dr"
}
resource "aws_s3_bucket_versioning" "test_bucket_versioning" {
bucket = aws_s3_bucket.test_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_versioning" "test_dr_bucket_versioning" {
provider = aws.aws-dr
bucket = aws_s3_bucket.test_dr_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_iam_role" "dr_replication" {
name_prefix = "replication"
description = "Allow S3 to assume the role for replication"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "s3ReplicationAssume",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_policy" "dr_replication" {
name_prefix = "replication"
description = "Allows reading for replication."
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateTags",
"s3:ObjectOwnerOverrideToBucketOwner"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.test_dr_bucket.arn}/*"
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "dr_replication" {
name = "replication"
roles = [aws_iam_role.dr_replication.name]
policy_arn = aws_iam_policy.dr_replication.arn
}
resource "aws_s3_bucket_replication_configuration" "dr_bucket_replication" {
# Must have bucket versioning enabled first
depends_on = [
aws_s3_bucket_versioning.test_bucket_versioning,
aws_s3_bucket_versioning.test_dr_bucket_versioning,
]
role = aws_iam_role.dr_replication.arn
bucket = aws_s3_bucket.test_bucket.id
rule {
id = "entire_bucket"
status = "Enabled"
destination {
bucket = aws_s3_bucket.test_dr_bucket.arn
storage_class = "DEEP_ARCHIVE"
}
}
}
```
Expected: `CKV_AWS_144` will pass.
Actual:
```
Check: CKV_AWS_144: "Ensure that S3 bucket has cross-region replication enabled"
FAILED for resource: aws_s3_bucket.test_bucket
File: /dr_test.tf:11-13
Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled
11 | resource "aws_s3_bucket" "test_bucket" {
12 | bucket = var.bucket_name
13 | }
```
**Desktop (please complete the following information):**
- OS: MacOS 10.14.6
- Checkov Version 2.0.1074
**Additional context**
On the surface, this looks like related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but to the `CKV_AWS_144` rule.
--- END ISSUE ---
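To make the failure mode concrete, here is a small sketch (not checkov's actual evaluator; the resource dicts and role ARN are made-up example data) of why a check keyed on `replication_configuration/[0]/role` inside `aws_s3_bucket` can no longer pass once provider 4.x moves replication onto the separate `aws_s3_bucket_replication_configuration` resource: the bucket block simply no longer carries that key.
```python
# Sketch of the lookup the attribute-style check effectively performs.
bucket_with_v3_syntax = {
    "bucket": "test-bucket",
    "replication_configuration": [
        {"role": "arn:aws:iam::123456789012:role/replication"}
    ],
}
bucket_with_v4_syntax = {"bucket": "test-bucket"}  # replication lives on its own resource

def old_style_check(bucket_conf) -> str:
    rules = bucket_conf.get("replication_configuration")
    if rules and rules[0].get("role"):
        return "PASSED"
    return "FAILED"

print(old_style_check(bucket_with_v3_syntax))  # PASSED
print(old_style_check(bucket_with_v4_syntax))  # FAILED even though replication is configured
```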
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py`
Content:
```
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from checkov.common.models.enums import CheckCategories
4
5
6 class S3BucketReplicationConfiguration(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that S3 bucket has cross-region replication enabled"
9 id = "CKV_AWS_144"
10 supported_resources = ['aws_s3_bucket']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "replication_configuration/[0]/role"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = S3BucketReplicationConfiguration()
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from checkov.common.models.consts import ANY_VALUE
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class S3BucketReplicationConfiguration(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that S3 bucket has cross-region replication enabled"
- id = "CKV_AWS_144"
- supported_resources = ['aws_s3_bucket']
- categories = [CheckCategories.GENERAL_SECURITY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return "replication_configuration/[0]/role"
-
- def get_expected_value(self):
- return ANY_VALUE
-
-
-check = S3BucketReplicationConfiguration()
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class S3BucketReplicationConfiguration(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that S3 bucket has cross-region replication enabled\"\n- id = \"CKV_AWS_144\"\n- supported_resources = ['aws_s3_bucket']\n- categories = [CheckCategories.GENERAL_SECURITY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"replication_configuration/[0]/role\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n-\n-\n-check = S3BucketReplicationConfiguration()\n", "issue": "CKV_AWS_144 false negative after updating to AWS Provider 4.0 \n**Describe the issue**\r\nAfter updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_lifecycle_configuration` block.\r\n\r\n**Examples**\r\nSample code:\r\n\r\n```hcl\r\nprovider \"aws\" {\r\n alias = \"aws-primary\"\r\n region = \"us-east-1\"\r\n}\r\n\r\nprovider \"aws\" {\r\n alias = \"aws-dr\"\r\n region = \"us-west-2\"\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_bucket\" {\r\n bucket = var.bucket_name\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_dr_bucket\" {\r\n provider = aws.aws-dr\r\n bucket = \"${var.bucket_name}-dr\"\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_bucket_versioning\" {\r\n bucket = aws_s3_bucket.test_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_dr_bucket_versioning\" {\r\n provider = aws.aws-dr\r\n bucket = aws_s3_bucket.test_dr_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_iam_role\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allow S3 to assume the role for replication\"\r\n\r\n assume_role_policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"s3ReplicationAssume\",\r\n \"Effect\": \"Allow\",\r\n \"Principal\": {\r\n \"Service\": \"s3.amazonaws.com\"\r\n },\r\n \"Action\": \"sts:AssumeRole\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allows reading for replication.\"\r\n\r\n policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Action\": [\r\n \"s3:GetReplicationConfiguration\",\r\n \"s3:ListBucket\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:GetObjectVersion\",\r\n \"s3:GetObjectVersionForReplication\",\r\n \"s3:GetObjectVersionAcl\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}/*\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:ReplicateObject\",\r\n \"s3:ReplicateTags\",\r\n \"s3:ObjectOwnerOverrideToBucketOwner\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": 
\"${aws_s3_bucket.test_dr_bucket.arn}/*\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy_attachment\" \"dr_replication\" {\r\n name = \"replication\"\r\n roles = [aws_iam_role.dr_replication.name]\r\n policy_arn = aws_iam_policy.dr_replication.arn\r\n}\r\n\r\nresource \"aws_s3_bucket_replication_configuration\" \"dr_bucket_replication\" {\r\n\r\n # Must have bucket versioning enabled first\r\n depends_on = [\r\n aws_s3_bucket_versioning.test_bucket_versioning,\r\n aws_s3_bucket_versioning.test_dr_bucket_versioning,\r\n ]\r\n\r\n role = aws_iam_role.dr_replication.arn\r\n bucket = aws_s3_bucket.test_bucket.id\r\n\r\n rule {\r\n id = \"entire_bucket\"\r\n status = \"Enabled\"\r\n\r\n destination {\r\n bucket = aws_s3_bucket.test_dr_bucket.arn\r\n storage_class = \"DEEP_ARCHIVE\"\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\nExpected: `CKV_AWS_144` will pass.\r\n\r\nActual: \r\n```\r\nCheck: CKV_AWS_144: \"Ensure that S3 bucket has cross-region replication enabled\"\r\n FAILED for resource: aws_s3_bucket.test_bucket\r\n File: /dr_test.tf:11-13\r\n Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled\r\n\r\n 11 | resource \"aws_s3_bucket\" \"test_bucket\" {\r\n 12 | bucket = var.bucket_name\r\n 13 | }\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 10.14.6\r\n - Checkov Version 2.0.1074\r\n\r\n**Additional context**\r\nOn the surface, this looks like related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but to the `CKV_AWS_144` rule.\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass S3BucketReplicationConfiguration(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has cross-region replication enabled\"\n id = \"CKV_AWS_144\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"replication_configuration/[0]/role\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = S3BucketReplicationConfiguration()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py"}]}
| 1,546 | 265 |
gh_patches_debug_38571
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1659
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Save configuration" Improve UX
*Feature Request*
I have some time in the next week, so I may try to implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you are left scratching your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`.
It would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/user_interaction/save_conf.py`
Content:
```
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any, Dict, TYPE_CHECKING
5
6 from ..configuration import ConfigurationOutput
7 from ..menu import Menu
8 from ..menu.menu import MenuSelectionType
9 from ..output import log
10
11 if TYPE_CHECKING:
12 _: Any
13
14
15 def save_config(config: Dict):
16
17 def preview(selection: str):
18 if options['user_config'] == selection:
19 json_config = config_output.user_config_to_json()
20 return f'{config_output.user_configuration_file}\n{json_config}'
21 elif options['user_creds'] == selection:
22 if json_config := config_output.user_credentials_to_json():
23 return f'{config_output.user_credentials_file}\n{json_config}'
24 else:
25 return str(_('No configuration'))
26 elif options['disk_layout'] == selection:
27 if json_config := config_output.disk_layout_to_json():
28 return f'{config_output.disk_layout_file}\n{json_config}'
29 else:
30 return str(_('No configuration'))
31 elif options['all'] == selection:
32 output = f'{config_output.user_configuration_file}\n'
33 if json_config := config_output.user_credentials_to_json():
34 output += f'{config_output.user_credentials_file}\n'
35 if json_config := config_output.disk_layout_to_json():
36 output += f'{config_output.disk_layout_file}\n'
37 return output[:-1]
38 return None
39
40 config_output = ConfigurationOutput(config)
41
42 options = {
43 'user_config': str(_('Save user configuration')),
44 'user_creds': str(_('Save user credentials')),
45 'disk_layout': str(_('Save disk layout')),
46 'all': str(_('Save all'))
47 }
48
49 choice = Menu(
50 _('Choose which configuration to save'),
51 list(options.values()),
52 sort=False,
53 skip=True,
54 preview_size=0.75,
55 preview_command=preview
56 ).run()
57
58 if choice.type_ == MenuSelectionType.Skip:
59 return
60
61 while True:
62 path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
63 dest_path = Path(path)
64 if dest_path.exists() and dest_path.is_dir():
65 break
66 log(_('Not a valid directory: {}').format(dest_path), fg='red')
67
68 if options['user_config'] == choice.value:
69 config_output.save_user_config(dest_path)
70 elif options['user_creds'] == choice.value:
71 config_output.save_user_creds(dest_path)
72 elif options['disk_layout'] == choice.value:
73 config_output.save_disk_layout(dest_path)
74 elif options['all'] == choice.value:
75 config_output.save_user_config(dest_path)
76 config_output.save_user_creds(dest_path)
77 config_output.save_disk_layout(dest_path)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py
--- a/archinstall/lib/user_interaction/save_conf.py
+++ b/archinstall/lib/user_interaction/save_conf.py
@@ -1,9 +1,12 @@
from __future__ import annotations
+import logging
+
from pathlib import Path
from typing import Any, Dict, TYPE_CHECKING
from ..configuration import ConfigurationOutput
+from ..general import SysCommand
from ..menu import Menu
from ..menu.menu import MenuSelectionType
from ..output import log
@@ -58,20 +61,75 @@
if choice.type_ == MenuSelectionType.Skip:
return
- while True:
- path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
- dest_path = Path(path)
- if dest_path.exists() and dest_path.is_dir():
- break
- log(_('Not a valid directory: {}').format(dest_path), fg='red')
-
- if options['user_config'] == choice.value:
- config_output.save_user_config(dest_path)
- elif options['user_creds'] == choice.value:
- config_output.save_user_creds(dest_path)
- elif options['disk_layout'] == choice.value:
- config_output.save_disk_layout(dest_path)
- elif options['all'] == choice.value:
- config_output.save_user_config(dest_path)
- config_output.save_user_creds(dest_path)
- config_output.save_disk_layout(dest_path)
+ dirs_to_exclude = [
+ '/bin',
+ '/dev',
+ '/lib',
+ '/lib64',
+ '/lost+found',
+ '/opt',
+ '/proc',
+ '/run',
+ '/sbin',
+ '/srv',
+ '/sys',
+ '/usr',
+ '/var',
+ ]
+ log(
+ _('When picking a directory to save configuration files to,'
+ ' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),
+ level=logging.DEBUG
+ )
+
+ log(_('Finding possible directories to save configuration files ...'), level=logging.INFO)
+
+ find_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '
+ file_picker_command = f'find / {find_exclude} -o -type d -print0'
+ possible_save_dirs = list(
+ filter(None, SysCommand(file_picker_command).decode().split('\x00'))
+ )
+
+ selection = Menu(
+ _('Select directory (or directories) for saving configuration files'),
+ possible_save_dirs,
+ multi=True,
+ skip=True,
+ allow_reset=False,
+ ).run()
+
+ match selection.type_:
+ case MenuSelectionType.Skip:
+ return
+ case _:
+ save_dirs = selection.value
+
+ prompt = _('Do you want to save {} configuration file(s) in the following locations?\n\n{}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ )
+ save_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()
+ if save_confirmation == Menu.no():
+ return
+
+ log(
+ _('Saving {} configuration files to {}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ ),
+ level=logging.DEBUG
+ )
+
+ if save_dirs is not None:
+ for save_dir_str in save_dirs:
+ save_dir = Path(save_dir_str)
+ if options['user_config'] == choice.value:
+ config_output.save_user_config(save_dir)
+ elif options['user_creds'] == choice.value:
+ config_output.save_user_creds(save_dir)
+ elif options['disk_layout'] == choice.value:
+ config_output.save_disk_layout(save_dir)
+ elif options['all'] == choice.value:
+ config_output.save_user_config(save_dir)
+ config_output.save_user_creds(save_dir)
+ config_output.save_disk_layout(save_dir)
|
{"golden_diff": "diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py\n--- a/archinstall/lib/user_interaction/save_conf.py\n+++ b/archinstall/lib/user_interaction/save_conf.py\n@@ -1,9 +1,12 @@\n from __future__ import annotations\n \n+import logging\n+\n from pathlib import Path\n from typing import Any, Dict, TYPE_CHECKING\n \n from ..configuration import ConfigurationOutput\n+from ..general import SysCommand\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n from ..output import log\n@@ -58,20 +61,75 @@\n \tif choice.type_ == MenuSelectionType.Skip:\n \t\treturn\n \n-\twhile True:\n-\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n-\t\tdest_path = Path(path)\n-\t\tif dest_path.exists() and dest_path.is_dir():\n-\t\t\tbreak\n-\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n-\n-\tif options['user_config'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\telif options['user_creds'] == choice.value:\n-\t\tconfig_output.save_user_creds(dest_path)\n-\telif options['disk_layout'] == choice.value:\n-\t\tconfig_output.save_disk_layout(dest_path)\n-\telif options['all'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\t\tconfig_output.save_user_creds(dest_path)\n-\t\tconfig_output.save_disk_layout(dest_path)\n+\tdirs_to_exclude = [\n+\t\t'/bin',\n+\t\t'/dev',\n+\t\t'/lib',\n+\t\t'/lib64',\n+\t\t'/lost+found',\n+\t\t'/opt',\n+\t\t'/proc',\n+\t\t'/run',\n+\t\t'/sbin',\n+\t\t'/srv',\n+\t\t'/sys',\n+\t\t'/usr',\n+\t\t'/var',\n+\t]\n+\tlog(\n+\t\t_('When picking a directory to save configuration files to,'\n+\t\t' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\n+\tlog(_('Finding possible directories to save configuration files ...'), level=logging.INFO)\n+\t\n+\tfind_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '\n+\tfile_picker_command = f'find / {find_exclude} -o -type d -print0'\n+\tpossible_save_dirs = list(\n+\t\tfilter(None, SysCommand(file_picker_command).decode().split('\\x00'))\n+\t)\n+\n+\tselection = Menu(\n+\t\t_('Select directory (or directories) for saving configuration files'),\n+\t\tpossible_save_dirs,\n+\t\tmulti=True,\n+\t\tskip=True,\n+\t\tallow_reset=False,\n+\t).run()\n+\n+\tmatch selection.type_:\n+\t\tcase MenuSelectionType.Skip:\n+\t\t\treturn\n+\t\tcase _:\n+\t\t\tsave_dirs = selection.value\n+\n+\tprompt = _('Do you want to save {} configuration file(s) in the following locations?\\n\\n{}').format(\n+\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\tsave_dirs\n+\t)\n+\tsave_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n+\tif save_confirmation == Menu.no():\n+\t\treturn\n+\t\n+\tlog(\n+\t\t_('Saving {} configuration files to {}').format(\n+\t\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\t\tsave_dirs\n+\t\t),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\t\n+\tif save_dirs is not None:\n+\t\tfor save_dir_str in save_dirs:\n+\t\t\tsave_dir = Path(save_dir_str)\n+\t\t\tif options['user_config'] == choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\telif options['user_creds'] == choice.value:\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\telif options['disk_layout'] == choice.value:\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n+\t\t\telif options['all'] == 
choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n", "issue": "\"Save configuration\" Improve UX\n*Feature Request*\r\n\r\nI have some time in the next week, I may try and implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you have to scratch your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`. \r\n\r\nIt would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom ..configuration import ConfigurationOutput\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef save_config(config: Dict):\n\n\tdef preview(selection: str):\n\t\tif options['user_config'] == selection:\n\t\t\tjson_config = config_output.user_config_to_json()\n\t\t\treturn f'{config_output.user_configuration_file}\\n{json_config}'\n\t\telif options['user_creds'] == selection:\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\treturn f'{config_output.user_credentials_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['disk_layout'] == selection:\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\treturn f'{config_output.disk_layout_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['all'] == selection:\n\t\t\toutput = f'{config_output.user_configuration_file}\\n'\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\toutput += f'{config_output.user_credentials_file}\\n'\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\toutput += f'{config_output.disk_layout_file}\\n'\n\t\t\treturn output[:-1]\n\t\treturn None\n\n\tconfig_output = ConfigurationOutput(config)\n\n\toptions = {\n\t\t'user_config': str(_('Save user configuration')),\n\t\t'user_creds': str(_('Save user credentials')),\n\t\t'disk_layout': str(_('Save disk layout')),\n\t\t'all': str(_('Save all'))\n\t}\n\n\tchoice = Menu(\n\t\t_('Choose which configuration to save'),\n\t\tlist(options.values()),\n\t\tsort=False,\n\t\tskip=True,\n\t\tpreview_size=0.75,\n\t\tpreview_command=preview\n\t).run()\n\n\tif choice.type_ == MenuSelectionType.Skip:\n\t\treturn\n\n\twhile True:\n\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n\t\tdest_path = Path(path)\n\t\tif dest_path.exists() and dest_path.is_dir():\n\t\t\tbreak\n\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n\n\tif options['user_config'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\telif options['user_creds'] == choice.value:\n\t\tconfig_output.save_user_creds(dest_path)\n\telif options['disk_layout'] == choice.value:\n\t\tconfig_output.save_disk_layout(dest_path)\n\telif options['all'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\t\tconfig_output.save_user_creds(dest_path)\n\t\tconfig_output.save_disk_layout(dest_path)\n", "path": "archinstall/lib/user_interaction/save_conf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport 
logging\n\nfrom pathlib import Path\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom ..configuration import ConfigurationOutput\nfrom ..general import SysCommand\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef save_config(config: Dict):\n\n\tdef preview(selection: str):\n\t\tif options['user_config'] == selection:\n\t\t\tjson_config = config_output.user_config_to_json()\n\t\t\treturn f'{config_output.user_configuration_file}\\n{json_config}'\n\t\telif options['user_creds'] == selection:\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\treturn f'{config_output.user_credentials_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['disk_layout'] == selection:\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\treturn f'{config_output.disk_layout_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['all'] == selection:\n\t\t\toutput = f'{config_output.user_configuration_file}\\n'\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\toutput += f'{config_output.user_credentials_file}\\n'\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\toutput += f'{config_output.disk_layout_file}\\n'\n\t\t\treturn output[:-1]\n\t\treturn None\n\n\tconfig_output = ConfigurationOutput(config)\n\n\toptions = {\n\t\t'user_config': str(_('Save user configuration')),\n\t\t'user_creds': str(_('Save user credentials')),\n\t\t'disk_layout': str(_('Save disk layout')),\n\t\t'all': str(_('Save all'))\n\t}\n\n\tchoice = Menu(\n\t\t_('Choose which configuration to save'),\n\t\tlist(options.values()),\n\t\tsort=False,\n\t\tskip=True,\n\t\tpreview_size=0.75,\n\t\tpreview_command=preview\n\t).run()\n\n\tif choice.type_ == MenuSelectionType.Skip:\n\t\treturn\n\n\tdirs_to_exclude = [\n\t\t'/bin',\n\t\t'/dev',\n\t\t'/lib',\n\t\t'/lib64',\n\t\t'/lost+found',\n\t\t'/opt',\n\t\t'/proc',\n\t\t'/run',\n\t\t'/sbin',\n\t\t'/srv',\n\t\t'/sys',\n\t\t'/usr',\n\t\t'/var',\n\t]\n\tlog(\n\t\t_('When picking a directory to save configuration files to,'\n\t\t' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),\n\t\tlevel=logging.DEBUG\n\t)\n\n\tlog(_('Finding possible directories to save configuration files ...'), level=logging.INFO)\n\t\n\tfind_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '\n\tfile_picker_command = f'find / {find_exclude} -o -type d -print0'\n\tpossible_save_dirs = list(\n\t\tfilter(None, SysCommand(file_picker_command).decode().split('\\x00'))\n\t)\n\n\tselection = Menu(\n\t\t_('Select directory (or directories) for saving configuration files'),\n\t\tpossible_save_dirs,\n\t\tmulti=True,\n\t\tskip=True,\n\t\tallow_reset=False,\n\t).run()\n\n\tmatch selection.type_:\n\t\tcase MenuSelectionType.Skip:\n\t\t\treturn\n\t\tcase _:\n\t\t\tsave_dirs = selection.value\n\n\tprompt = _('Do you want to save {} configuration file(s) in the following locations?\\n\\n{}').format(\n\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n\t\tsave_dirs\n\t)\n\tsave_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n\tif save_confirmation == Menu.no():\n\t\treturn\n\t\n\tlog(\n\t\t_('Saving {} configuration files to {}').format(\n\t\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n\t\t\tsave_dirs\n\t\t),\n\t\tlevel=logging.DEBUG\n\t)\n\t\n\tif save_dirs is not 
None:\n\t\tfor save_dir_str in save_dirs:\n\t\t\tsave_dir = Path(save_dir_str)\n\t\t\tif options['user_config'] == choice.value:\n\t\t\t\tconfig_output.save_user_config(save_dir)\n\t\t\telif options['user_creds'] == choice.value:\n\t\t\t\tconfig_output.save_user_creds(save_dir)\n\t\t\telif options['disk_layout'] == choice.value:\n\t\t\t\tconfig_output.save_disk_layout(save_dir)\n\t\t\telif options['all'] == choice.value:\n\t\t\t\tconfig_output.save_user_config(save_dir)\n\t\t\t\tconfig_output.save_user_creds(save_dir)\n\t\t\t\tconfig_output.save_disk_layout(save_dir)\n", "path": "archinstall/lib/user_interaction/save_conf.py"}]}
| 1,116 | 923 |
gh_patches_debug_14642
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-6819
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mitmdump does not exit
#### Problem Description
Mitmdump does not exit automatically when executing:
`mitmdump -nr infile -w outfile
`
Until version 10.0.0 it was working properly: when running mitmdump with "-n", the process finished automatically once the outfile was written.
#### Steps to reproduce the behavior:
1. Generate a mitm file
2. Execute mitmdump -nr infile -w outfile
#### System Information
Mitmproxy: 10.3.0 binary
Python: 3.12.3
OpenSSL: OpenSSL 3.2.1 30 Jan 2024
Platform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/readfile.py`
Content:
```
1 import asyncio
2 import logging
3 import os.path
4 import sys
5 from typing import BinaryIO
6 from typing import Optional
7
8 from mitmproxy import command
9 from mitmproxy import ctx
10 from mitmproxy import exceptions
11 from mitmproxy import flowfilter
12 from mitmproxy import io
13
14 logger = logging.getLogger(__name__)
15
16
17 class ReadFile:
18 """
19 An addon that handles reading from file on startup.
20 """
21
22 def __init__(self):
23 self.filter = None
24 self._read_task: asyncio.Task | None = None
25
26 def load(self, loader):
27 loader.add_option("rfile", Optional[str], None, "Read flows from file.")
28 loader.add_option(
29 "readfile_filter", Optional[str], None, "Read only matching flows."
30 )
31
32 def configure(self, updated):
33 if "readfile_filter" in updated:
34 if ctx.options.readfile_filter:
35 try:
36 self.filter = flowfilter.parse(ctx.options.readfile_filter)
37 except ValueError as e:
38 raise exceptions.OptionsError(str(e)) from e
39 else:
40 self.filter = None
41
42 async def load_flows(self, fo: BinaryIO) -> int:
43 cnt = 0
44 freader = io.FlowReader(fo)
45 try:
46 for flow in freader.stream():
47 if self.filter and not self.filter(flow):
48 continue
49 await ctx.master.load_flow(flow)
50 cnt += 1
51 except (OSError, exceptions.FlowReadException) as e:
52 if cnt:
53 logging.warning("Flow file corrupted - loaded %i flows." % cnt)
54 else:
55 logging.error("Flow file corrupted.")
56 raise exceptions.FlowReadException(str(e)) from e
57 else:
58 return cnt
59
60 async def load_flows_from_path(self, path: str) -> int:
61 path = os.path.expanduser(path)
62 try:
63 with open(path, "rb") as f:
64 return await self.load_flows(f)
65 except OSError as e:
66 logging.error(f"Cannot load flows: {e}")
67 raise exceptions.FlowReadException(str(e)) from e
68
69 async def doread(self, rfile: str) -> None:
70 try:
71 await self.load_flows_from_path(rfile)
72 except exceptions.FlowReadException as e:
73 logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
74 finally:
75 self._read_task = None
76
77 def running(self):
78 if ctx.options.rfile:
79 self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))
80
81 @command.command("readfile.reading")
82 def reading(self) -> bool:
83 return bool(self._read_task)
84
85
86 class ReadFileStdin(ReadFile):
87 """Support the special case of "-" for reading from stdin"""
88
89 async def load_flows_from_path(self, path: str) -> int:
90 if path == "-": # pragma: no cover
91 # Need to think about how to test this. This function is scheduled
92 # onto the event loop, where a sys.stdin mock has no effect.
93 return await self.load_flows(sys.stdin.buffer)
94 else:
95 return await super().load_flows_from_path(path)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py
--- a/mitmproxy/addons/readfile.py
+++ b/mitmproxy/addons/readfile.py
@@ -71,8 +71,6 @@
await self.load_flows_from_path(rfile)
except exceptions.FlowReadException as e:
logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
- finally:
- self._read_task = None
def running(self):
if ctx.options.rfile:
@@ -80,7 +78,7 @@
@command.command("readfile.reading")
def reading(self) -> bool:
- return bool(self._read_task)
+ return bool(self._read_task and not self._read_task.done())
class ReadFileStdin(ReadFile):
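
A minimal sketch of why the patched check uses `task.done()` instead of clearing the task reference. The `Reader` class below is hypothetical and only mirrors the shape of the addon: a finished task no longer counts as "reading", so a shutdown watcher polling `reading()` can proceed once the read completes.

```python
import asyncio

class Reader:
    def __init__(self):
        self._read_task = None

    async def _doread(self):
        await asyncio.sleep(0.1)  # stands in for loading flows from the rfile

    def running(self):
        # mirrors the addon's running() hook
        self._read_task = asyncio.create_task(self._doread())

    def reading(self) -> bool:
        # the patched check: keep the task object, but report False once done
        return bool(self._read_task and not self._read_task.done())

async def main():
    r = Reader()
    r.running()
    while r.reading():          # e.g. a shutdown watcher waiting on readers
        await asyncio.sleep(0.05)
    print("read finished; shutdown can proceed")

asyncio.run(main())
```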
|
{"golden_diff": "diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py\n--- a/mitmproxy/addons/readfile.py\n+++ b/mitmproxy/addons/readfile.py\n@@ -71,8 +71,6 @@\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n- finally:\n- self._read_task = None\n \n def running(self):\n if ctx.options.rfile:\n@@ -80,7 +78,7 @@\n \n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n- return bool(self._read_task)\n+ return bool(self._read_task and not self._read_task.done())\n \n \n class ReadFileStdin(ReadFile):\n", "issue": "Mitmdump does not exit\n#### Problem Description\r\nMitmdump does not exit automatically when executing:\r\n\r\n`mitmdump -nr infile -w outfile\r\n`\r\n\r\nUntil version 10.0.0 it was working properly and when running mitmdump with \"-n\" the process finished automatically once the outfile was written.\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Generate a mitm file\r\n2. Execute mitmdump -nr infile -w outfile\r\n\r\n#### System Information\r\nMitmproxy: 10.3.0 binary\r\nPython: 3.12.3\r\nOpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\nPlatform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport os.path\nimport sys\nfrom typing import BinaryIO\nfrom typing import Optional\n\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReadFile:\n \"\"\"\n An addon that handles reading from file on startup.\n \"\"\"\n\n def __init__(self):\n self.filter = None\n self._read_task: asyncio.Task | None = None\n\n def load(self, loader):\n loader.add_option(\"rfile\", Optional[str], None, \"Read flows from file.\")\n loader.add_option(\n \"readfile_filter\", Optional[str], None, \"Read only matching flows.\"\n )\n\n def configure(self, updated):\n if \"readfile_filter\" in updated:\n if ctx.options.readfile_filter:\n try:\n self.filter = flowfilter.parse(ctx.options.readfile_filter)\n except ValueError as e:\n raise exceptions.OptionsError(str(e)) from e\n else:\n self.filter = None\n\n async def load_flows(self, fo: BinaryIO) -> int:\n cnt = 0\n freader = io.FlowReader(fo)\n try:\n for flow in freader.stream():\n if self.filter and not self.filter(flow):\n continue\n await ctx.master.load_flow(flow)\n cnt += 1\n except (OSError, exceptions.FlowReadException) as e:\n if cnt:\n logging.warning(\"Flow file corrupted - loaded %i flows.\" % cnt)\n else:\n logging.error(\"Flow file corrupted.\")\n raise exceptions.FlowReadException(str(e)) from e\n else:\n return cnt\n\n async def load_flows_from_path(self, path: str) -> int:\n path = os.path.expanduser(path)\n try:\n with open(path, \"rb\") as f:\n return await self.load_flows(f)\n except OSError as e:\n logging.error(f\"Cannot load flows: {e}\")\n raise exceptions.FlowReadException(str(e)) from e\n\n async def doread(self, rfile: str) -> None:\n try:\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n finally:\n self._read_task = None\n\n def running(self):\n if ctx.options.rfile:\n self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))\n\n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n return bool(self._read_task)\n\n\nclass ReadFileStdin(ReadFile):\n 
\"\"\"Support the special case of \"-\" for reading from stdin\"\"\"\n\n async def load_flows_from_path(self, path: str) -> int:\n if path == \"-\": # pragma: no cover\n # Need to think about how to test this. This function is scheduled\n # onto the event loop, where a sys.stdin mock has no effect.\n return await self.load_flows(sys.stdin.buffer)\n else:\n return await super().load_flows_from_path(path)\n", "path": "mitmproxy/addons/readfile.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport os.path\nimport sys\nfrom typing import BinaryIO\nfrom typing import Optional\n\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReadFile:\n \"\"\"\n An addon that handles reading from file on startup.\n \"\"\"\n\n def __init__(self):\n self.filter = None\n self._read_task: asyncio.Task | None = None\n\n def load(self, loader):\n loader.add_option(\"rfile\", Optional[str], None, \"Read flows from file.\")\n loader.add_option(\n \"readfile_filter\", Optional[str], None, \"Read only matching flows.\"\n )\n\n def configure(self, updated):\n if \"readfile_filter\" in updated:\n if ctx.options.readfile_filter:\n try:\n self.filter = flowfilter.parse(ctx.options.readfile_filter)\n except ValueError as e:\n raise exceptions.OptionsError(str(e)) from e\n else:\n self.filter = None\n\n async def load_flows(self, fo: BinaryIO) -> int:\n cnt = 0\n freader = io.FlowReader(fo)\n try:\n for flow in freader.stream():\n if self.filter and not self.filter(flow):\n continue\n await ctx.master.load_flow(flow)\n cnt += 1\n except (OSError, exceptions.FlowReadException) as e:\n if cnt:\n logging.warning(\"Flow file corrupted - loaded %i flows.\" % cnt)\n else:\n logging.error(\"Flow file corrupted.\")\n raise exceptions.FlowReadException(str(e)) from e\n else:\n return cnt\n\n async def load_flows_from_path(self, path: str) -> int:\n path = os.path.expanduser(path)\n try:\n with open(path, \"rb\") as f:\n return await self.load_flows(f)\n except OSError as e:\n logging.error(f\"Cannot load flows: {e}\")\n raise exceptions.FlowReadException(str(e)) from e\n\n async def doread(self, rfile: str) -> None:\n try:\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n\n def running(self):\n if ctx.options.rfile:\n self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))\n\n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n return bool(self._read_task and not self._read_task.done())\n\n\nclass ReadFileStdin(ReadFile):\n \"\"\"Support the special case of \"-\" for reading from stdin\"\"\"\n\n async def load_flows_from_path(self, path: str) -> int:\n if path == \"-\": # pragma: no cover\n # Need to think about how to test this. This function is scheduled\n # onto the event loop, where a sys.stdin mock has no effect.\n return await self.load_flows(sys.stdin.buffer)\n else:\n return await super().load_flows_from_path(path)\n", "path": "mitmproxy/addons/readfile.py"}]}
| 1,308 | 184 |
gh_patches_debug_33189
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1221
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent capitalization in `plot_hdi` docstring
**Describe the bug**
Docs have inconsistent capitalization

**To Reproduce**
https://arviz-devs.github.io/arviz/generated/arviz.plot_hdi.html#arviz.plot_hdi
**Expected behavior**
Consistency across arg docstrings
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/hdiplot.py`
Content:
```
1 """Plot highest density intervals for regression data."""
2 import warnings
3
4 import numpy as np
5 from scipy.interpolate import griddata
6 from scipy.signal import savgol_filter
7
8 from ..stats import hdi
9 from .plot_utils import get_plotting_function, matplotlib_kwarg_dealiaser
10 from ..rcparams import rcParams
11 from ..utils import credible_interval_warning
12
13
14 def plot_hdi(
15 x,
16 y,
17 hdi_prob=None,
18 color="C1",
19 circular=False,
20 smooth=True,
21 smooth_kwargs=None,
22 fill_kwargs=None,
23 plot_kwargs=None,
24 ax=None,
25 backend=None,
26 backend_kwargs=None,
27 show=None,
28 credible_interval=None,
29 ):
30 r"""
31 Plot hdi intervals for regression data.
32
33 Parameters
34 ----------
35 x : array-like
36 Values to plot
37 y : array-like
38 values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
39 hdi_prob : float, optional
40 Probability for the highest density interval. Defaults to 0.94.
41 color : str
42 Color used for the limits of the hdi and fill. Should be a valid matplotlib color
43 circular : bool, optional
44 Whether to compute the hdi taking into account `x` is a circular variable
45 (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
46 smooth : boolean
47 If True the result will be smoothed by first computing a linear interpolation of the data
48 over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
49 Defaults to True.
50 smooth_kwargs : dict, optional
51 Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
52 details
53 fill_kwargs : dict
54 Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
55 plot_kwargs : dict
56 Keywords passed to hdi limits
57 ax: axes, optional
58 Matplotlib axes or bokeh figures.
59 backend: str, optional
60 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
61 backend_kwargs: bool, optional
62 These are kwargs specific to the backend being used. For additional documentation
63 check the plotting method of the backend.
64 show : bool, optional
65 Call backend show function.
66 credible_interval: float, optional
67 deprecated: Please see hdi_prob
68
69 Returns
70 -------
71 axes : matplotlib axes or bokeh figures
72 """
73 if credible_interval:
74 hdi_prob = credible_interval_warning(credible_interval, hdi_prob)
75
76 plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
77 plot_kwargs.setdefault("color", color)
78 plot_kwargs.setdefault("alpha", 0)
79
80 fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, "hexbin")
81 fill_kwargs.setdefault("color", color)
82 fill_kwargs.setdefault("alpha", 0.5)
83
84 x = np.asarray(x)
85 y = np.asarray(y)
86
87 x_shape = x.shape
88 y_shape = y.shape
89 if y_shape[-len(x_shape) :] != x_shape:
90 msg = "Dimension mismatch for x: {} and y: {}."
91 msg += " y-dimensions should be (chain, draw, *x.shape) or"
92 msg += " (draw, *x.shape)"
93 raise TypeError(msg.format(x_shape, y_shape))
94
95 if len(y_shape[: -len(x_shape)]) > 1:
96 new_shape = tuple([-1] + list(x_shape))
97 y = y.reshape(new_shape)
98
99 if hdi_prob is None:
100 hdi_prob = rcParams["stats.hdi_prob"]
101 else:
102 if not 1 >= hdi_prob > 0:
103 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
104
105 hdi_ = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False)
106
107 if smooth:
108 if smooth_kwargs is None:
109 smooth_kwargs = {}
110 smooth_kwargs.setdefault("window_length", 55)
111 smooth_kwargs.setdefault("polyorder", 2)
112 x_data = np.linspace(x.min(), x.max(), 200)
113 x_data[0] = (x_data[0] + x_data[1]) / 2
114 hdi_interp = griddata(x, hdi_, x_data)
115 y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)
116 else:
117 idx = np.argsort(x)
118 x_data = x[idx]
119 y_data = hdi_[idx]
120
121 hdiplot_kwargs = dict(
122 ax=ax,
123 x_data=x_data,
124 y_data=y_data,
125 plot_kwargs=plot_kwargs,
126 fill_kwargs=fill_kwargs,
127 backend_kwargs=backend_kwargs,
128 show=show,
129 )
130
131 if backend is None:
132 backend = rcParams["plot.backend"]
133 backend = backend.lower()
134
135 # TODO: Add backend kwargs
136 plot = get_plotting_function("plot_hdi", "hdiplot", backend)
137 ax = plot(**hdiplot_kwargs)
138 return ax
139
140
141 def plot_hpd(*args, **kwargs): # noqa: D103
142 warnings.warn("plot_hdi has been deprecated, please use plot_hdi", DeprecationWarning)
143 return plot_hdi(*args, **kwargs)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py
--- a/arviz/plots/hdiplot.py
+++ b/arviz/plots/hdiplot.py
@@ -33,13 +33,13 @@
Parameters
----------
x : array-like
- Values to plot
+ Values to plot.
y : array-like
- values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
+ Values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
hdi_prob : float, optional
Probability for the highest density interval. Defaults to 0.94.
color : str
- Color used for the limits of the hdi and fill. Should be a valid matplotlib color
+ Color used for the limits of the hdi and fill. Should be a valid matplotlib color.
circular : bool, optional
Whether to compute the hdi taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
@@ -49,11 +49,11 @@
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
- details
+ details.
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
- Keywords passed to hdi limits
+ Keywords passed to hdi limits.
ax: axes, optional
Matplotlib axes or bokeh figures.
backend: str, optional
@@ -64,7 +64,7 @@
show : bool, optional
Call backend show function.
credible_interval: float, optional
- deprecated: Please see hdi_prob
+ Deprecated: Please see hdi_prob
Returns
-------
|
{"golden_diff": "diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py\n--- a/arviz/plots/hdiplot.py\n+++ b/arviz/plots/hdiplot.py\n@@ -33,13 +33,13 @@\n Parameters\n ----------\n x : array-like\n- Values to plot\n+ Values to plot.\n y : array-like\n- values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n+ Values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to 0.94.\n color : str\n- Color used for the limits of the hdi and fill. Should be a valid matplotlib color\n+ Color used for the limits of the hdi and fill. Should be a valid matplotlib color.\n circular : bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n@@ -49,11 +49,11 @@\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for\n- details\n+ details.\n fill_kwargs : dict\n Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).\n plot_kwargs : dict\n- Keywords passed to hdi limits\n+ Keywords passed to hdi limits.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n@@ -64,7 +64,7 @@\n show : bool, optional\n Call backend show function.\n credible_interval: float, optional\n- deprecated: Please see hdi_prob\n+ Deprecated: Please see hdi_prob\n \n Returns\n -------\n", "issue": "Inconsistent capitalization in `plot_hdi` docstring\n**Describe the bug**\r\nDocs have inconsistent capitalization\r\n\r\n\r\n\r\n**To Reproduce**\r\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_hdi.html#arviz.plot_hdi\r\n\r\n**Expected behavior**\r\nConsistency across arg docstrings\r\n\n", "before_files": [{"content": "\"\"\"Plot highest density intervals for regression data.\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\n\nfrom ..stats import hdi\nfrom .plot_utils import get_plotting_function, matplotlib_kwarg_dealiaser\nfrom ..rcparams import rcParams\nfrom ..utils import credible_interval_warning\n\n\ndef plot_hdi(\n x,\n y,\n hdi_prob=None,\n color=\"C1\",\n circular=False,\n smooth=True,\n smooth_kwargs=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n credible_interval=None,\n):\n r\"\"\"\n Plot hdi intervals for regression data.\n\n Parameters\n ----------\n x : array-like\n Values to plot\n y : array-like\n values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to 0.94.\n color : str\n Color used for the limits of the hdi and fill. Should be a valid matplotlib color\n circular : bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n smooth : boolean\n If True the result will be smoothed by first computing a linear interpolation of the data\n over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. 
See Scipy's documentation for\n details\n fill_kwargs : dict\n Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).\n plot_kwargs : dict\n Keywords passed to hdi limits\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n credible_interval: float, optional\n deprecated: Please see hdi_prob\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n \"\"\"\n if credible_interval:\n hdi_prob = credible_interval_warning(credible_interval, hdi_prob)\n\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n\n fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, \"hexbin\")\n fill_kwargs.setdefault(\"color\", color)\n fill_kwargs.setdefault(\"alpha\", 0.5)\n\n x = np.asarray(x)\n y = np.asarray(y)\n\n x_shape = x.shape\n y_shape = y.shape\n if y_shape[-len(x_shape) :] != x_shape:\n msg = \"Dimension mismatch for x: {} and y: {}.\"\n msg += \" y-dimensions should be (chain, draw, *x.shape) or\"\n msg += \" (draw, *x.shape)\"\n raise TypeError(msg.format(x_shape, y_shape))\n\n if len(y_shape[: -len(x_shape)]) > 1:\n new_shape = tuple([-1] + list(x_shape))\n y = y.reshape(new_shape)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n hdi_ = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False)\n\n if smooth:\n if smooth_kwargs is None:\n smooth_kwargs = {}\n smooth_kwargs.setdefault(\"window_length\", 55)\n smooth_kwargs.setdefault(\"polyorder\", 2)\n x_data = np.linspace(x.min(), x.max(), 200)\n x_data[0] = (x_data[0] + x_data[1]) / 2\n hdi_interp = griddata(x, hdi_, x_data)\n y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)\n else:\n idx = np.argsort(x)\n x_data = x[idx]\n y_data = hdi_[idx]\n\n hdiplot_kwargs = dict(\n ax=ax,\n x_data=x_data,\n y_data=y_data,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_hdi\", \"hdiplot\", backend)\n ax = plot(**hdiplot_kwargs)\n return ax\n\n\ndef plot_hpd(*args, **kwargs): # noqa: D103\n warnings.warn(\"plot_hdi has been deprecated, please use plot_hdi\", DeprecationWarning)\n return plot_hdi(*args, **kwargs)\n", "path": "arviz/plots/hdiplot.py"}], "after_files": [{"content": "\"\"\"Plot highest density intervals for regression data.\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\n\nfrom ..stats import hdi\nfrom .plot_utils import get_plotting_function, matplotlib_kwarg_dealiaser\nfrom ..rcparams import rcParams\nfrom ..utils import credible_interval_warning\n\n\ndef plot_hdi(\n x,\n y,\n hdi_prob=None,\n color=\"C1\",\n circular=False,\n smooth=True,\n smooth_kwargs=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n credible_interval=None,\n):\n r\"\"\"\n Plot hdi intervals for regression data.\n\n Parameters\n 
----------\n x : array-like\n Values to plot.\n y : array-like\n Values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to 0.94.\n color : str\n Color used for the limits of the hdi and fill. Should be a valid matplotlib color.\n circular : bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n smooth : boolean\n If True the result will be smoothed by first computing a linear interpolation of the data\n over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for\n details.\n fill_kwargs : dict\n Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).\n plot_kwargs : dict\n Keywords passed to hdi limits.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n credible_interval: float, optional\n Deprecated: Please see hdi_prob\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n \"\"\"\n if credible_interval:\n hdi_prob = credible_interval_warning(credible_interval, hdi_prob)\n\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n\n fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, \"hexbin\")\n fill_kwargs.setdefault(\"color\", color)\n fill_kwargs.setdefault(\"alpha\", 0.5)\n\n x = np.asarray(x)\n y = np.asarray(y)\n\n x_shape = x.shape\n y_shape = y.shape\n if y_shape[-len(x_shape) :] != x_shape:\n msg = \"Dimension mismatch for x: {} and y: {}.\"\n msg += \" y-dimensions should be (chain, draw, *x.shape) or\"\n msg += \" (draw, *x.shape)\"\n raise TypeError(msg.format(x_shape, y_shape))\n\n if len(y_shape[: -len(x_shape)]) > 1:\n new_shape = tuple([-1] + list(x_shape))\n y = y.reshape(new_shape)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n hdi_ = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False)\n\n if smooth:\n if smooth_kwargs is None:\n smooth_kwargs = {}\n smooth_kwargs.setdefault(\"window_length\", 55)\n smooth_kwargs.setdefault(\"polyorder\", 2)\n x_data = np.linspace(x.min(), x.max(), 200)\n x_data[0] = (x_data[0] + x_data[1]) / 2\n hdi_interp = griddata(x, hdi_, x_data)\n y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)\n else:\n idx = np.argsort(x)\n x_data = x[idx]\n y_data = hdi_[idx]\n\n hdiplot_kwargs = dict(\n ax=ax,\n x_data=x_data,\n y_data=y_data,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_hdi\", \"hdiplot\", backend)\n ax = plot(**hdiplot_kwargs)\n return ax\n\n\ndef plot_hpd(*args, **kwargs): # noqa: D103\n warnings.warn(\"plot_hdi has 
been deprecated, please use plot_hdi\", DeprecationWarning)\n return plot_hdi(*args, **kwargs)\n", "path": "arviz/plots/hdiplot.py"}]}
| 1,888 | 444 |
gh_patches_debug_13929
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1740
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Books on multiple shelves duplicated in the "all books" view
<img width="1172" alt="Screen Shot 2021-12-02 at 11 47 40 AM" src="https://user-images.githubusercontent.com/1807695/144495864-f94425a7-b11f-4df4-8ab7-c850d5a17996.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/shelf/shelf.py`
Content:
```
1 """ shelf views """
2 from collections import namedtuple
3
4 from django.db.models import OuterRef, Subquery, F
5 from django.contrib.auth.decorators import login_required
6 from django.core.paginator import Paginator
7 from django.http import HttpResponseBadRequest
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.utils.decorators import method_decorator
11 from django.utils.translation import gettext_lazy as _
12 from django.views import View
13
14 from bookwyrm import forms, models
15 from bookwyrm.activitypub import ActivitypubResponse
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.views.helpers import is_api_request, get_user_from_username
18
19
20 # pylint: disable=no-self-use
21 class Shelf(View):
22 """shelf page"""
23
24 def get(self, request, username, shelf_identifier=None):
25 """display a shelf"""
26 user = get_user_from_username(request.user, username)
27
28 is_self = user == request.user
29
30 if is_self:
31 shelves = user.shelf_set.all()
32 else:
33 shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()
34
35 # get the shelf and make sure the logged in user should be able to see it
36 if shelf_identifier:
37 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
38 shelf.raise_visible_to_user(request.user)
39 books = shelf.books
40 else:
41 # this is a constructed "all books" view, with a fake "shelf" obj
42 FakeShelf = namedtuple(
43 "Shelf", ("identifier", "name", "user", "books", "privacy")
44 )
45 books = (
46 models.Edition.viewer_aware_objects(request.user)
47 .filter(
48 # privacy is ensured because the shelves are already filtered above
49 shelfbook__shelf__in=shelves
50 )
51 .distinct()
52 )
53 shelf = FakeShelf("all", _("All books"), user, books, "public")
54
55 if is_api_request(request) and shelf_identifier:
56 return ActivitypubResponse(shelf.to_activity(**request.GET))
57
58 reviews = models.Review.objects
59 if not is_self:
60 reviews = models.Review.privacy_filter(request.user)
61
62 reviews = reviews.filter(
63 user=user,
64 rating__isnull=False,
65 book__id=OuterRef("id"),
66 deleted=False,
67 ).order_by("-published_date")
68
69 reading = models.ReadThrough.objects
70
71 reading = reading.filter(user=user, book__id=OuterRef("id")).order_by(
72 "start_date"
73 )
74
75 books = books.annotate(
76 rating=Subquery(reviews.values("rating")[:1]),
77 shelved_date=F("shelfbook__shelved_date"),
78 start_date=Subquery(reading.values("start_date")[:1]),
79 finish_date=Subquery(reading.values("finish_date")[:1]),
80 author=Subquery(
81 models.Book.objects.filter(id=OuterRef("id")).values("authors__name")[
82 :1
83 ]
84 ),
85 ).prefetch_related("authors")
86
87 books = sort_books(books, request.GET.get("sort"))
88
89 paginated = Paginator(
90 books,
91 PAGE_LENGTH,
92 )
93 page = paginated.get_page(request.GET.get("page"))
94 data = {
95 "user": user,
96 "is_self": is_self,
97 "shelves": shelves,
98 "shelf": shelf,
99 "books": page,
100 "edit_form": forms.ShelfForm(instance=shelf if shelf_identifier else None),
101 "create_form": forms.ShelfForm(),
102 "sort": request.GET.get("sort"),
103 "page_range": paginated.get_elided_page_range(
104 page.number, on_each_side=2, on_ends=1
105 ),
106 }
107
108 return TemplateResponse(request, "shelf/shelf.html", data)
109
110 @method_decorator(login_required, name="dispatch")
111 # pylint: disable=unused-argument
112 def post(self, request, username, shelf_identifier):
113 """edit a shelf"""
114 user = get_user_from_username(request.user, username)
115 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
116 shelf.raise_not_editable(request.user)
117
118 # you can't change the name of the default shelves
119 if not shelf.editable and request.POST.get("name") != shelf.name:
120 return HttpResponseBadRequest()
121
122 form = forms.ShelfForm(request.POST, instance=shelf)
123 if not form.is_valid():
124 return redirect(shelf.local_path)
125 shelf = form.save()
126 return redirect(shelf.local_path)
127
128
129 def sort_books(books, sort):
130 """Books in shelf sorting"""
131 sort_fields = [
132 "title",
133 "author",
134 "shelved_date",
135 "start_date",
136 "finish_date",
137 "rating",
138 ]
139
140 if sort in sort_fields:
141 books = books.order_by(sort)
142 elif sort and sort[1:] in sort_fields:
143 books = books.order_by(F(sort[1:]).desc(nulls_last=True))
144 else:
145 books = books.order_by("-shelved_date")
146 return books
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py
--- a/bookwyrm/views/shelf/shelf.py
+++ b/bookwyrm/views/shelf/shelf.py
@@ -72,9 +72,13 @@
"start_date"
)
+ if shelf_identifier:
+ books = books.annotate(shelved_date=F("shelfbook__shelved_date"))
+ else:
+ # sorting by shelved date will cause duplicates in the "all books" view
+ books = books.annotate(shelved_date=F("updated_date"))
books = books.annotate(
rating=Subquery(reviews.values("rating")[:1]),
- shelved_date=F("shelfbook__shelved_date"),
start_date=Subquery(reading.values("start_date")[:1]),
finish_date=Subquery(reading.values("finish_date")[:1]),
author=Subquery(
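
A minimal sketch, in plain SQL rather than the Django ORM, of why the pre-patch query duplicated books: annotating `shelved_date` joins the shelf/book link table, and once each joined row carries its own date, `DISTINCT` can no longer collapse them. The table names here are hypothetical stand-ins for the real models.

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    CREATE TABLE book (id INTEGER PRIMARY KEY, title TEXT);
    CREATE TABLE shelfbook (book_id INTEGER, shelf_id INTEGER, shelved_date TEXT);
    INSERT INTO book VALUES (1, 'Example');
    INSERT INTO shelfbook VALUES (1, 10, '2021-12-01'), (1, 11, '2021-12-02');
""")

# One row per book while no per-shelf column is selected.
print(con.execute(
    "SELECT DISTINCT book.id, book.title FROM book "
    "JOIN shelfbook ON shelfbook.book_id = book.id"
).fetchall())

# The pre-patch 'all books' shape: selecting shelved_date yields one row per
# shelf, which is exactly the duplication reported in the issue.
print(con.execute(
    "SELECT DISTINCT book.id, book.title, shelfbook.shelved_date FROM book "
    "JOIN shelfbook ON shelfbook.book_id = book.id"
).fetchall())
```

This is why the patch only annotates `shelfbook__shelved_date` on single-shelf views and falls back to `updated_date` for the constructed "all books" view.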
|
{"golden_diff": "diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py\n--- a/bookwyrm/views/shelf/shelf.py\n+++ b/bookwyrm/views/shelf/shelf.py\n@@ -72,9 +72,13 @@\n \"start_date\"\n )\n \n+ if shelf_identifier:\n+ books = books.annotate(shelved_date=F(\"shelfbook__shelved_date\"))\n+ else:\n+ # sorting by shelved date will cause duplicates in the \"all books\" view\n+ books = books.annotate(shelved_date=F(\"updated_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n- shelved_date=F(\"shelfbook__shelved_date\"),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n", "issue": "Books on multiple shelves duplicated in the \"all books\" view\n<img width=\"1172\" alt=\"Screen Shot 2021-12-02 at 11 47 40 AM\" src=\"https://user-images.githubusercontent.com/1807695/144495864-f94425a7-b11f-4df4-8ab7-c850d5a17996.png\">\r\n\n", "before_files": [{"content": "\"\"\" shelf views \"\"\"\nfrom collections import namedtuple\n\nfrom django.db.models import OuterRef, Subquery, F\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request, get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Shelf(View):\n \"\"\"shelf page\"\"\"\n\n def get(self, request, username, shelf_identifier=None):\n \"\"\"display a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n\n is_self = user == request.user\n\n if is_self:\n shelves = user.shelf_set.all()\n else:\n shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()\n\n # get the shelf and make sure the logged in user should be able to see it\n if shelf_identifier:\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_visible_to_user(request.user)\n books = shelf.books\n else:\n # this is a constructed \"all books\" view, with a fake \"shelf\" obj\n FakeShelf = namedtuple(\n \"Shelf\", (\"identifier\", \"name\", \"user\", \"books\", \"privacy\")\n )\n books = (\n models.Edition.viewer_aware_objects(request.user)\n .filter(\n # privacy is ensured because the shelves are already filtered above\n shelfbook__shelf__in=shelves\n )\n .distinct()\n )\n shelf = FakeShelf(\"all\", _(\"All books\"), user, books, \"public\")\n\n if is_api_request(request) and shelf_identifier:\n return ActivitypubResponse(shelf.to_activity(**request.GET))\n\n reviews = models.Review.objects\n if not is_self:\n reviews = models.Review.privacy_filter(request.user)\n\n reviews = reviews.filter(\n user=user,\n rating__isnull=False,\n book__id=OuterRef(\"id\"),\n deleted=False,\n ).order_by(\"-published_date\")\n\n reading = models.ReadThrough.objects\n\n reading = reading.filter(user=user, book__id=OuterRef(\"id\")).order_by(\n \"start_date\"\n )\n\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n shelved_date=F(\"shelfbook__shelved_date\"),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n 
finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n models.Book.objects.filter(id=OuterRef(\"id\")).values(\"authors__name\")[\n :1\n ]\n ),\n ).prefetch_related(\"authors\")\n\n books = sort_books(books, request.GET.get(\"sort\"))\n\n paginated = Paginator(\n books,\n PAGE_LENGTH,\n )\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelves,\n \"shelf\": shelf,\n \"books\": page,\n \"edit_form\": forms.ShelfForm(instance=shelf if shelf_identifier else None),\n \"create_form\": forms.ShelfForm(),\n \"sort\": request.GET.get(\"sort\"),\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"shelf/shelf.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username, shelf_identifier):\n \"\"\"edit a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_not_editable(request.user)\n\n # you can't change the name of the default shelves\n if not shelf.editable and request.POST.get(\"name\") != shelf.name:\n return HttpResponseBadRequest()\n\n form = forms.ShelfForm(request.POST, instance=shelf)\n if not form.is_valid():\n return redirect(shelf.local_path)\n shelf = form.save()\n return redirect(shelf.local_path)\n\n\ndef sort_books(books, sort):\n \"\"\"Books in shelf sorting\"\"\"\n sort_fields = [\n \"title\",\n \"author\",\n \"shelved_date\",\n \"start_date\",\n \"finish_date\",\n \"rating\",\n ]\n\n if sort in sort_fields:\n books = books.order_by(sort)\n elif sort and sort[1:] in sort_fields:\n books = books.order_by(F(sort[1:]).desc(nulls_last=True))\n else:\n books = books.order_by(\"-shelved_date\")\n return books\n", "path": "bookwyrm/views/shelf/shelf.py"}], "after_files": [{"content": "\"\"\" shelf views \"\"\"\nfrom collections import namedtuple\n\nfrom django.db.models import OuterRef, Subquery, F\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request, get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Shelf(View):\n \"\"\"shelf page\"\"\"\n\n def get(self, request, username, shelf_identifier=None):\n \"\"\"display a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n\n is_self = user == request.user\n\n if is_self:\n shelves = user.shelf_set.all()\n else:\n shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()\n\n # get the shelf and make sure the logged in user should be able to see it\n if shelf_identifier:\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_visible_to_user(request.user)\n books = shelf.books\n else:\n # this is a constructed \"all books\" view, with a fake \"shelf\" obj\n FakeShelf = namedtuple(\n \"Shelf\", (\"identifier\", \"name\", \"user\", \"books\", \"privacy\")\n )\n books = (\n 
models.Edition.viewer_aware_objects(request.user)\n .filter(\n # privacy is ensured because the shelves are already filtered above\n shelfbook__shelf__in=shelves\n )\n .distinct()\n )\n shelf = FakeShelf(\"all\", _(\"All books\"), user, books, \"public\")\n\n if is_api_request(request) and shelf_identifier:\n return ActivitypubResponse(shelf.to_activity(**request.GET))\n\n reviews = models.Review.objects\n if not is_self:\n reviews = models.Review.privacy_filter(request.user)\n\n reviews = reviews.filter(\n user=user,\n rating__isnull=False,\n book__id=OuterRef(\"id\"),\n deleted=False,\n ).order_by(\"-published_date\")\n\n reading = models.ReadThrough.objects\n\n reading = reading.filter(user=user, book__id=OuterRef(\"id\")).order_by(\n \"start_date\"\n )\n\n if shelf_identifier:\n books = books.annotate(shelved_date=F(\"shelfbook__shelved_date\"))\n else:\n # sorting by shelved date will cause duplicates in the \"all books\" view\n books = books.annotate(shelved_date=F(\"updated_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n models.Book.objects.filter(id=OuterRef(\"id\")).values(\"authors__name\")[\n :1\n ]\n ),\n ).prefetch_related(\"authors\")\n\n books = sort_books(books, request.GET.get(\"sort\"))\n\n paginated = Paginator(\n books,\n PAGE_LENGTH,\n )\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelves,\n \"shelf\": shelf,\n \"books\": page,\n \"edit_form\": forms.ShelfForm(instance=shelf if shelf_identifier else None),\n \"create_form\": forms.ShelfForm(),\n \"sort\": request.GET.get(\"sort\"),\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"shelf/shelf.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username, shelf_identifier):\n \"\"\"edit a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_not_editable(request.user)\n\n # you can't change the name of the default shelves\n if not shelf.editable and request.POST.get(\"name\") != shelf.name:\n return HttpResponseBadRequest()\n\n form = forms.ShelfForm(request.POST, instance=shelf)\n if not form.is_valid():\n return redirect(shelf.local_path)\n shelf = form.save()\n return redirect(shelf.local_path)\n\n\ndef sort_books(books, sort):\n \"\"\"Books in shelf sorting\"\"\"\n sort_fields = [\n \"title\",\n \"author\",\n \"shelved_date\",\n \"start_date\",\n \"finish_date\",\n \"rating\",\n ]\n\n if sort in sort_fields:\n books = books.order_by(sort)\n elif sort and sort[1:] in sort_fields:\n books = books.order_by(F(sort[1:]).desc(nulls_last=True))\n else:\n books = books.order_by(\"-shelved_date\")\n return books\n", "path": "bookwyrm/views/shelf/shelf.py"}]}
| 1,805 | 206 |
gh_patches_debug_30254
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-436
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warn when mismatching cygwin git/python
See #352 and #353 for how this can manifest itself
--- END ISSUE ---
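The mismatch typically shows up through path handling: a Cygwin git reports the repository toplevel as a POSIX path (`/home/user/repo` or `/cygdrive/c/...`), while a native Windows git reports a drive-letter path (`C:/Users/...`), so a mixed install breaks path handling further down the line. Below is a minimal sketch of how the two sides can be told apart from Python; the helper name and printed fields are illustrative assumptions, not part of the issue or of the eventual patch.
```python
import subprocess
import sys


def describe_git_environment():
    # Cygwin git prints a POSIX path for the repo root, native Windows git
    # prints a drive-letter path such as C:/Users/me/repo.
    toplevel = subprocess.check_output(
        ('git', 'rev-parse', '--show-toplevel')
    ).decode().strip()

    is_cygwin_python = sys.platform == 'cygwin'
    is_cygwin_git = toplevel.startswith('/')

    return {
        'python': 'cygwin' if is_cygwin_python else 'native windows',
        'git': 'cygwin' if is_cygwin_git else 'native windows',
        'mismatch': is_cygwin_python != is_cygwin_git,
    }


if __name__ == '__main__':
    print(describe_git_environment())
```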
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os.path
6 import re
7
8 from pre_commit.errors import FatalError
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11 from pre_commit.util import memoize_by_cwd
12
13
14 logger = logging.getLogger('pre_commit')
15
16
17 def get_root():
18 try:
19 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
20 except CalledProcessError:
21 raise FatalError(
22 'git failed. Is it installed, and are you in a Git repository '
23 'directory?'
24 )
25
26
27 def get_git_dir(git_root):
28 return os.path.normpath(os.path.join(
29 git_root,
30 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
31 ))
32
33
34 def is_in_merge_conflict():
35 git_dir = get_git_dir('.')
36 return (
37 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
38 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
39 )
40
41
42 def parse_merge_msg_for_conflicts(merge_msg):
43 # Conflicted files start with tabs
44 return [
45 line.lstrip('#').strip()
46 for line in merge_msg.splitlines()
47 # '#\t' for git 2.4.1
48 if line.startswith(('\t', '#\t'))
49 ]
50
51
52 @memoize_by_cwd
53 def get_conflicted_files():
54 logger.info('Checking merge-conflict files only.')
55 # Need to get the conflicted files from the MERGE_MSG because they could
56 # have resolved the conflict by choosing one side or the other
57 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
58 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
59
60 # This will get the rest of the changes made after the merge.
61 # If they resolved the merge conflict by choosing a mesh of both sides
62 # this will also include the conflicted files
63 tree_hash = cmd_output('git', 'write-tree')[1].strip()
64 merge_diff_filenames = cmd_output(
65 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
66 )[1].splitlines()
67 return set(merge_conflict_filenames) | set(merge_diff_filenames)
68
69
70 @memoize_by_cwd
71 def get_staged_files():
72 return cmd_output(
73 'git', 'diff', '--staged', '--name-only',
74 # Everything except for D
75 '--diff-filter=ACMRTUXB'
76 )[1].splitlines()
77
78
79 @memoize_by_cwd
80 def get_all_files():
81 return cmd_output('git', 'ls-files')[1].splitlines()
82
83
84 def get_files_matching(all_file_list_strategy):
85 @functools.wraps(all_file_list_strategy)
86 @memoize_by_cwd
87 def wrapper(include_expr, exclude_expr):
88 include_regex = re.compile(include_expr)
89 exclude_regex = re.compile(exclude_expr)
90 return set(
91 filename
92 for filename in all_file_list_strategy()
93 if (
94 include_regex.search(filename) and
95 not exclude_regex.search(filename) and
96 os.path.lexists(filename)
97 )
98 )
99 return wrapper
100
101
102 get_staged_files_matching = get_files_matching(get_staged_files)
103 get_all_files_matching = get_files_matching(get_all_files)
104 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
105
```
Path: `pre_commit/main.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pkg_resources
8
9 from pre_commit import color
10 from pre_commit import five
11 from pre_commit import git
12 from pre_commit.commands.autoupdate import autoupdate
13 from pre_commit.commands.clean import clean
14 from pre_commit.commands.install_uninstall import install
15 from pre_commit.commands.install_uninstall import uninstall
16 from pre_commit.commands.run import run
17 from pre_commit.error_handler import error_handler
18 from pre_commit.logging_handler import add_logging_handler
19 from pre_commit.runner import Runner
20
21
22 # https://github.com/pre-commit/pre-commit/issues/217
23 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
24 # to install packages to the wrong place. We don't want anything to deal with
25 # pyvenv
26 os.environ.pop('__PYVENV_LAUNCHER__', None)
27
28
29 def _add_color_option(parser):
30 parser.add_argument(
31 '--color', default='auto', type=color.use_color,
32 metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
33 help='Whether to use color in output. Defaults to `%(default)s`.',
34 )
35
36
37 def main(argv=None):
38 argv = argv if argv is not None else sys.argv[1:]
39 argv = [five.to_text(arg) for arg in argv]
40 parser = argparse.ArgumentParser()
41
42 # http://stackoverflow.com/a/8521644/812183
43 parser.add_argument(
44 '-V', '--version',
45 action='version',
46 version='%(prog)s {}'.format(
47 pkg_resources.get_distribution('pre-commit').version
48 )
49 )
50
51 subparsers = parser.add_subparsers(dest='command')
52
53 install_parser = subparsers.add_parser(
54 'install', help='Install the pre-commit script.',
55 )
56 _add_color_option(install_parser)
57 install_parser.add_argument(
58 '-f', '--overwrite', action='store_true',
59 help='Overwrite existing hooks / remove migration mode.',
60 )
61 install_parser.add_argument(
62 '--install-hooks', action='store_true',
63 help=(
64 'Whether to install hook environments for all environments '
65 'in the config file.'
66 ),
67 )
68 install_parser.add_argument(
69 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
70 default='pre-commit',
71 )
72
73 uninstall_parser = subparsers.add_parser(
74 'uninstall', help='Uninstall the pre-commit script.',
75 )
76 _add_color_option(uninstall_parser)
77 uninstall_parser.add_argument(
78 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
79 default='pre-commit',
80 )
81
82 clean_parser = subparsers.add_parser(
83 'clean', help='Clean out pre-commit files.',
84 )
85 _add_color_option(clean_parser)
86
87 autoupdate_parser = subparsers.add_parser(
88 'autoupdate',
89 help="Auto-update pre-commit config to the latest repos' versions.",
90 )
91 _add_color_option(autoupdate_parser)
92
93 run_parser = subparsers.add_parser('run', help='Run hooks.')
94 _add_color_option(run_parser)
95 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
96 run_parser.add_argument(
97 '--no-stash', default=False, action='store_true',
98 help='Use this option to prevent auto stashing of unstaged files.',
99 )
100 run_parser.add_argument(
101 '--verbose', '-v', action='store_true', default=False,
102 )
103 run_parser.add_argument(
104 '--origin', '-o',
105 help="The origin branch's commit_id when using `git push`.",
106 )
107 run_parser.add_argument(
108 '--source', '-s',
109 help="The remote branch's commit_id when using `git push`.",
110 )
111 run_parser.add_argument(
112 '--allow-unstaged-config', default=False, action='store_true',
113 help=(
114 'Allow an unstaged config to be present. Note that this will '
115 'be stashed before parsing unless --no-stash is specified.'
116 ),
117 )
118 run_parser.add_argument(
119 '--hook-stage', choices=('commit', 'push'), default='commit',
120 help='The stage during which the hook is fired e.g. commit or push.',
121 )
122 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
123 run_mutex_group.add_argument(
124 '--all-files', '-a', action='store_true', default=False,
125 help='Run on all the files in the repo. Implies --no-stash.',
126 )
127 run_mutex_group.add_argument(
128 '--files', nargs='*', default=[],
129 help='Specific filenames to run hooks on.',
130 )
131
132 help = subparsers.add_parser(
133 'help', help='Show help for a specific command.',
134 )
135 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
136
137 # Argparse doesn't really provide a way to use a `default` subparser
138 if len(argv) == 0:
139 argv = ['run']
140 args = parser.parse_args(argv)
141 if args.command == 'run':
142 args.files = [
143 os.path.relpath(os.path.abspath(filename), git.get_root())
144 for filename in args.files
145 ]
146
147 if args.command == 'help':
148 if args.help_cmd:
149 parser.parse_args([args.help_cmd, '--help'])
150 else:
151 parser.parse_args(['--help'])
152
153 with error_handler():
154 add_logging_handler(args.color)
155 runner = Runner.create()
156
157 if args.command == 'install':
158 return install(
159 runner, overwrite=args.overwrite, hooks=args.install_hooks,
160 hook_type=args.hook_type,
161 )
162 elif args.command == 'uninstall':
163 return uninstall(runner, hook_type=args.hook_type)
164 elif args.command == 'clean':
165 return clean(runner)
166 elif args.command == 'autoupdate':
167 return autoupdate(runner)
168 elif args.command == 'run':
169 return run(runner, args)
170 else:
171 raise NotImplementedError(
172 'Command {} not implemented.'.format(args.command)
173 )
174
175 raise AssertionError(
176 'Command {} failed to exit with a returncode'.format(args.command)
177 )
178
179
180 if __name__ == '__main__':
181 exit(main())
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -4,6 +4,7 @@
import logging
import os.path
import re
+import sys
from pre_commit.errors import FatalError
from pre_commit.util import CalledProcessError
@@ -102,3 +103,26 @@
get_staged_files_matching = get_files_matching(get_staged_files)
get_all_files_matching = get_files_matching(get_all_files)
get_conflicted_files_matching = get_files_matching(get_conflicted_files)
+
+
+def check_for_cygwin_mismatch():
+ """See https://github.com/pre-commit/pre-commit/issues/354"""
+ if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
+ is_cygwin_python = sys.platform == 'cygwin'
+ toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
+ is_cygwin_git = toplevel.startswith('/')
+
+ if is_cygwin_python ^ is_cygwin_git:
+ exe_type = {True: '(cygwin)', False: '(windows)'}
+ logger.warn(
+ 'pre-commit has detected a mix of cygwin python / git\n'
+ 'This combination is not supported, it is likely you will '
+ 'receive an error later in the program.\n'
+ 'Make sure to use cygwin git+python while using cygwin\n'
+ 'These can be installed through the cygwin installer.\n'
+ ' - python {}\n'
+ ' - git {}\n'.format(
+ exe_type[is_cygwin_python],
+ exe_type[is_cygwin_git],
+ )
+ )
diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -152,6 +152,7 @@
with error_handler():
add_logging_handler(args.color)
+ git.check_for_cygwin_mismatch()
runner = Runner.create()
if args.command == 'install':
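The patch reduces the check to two booleans: whether the interpreter is the Cygwin build of Python (`sys.platform == 'cygwin'`) and whether git answers with a POSIX-style toplevel (leading `/`); XOR-ing them flags a mixed install. A rough standalone illustration of that logic follows, with sample paths that are assumptions for the example rather than values taken from the patch:
```python
def is_mismatched(platform, git_toplevel):
    # Cygwin python reports sys.platform == 'cygwin'; Cygwin git prints
    # POSIX paths, so a leading '/' is the tell-tale sign for git.
    is_cygwin_python = platform == 'cygwin'
    is_cygwin_git = git_toplevel.startswith('/')
    return is_cygwin_python ^ is_cygwin_git


# Illustrative combinations:
assert not is_mismatched('cygwin', '/home/user/repo')     # both cygwin
assert not is_mismatched('win32', 'C:/Users/user/repo')   # both native
assert is_mismatched('cygwin', 'C:/Users/user/repo')      # mixed
assert is_mismatched('win32', '/home/user/repo')          # mixed
```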
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -4,6 +4,7 @@\n import logging\n import os.path\n import re\n+import sys\n \n from pre_commit.errors import FatalError\n from pre_commit.util import CalledProcessError\n@@ -102,3 +103,26 @@\n get_staged_files_matching = get_files_matching(get_staged_files)\n get_all_files_matching = get_files_matching(get_all_files)\n get_conflicted_files_matching = get_files_matching(get_conflicted_files)\n+\n+\n+def check_for_cygwin_mismatch():\n+ \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n+ if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n+ is_cygwin_python = sys.platform == 'cygwin'\n+ toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n+ is_cygwin_git = toplevel.startswith('/')\n+\n+ if is_cygwin_python ^ is_cygwin_git:\n+ exe_type = {True: '(cygwin)', False: '(windows)'}\n+ logger.warn(\n+ 'pre-commit has detected a mix of cygwin python / git\\n'\n+ 'This combination is not supported, it is likely you will '\n+ 'receive an error later in the program.\\n'\n+ 'Make sure to use cygwin git+python while using cygwin\\n'\n+ 'These can be installed through the cygwin installer.\\n'\n+ ' - python {}\\n'\n+ ' - git {}\\n'.format(\n+ exe_type[is_cygwin_python],\n+ exe_type[is_cygwin_git],\n+ )\n+ )\ndiff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -152,6 +152,7 @@\n \n with error_handler():\n add_logging_handler(args.color)\n+ git.check_for_cygwin_mismatch()\n runner = Runner.create()\n \n if args.command == 'install':\n", "issue": "Warn when mismatching cygwin git/python\nSee #352 and #353 for how this can manifest itself\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. 
Defaults to `%(default)s`.',\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom 
pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. Defaults to `%(default)s`.',\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. 
commit or push.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n git.check_for_cygwin_mismatch()\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]}
| 3,065 | 477 |
gh_patches_debug_5366
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-7634
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tokenizer failed with unicode text
Tokenizer failed with unicode text
d = [u'ali veli kırk dokuz elli',u'ali veli kırk dokuz elli veli kırk dokuz']
tokenizer.fit_on_texts(d)
TypeError Traceback (most recent call last)
<ipython-input-181-b9cd26967244> in <module>()
----> 1 tokenizer.fit_on_texts(d)
C:\Anaconda2\lib\site-packages\keras-2.0.3-py2.7.egg\keras\preprocessing\text.pyc in fit_on_texts(self, texts)
117 self.filters,
118 self.lower,
--> 119 self.split)
120 for w in seq:
121 if w in self.word_counts:
C:\Anaconda2\lib\site-packages\keras-2.0.3-py2.7.egg\keras\preprocessing\text.pyc in text_to_word_sequence(text, filters, lower, split)
36 if lower:
37 text = text.lower()
---> 38 text = text.translate(maketrans(filters, split * len(filters)))
39 seq = text.split(split)
40 return [i for i in seq if i]
TypeError: character mapping must return integer, None or unicode
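The traceback comes from a Python 2 asymmetry: `str.translate` accepts the 256-character table built by `string.maketrans`, but `unicode.translate` expects a mapping from code points to replacements, so handing it the maketrans table raises exactly this TypeError. A minimal sketch of the behaviour, and of the dict-based form that does work for unicode, is shown below (an illustration assuming Python 2, not code from the repository):
```python
# -*- coding: utf-8 -*-
# Python 2 sketch: str.translate vs unicode.translate
from __future__ import print_function
import string

filters = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
table = string.maketrans(filters, ' ' * len(filters))

print('hello, world!'.translate(table))        # byte string: works

text = u'ali, veli! kırk dokuz elli'
try:
    text.translate(table)                      # unicode string: fails
except TypeError as exc:
    print('unicode + maketrans table fails:', exc)

# unicode.translate wants a {ord(char): replacement} mapping instead
unicode_map = dict((ord(c), u' ') for c in filters)
print(text.translate(unicode_map).split())
```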
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/preprocessing/text.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Utilities for text input preprocessing.
3
4 May benefit from a fast Cython rewrite.
5 """
6 from __future__ import absolute_import
7 from __future__ import division
8
9 import string
10 import sys
11 import warnings
12 from collections import OrderedDict
13 from hashlib import md5
14
15 import numpy as np
16 from six.moves import range
17 from six.moves import zip
18
19 if sys.version_info < (3,):
20 maketrans = string.maketrans
21 else:
22 maketrans = str.maketrans
23
24
25 def text_to_word_sequence(text,
26 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
27 lower=True, split=" "):
28 """Converts a text to a sequence of words (or tokens).
29
30 # Arguments
31 text: Input text (string).
32 filters: Sequence of characters to filter out.
33 lower: Whether to convert the input to lowercase.
34 split: Sentence split marker (string).
35
36 # Returns
37 A list of words (or tokens).
38 """
39 if lower:
40 text = text.lower()
41 text = text.translate(maketrans(filters, split * len(filters)))
42 seq = text.split(split)
43 return [i for i in seq if i]
44
45
46 def one_hot(text, n,
47 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
48 lower=True,
49 split=' '):
50 """One-hot encodes a text into a list of word indexes of size n.
51
52 This is a wrapper to the `hashing_trick` function using `hash` as the
53 hashing function, unicity of word to index mapping non-guaranteed.
54 """
55 return hashing_trick(text, n,
56 hash_function=hash,
57 filters=filters,
58 lower=lower,
59 split=split)
60
61
62 def hashing_trick(text, n,
63 hash_function=None,
64 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
65 lower=True,
66 split=' '):
67 """Converts a text to a sequence of indexes in a fixed-size hashing space.
68
69 # Arguments
70 text: Input text (string).
71 n: Dimension of the hashing space.
72 hash_function: if `None` uses python `hash` function, can be 'md5' or
73 any function that takes in input a string and returns a int.
74 Note that `hash` is not a stable hashing function, so
75 it is not consistent across different runs, while 'md5'
76 is a stable hashing function.
77 filters: Sequence of characters to filter out.
78 lower: Whether to convert the input to lowercase.
79 split: Sentence split marker (string).
80
81 # Returns
82 A list of integer word indices (unicity non-guaranteed).
83
84 `0` is a reserved index that won't be assigned to any word.
85
86 Two or more words may be assigned to the same index, due to possible
87 collisions by the hashing function.
88 The [probability](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)
89 of a collision is in relation to the dimension of the hashing space and
90 the number of distinct objects.
91 """
92 if hash_function is None:
93 hash_function = hash
94 elif hash_function == 'md5':
95 hash_function = lambda w: int(md5(w.encode()).hexdigest(), 16)
96
97 seq = text_to_word_sequence(text,
98 filters=filters,
99 lower=lower,
100 split=split)
101 return [(hash_function(w) % (n - 1) + 1) for w in seq]
102
103
104 class Tokenizer(object):
105 """Text tokenization utility class.
106
107 This class allows to vectorize a text corpus, by turning each
108 text into either a sequence of integers (each integer being the index
109 of a token in a dictionary) or into a vector where the coefficient
110 for each token could be binary, based on word count, based on tf-idf...
111
112 # Arguments
113 num_words: the maximum number of words to keep, based
114 on word frequency. Only the most common `num_words` words will
115 be kept.
116 filters: a string where each element is a character that will be
117 filtered from the texts. The default is all punctuation, plus
118 tabs and line breaks, minus the `'` character.
119 lower: boolean. Whether to convert the texts to lowercase.
120 split: character or string to use for token splitting.
121 char_level: if True, every character will be treated as a token.
122
123 By default, all punctuation is removed, turning the texts into
124 space-separated sequences of words
125 (words maybe include the `'` character). These sequences are then
126 split into lists of tokens. They will then be indexed or vectorized.
127
128 `0` is a reserved index that won't be assigned to any word.
129 """
130
131 def __init__(self, num_words=None,
132 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
133 lower=True,
134 split=' ',
135 char_level=False,
136 **kwargs):
137 # Legacy support
138 if 'nb_words' in kwargs:
139 warnings.warn('The `nb_words` argument in `Tokenizer` '
140 'has been renamed `num_words`.')
141 num_words = kwargs.pop('nb_words')
142 if kwargs:
143 raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
144
145 self.word_counts = OrderedDict()
146 self.word_docs = {}
147 self.filters = filters
148 self.split = split
149 self.lower = lower
150 self.num_words = num_words
151 self.document_count = 0
152 self.char_level = char_level
153
154 def fit_on_texts(self, texts):
155 """Updates internal vocabulary based on a list of texts.
156
157 Required before using `texts_to_sequences` or `texts_to_matrix`.
158
159 # Arguments
160 texts: can be a list of strings,
161 or a generator of strings (for memory-efficiency)
162 """
163 self.document_count = 0
164 for text in texts:
165 self.document_count += 1
166 seq = text if self.char_level else text_to_word_sequence(text,
167 self.filters,
168 self.lower,
169 self.split)
170 for w in seq:
171 if w in self.word_counts:
172 self.word_counts[w] += 1
173 else:
174 self.word_counts[w] = 1
175 for w in set(seq):
176 if w in self.word_docs:
177 self.word_docs[w] += 1
178 else:
179 self.word_docs[w] = 1
180
181 wcounts = list(self.word_counts.items())
182 wcounts.sort(key=lambda x: x[1], reverse=True)
183 sorted_voc = [wc[0] for wc in wcounts]
184 # note that index 0 is reserved, never assigned to an existing word
185 self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))
186
187 self.index_docs = {}
188 for w, c in list(self.word_docs.items()):
189 self.index_docs[self.word_index[w]] = c
190
191 def fit_on_sequences(self, sequences):
192 """Updates internal vocabulary based on a list of sequences.
193
194 Required before using `sequences_to_matrix`
195 (if `fit_on_texts` was never called).
196
197 # Arguments
198 sequences: A list of sequence.
199 A "sequence" is a list of integer word indices.
200 """
201 self.document_count = len(sequences)
202 self.index_docs = {}
203 for seq in sequences:
204 seq = set(seq)
205 for i in seq:
206 if i not in self.index_docs:
207 self.index_docs[i] = 1
208 else:
209 self.index_docs[i] += 1
210
211 def texts_to_sequences(self, texts):
212 """Transforms each text in texts in a sequence of integers.
213
214 Only top "num_words" most frequent words will be taken into account.
215 Only words known by the tokenizer will be taken into account.
216
217 # Arguments
218 texts: A list of texts (strings).
219
220 # Returns
221 A list of sequences.
222 """
223 res = []
224 for vect in self.texts_to_sequences_generator(texts):
225 res.append(vect)
226 return res
227
228 def texts_to_sequences_generator(self, texts):
229 """Transforms each text in texts in a sequence of integers.
230
231 Only top "num_words" most frequent words will be taken into account.
232 Only words known by the tokenizer will be taken into account.
233
234 # Arguments
235 texts: A list of texts (strings).
236
237 # Yields
238 Yields individual sequences.
239 """
240 num_words = self.num_words
241 for text in texts:
242 seq = text if self.char_level else text_to_word_sequence(text,
243 self.filters,
244 self.lower,
245 self.split)
246 vect = []
247 for w in seq:
248 i = self.word_index.get(w)
249 if i is not None:
250 if num_words and i >= num_words:
251 continue
252 else:
253 vect.append(i)
254 yield vect
255
256 def texts_to_matrix(self, texts, mode='binary'):
257 """Convert a list of texts to a Numpy matrix.
258
259 # Arguments
260 texts: list of strings.
261 mode: one of "binary", "count", "tfidf", "freq".
262
263 # Returns
264 A Numpy matrix.
265 """
266 sequences = self.texts_to_sequences(texts)
267 return self.sequences_to_matrix(sequences, mode=mode)
268
269 def sequences_to_matrix(self, sequences, mode='binary'):
270 """Converts a list of sequences into a Numpy matrix.
271
272 # Arguments
273 sequences: list of sequences
274 (a sequence is a list of integer word indices).
275 mode: one of "binary", "count", "tfidf", "freq"
276
277 # Returns
278 A Numpy matrix.
279
280 # Raises
281 ValueError: In case of invalid `mode` argument,
282 or if the Tokenizer requires to be fit to sample data.
283 """
284 if not self.num_words:
285 if self.word_index:
286 num_words = len(self.word_index) + 1
287 else:
288 raise ValueError('Specify a dimension (num_words argument), '
289 'or fit on some text data first.')
290 else:
291 num_words = self.num_words
292
293 if mode == 'tfidf' and not self.document_count:
294 raise ValueError('Fit the Tokenizer on some data '
295 'before using tfidf mode.')
296
297 x = np.zeros((len(sequences), num_words))
298 for i, seq in enumerate(sequences):
299 if not seq:
300 continue
301 counts = {}
302 for j in seq:
303 if j >= num_words:
304 continue
305 if j not in counts:
306 counts[j] = 1.
307 else:
308 counts[j] += 1
309 for j, c in list(counts.items()):
310 if mode == 'count':
311 x[i][j] = c
312 elif mode == 'freq':
313 x[i][j] = c / len(seq)
314 elif mode == 'binary':
315 x[i][j] = 1
316 elif mode == 'tfidf':
317 # Use weighting scheme 2 in
318 # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
319 tf = 1 + np.log(c)
320 idf = np.log(1 + self.document_count /
321 (1 + self.index_docs.get(j, 0)))
322 x[i][j] = tf * idf
323 else:
324 raise ValueError('Unknown vectorization mode:', mode)
325 return x
326
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py
--- a/keras/preprocessing/text.py
+++ b/keras/preprocessing/text.py
@@ -38,7 +38,13 @@
"""
if lower:
text = text.lower()
- text = text.translate(maketrans(filters, split * len(filters)))
+
+ if sys.version_info < (3,) and isinstance(text, unicode):
+ translate_map = dict((ord(c), unicode(split)) for c in filters)
+ else:
+ translate_map = maketrans(filters, split * len(filters))
+
+ text = text.translate(translate_map)
seq = text.split(split)
return [i for i in seq if i]
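With that change, unicode input goes through a dict-based translate map while byte strings keep the original maketrans path, so the snippet from the issue runs through. A short usage sketch of the expected behaviour after the patch (a hypothetical session, not part of the patch itself):
```python
# -*- coding: utf-8 -*-
from keras.preprocessing.text import Tokenizer

d = [u'ali veli kırk dokuz elli',
     u'ali veli kırk dokuz elli veli kırk dokuz']

tokenizer = Tokenizer()
tokenizer.fit_on_texts(d)               # no longer raises TypeError on unicode
print(tokenizer.word_index)             # word indices assigned by descending frequency
print(tokenizer.texts_to_sequences(d))  # two lists of integer ids
```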
|
{"golden_diff": "diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py\n--- a/keras/preprocessing/text.py\n+++ b/keras/preprocessing/text.py\n@@ -38,7 +38,13 @@\n \"\"\"\n if lower:\n text = text.lower()\n- text = text.translate(maketrans(filters, split * len(filters)))\n+\n+ if sys.version_info < (3,) and isinstance(text, unicode):\n+ translate_map = dict((ord(c), unicode(split)) for c in filters)\n+ else:\n+ translate_map = maketrans(filters, split * len(filters))\n+\n+ text = text.translate(translate_map)\n seq = text.split(split)\n return [i for i in seq if i]\n", "issue": "Tokenizer failed with unicode text\nTokenizer failed with unicode text \r\n\r\nd = [u'ali veli k\u0131rk dokuz elli',u'ali veli k\u0131rk dokuz elli veli k\u0131rk dokuz']\r\ntokenizer.fit_on_texts(d)\r\n\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-181-b9cd26967244> in <module>()\r\n----> 1 tokenizer.fit_on_texts(d)\r\n\r\nC:\\Anaconda2\\lib\\site-packages\\keras-2.0.3-py2.7.egg\\keras\\preprocessing\\text.pyc in fit_on_texts(self, texts)\r\n 117 self.filters,\r\n 118 self.lower,\r\n--> 119 self.split)\r\n 120 for w in seq:\r\n 121 if w in self.word_counts:\r\n\r\nC:\\Anaconda2\\lib\\site-packages\\keras-2.0.3-py2.7.egg\\keras\\preprocessing\\text.pyc in text_to_word_sequence(text, filters, lower, split)\r\n 36 if lower:\r\n 37 text = text.lower()\r\n---> 38 text = text.translate(maketrans(filters, split * len(filters)))\r\n 39 seq = text.split(split)\r\n 40 return [i for i in seq if i]\r\n\r\nTypeError: character mapping must return integer, None or unicode\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utilities for text input preprocessing.\n\nMay benefit from a fast Cython rewrite.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport string\nimport sys\nimport warnings\nfrom collections import OrderedDict\nfrom hashlib import md5\n\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\n\nif sys.version_info < (3,):\n maketrans = string.maketrans\nelse:\n maketrans = str.maketrans\n\n\ndef text_to_word_sequence(text,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=\" \"):\n \"\"\"Converts a text to a sequence of words (or tokens).\n\n # Arguments\n text: Input text (string).\n filters: Sequence of characters to filter out.\n lower: Whether to convert the input to lowercase.\n split: Sentence split marker (string).\n\n # Returns\n A list of words (or tokens).\n \"\"\"\n if lower:\n text = text.lower()\n text = text.translate(maketrans(filters, split * len(filters)))\n seq = text.split(split)\n return [i for i in seq if i]\n\n\ndef one_hot(text, n,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' '):\n \"\"\"One-hot encodes a text into a list of word indexes of size n.\n\n This is a wrapper to the `hashing_trick` function using `hash` as the\n hashing function, unicity of word to index mapping non-guaranteed.\n \"\"\"\n return hashing_trick(text, n,\n hash_function=hash,\n filters=filters,\n lower=lower,\n split=split)\n\n\ndef hashing_trick(text, n,\n hash_function=None,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' '):\n \"\"\"Converts a text to a sequence of indexes in a fixed-size hashing space.\n\n # Arguments\n text: Input text (string).\n n: Dimension of the hashing space.\n hash_function: if `None` uses python `hash` function, can be 'md5' or\n any function that takes in input a string and returns 
a int.\n Note that `hash` is not a stable hashing function, so\n it is not consistent across different runs, while 'md5'\n is a stable hashing function.\n filters: Sequence of characters to filter out.\n lower: Whether to convert the input to lowercase.\n split: Sentence split marker (string).\n\n # Returns\n A list of integer word indices (unicity non-guaranteed).\n\n `0` is a reserved index that won't be assigned to any word.\n\n Two or more words may be assigned to the same index, due to possible\n collisions by the hashing function.\n The [probability](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)\n of a collision is in relation to the dimension of the hashing space and\n the number of distinct objects.\n \"\"\"\n if hash_function is None:\n hash_function = hash\n elif hash_function == 'md5':\n hash_function = lambda w: int(md5(w.encode()).hexdigest(), 16)\n\n seq = text_to_word_sequence(text,\n filters=filters,\n lower=lower,\n split=split)\n return [(hash_function(w) % (n - 1) + 1) for w in seq]\n\n\nclass Tokenizer(object):\n \"\"\"Text tokenization utility class.\n\n This class allows to vectorize a text corpus, by turning each\n text into either a sequence of integers (each integer being the index\n of a token in a dictionary) or into a vector where the coefficient\n for each token could be binary, based on word count, based on tf-idf...\n\n # Arguments\n num_words: the maximum number of words to keep, based\n on word frequency. Only the most common `num_words` words will\n be kept.\n filters: a string where each element is a character that will be\n filtered from the texts. The default is all punctuation, plus\n tabs and line breaks, minus the `'` character.\n lower: boolean. Whether to convert the texts to lowercase.\n split: character or string to use for token splitting.\n char_level: if True, every character will be treated as a token.\n\n By default, all punctuation is removed, turning the texts into\n space-separated sequences of words\n (words maybe include the `'` character). These sequences are then\n split into lists of tokens. 
They will then be indexed or vectorized.\n\n `0` is a reserved index that won't be assigned to any word.\n \"\"\"\n\n def __init__(self, num_words=None,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' ',\n char_level=False,\n **kwargs):\n # Legacy support\n if 'nb_words' in kwargs:\n warnings.warn('The `nb_words` argument in `Tokenizer` '\n 'has been renamed `num_words`.')\n num_words = kwargs.pop('nb_words')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n self.word_counts = OrderedDict()\n self.word_docs = {}\n self.filters = filters\n self.split = split\n self.lower = lower\n self.num_words = num_words\n self.document_count = 0\n self.char_level = char_level\n\n def fit_on_texts(self, texts):\n \"\"\"Updates internal vocabulary based on a list of texts.\n\n Required before using `texts_to_sequences` or `texts_to_matrix`.\n\n # Arguments\n texts: can be a list of strings,\n or a generator of strings (for memory-efficiency)\n \"\"\"\n self.document_count = 0\n for text in texts:\n self.document_count += 1\n seq = text if self.char_level else text_to_word_sequence(text,\n self.filters,\n self.lower,\n self.split)\n for w in seq:\n if w in self.word_counts:\n self.word_counts[w] += 1\n else:\n self.word_counts[w] = 1\n for w in set(seq):\n if w in self.word_docs:\n self.word_docs[w] += 1\n else:\n self.word_docs[w] = 1\n\n wcounts = list(self.word_counts.items())\n wcounts.sort(key=lambda x: x[1], reverse=True)\n sorted_voc = [wc[0] for wc in wcounts]\n # note that index 0 is reserved, never assigned to an existing word\n self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))\n\n self.index_docs = {}\n for w, c in list(self.word_docs.items()):\n self.index_docs[self.word_index[w]] = c\n\n def fit_on_sequences(self, sequences):\n \"\"\"Updates internal vocabulary based on a list of sequences.\n\n Required before using `sequences_to_matrix`\n (if `fit_on_texts` was never called).\n\n # Arguments\n sequences: A list of sequence.\n A \"sequence\" is a list of integer word indices.\n \"\"\"\n self.document_count = len(sequences)\n self.index_docs = {}\n for seq in sequences:\n seq = set(seq)\n for i in seq:\n if i not in self.index_docs:\n self.index_docs[i] = 1\n else:\n self.index_docs[i] += 1\n\n def texts_to_sequences(self, texts):\n \"\"\"Transforms each text in texts in a sequence of integers.\n\n Only top \"num_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n # Arguments\n texts: A list of texts (strings).\n\n # Returns\n A list of sequences.\n \"\"\"\n res = []\n for vect in self.texts_to_sequences_generator(texts):\n res.append(vect)\n return res\n\n def texts_to_sequences_generator(self, texts):\n \"\"\"Transforms each text in texts in a sequence of integers.\n\n Only top \"num_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n # Arguments\n texts: A list of texts (strings).\n\n # Yields\n Yields individual sequences.\n \"\"\"\n num_words = self.num_words\n for text in texts:\n seq = text if self.char_level else text_to_word_sequence(text,\n self.filters,\n self.lower,\n self.split)\n vect = []\n for w in seq:\n i = self.word_index.get(w)\n if i is not None:\n if num_words and i >= num_words:\n continue\n else:\n vect.append(i)\n yield vect\n\n def texts_to_matrix(self, texts, mode='binary'):\n \"\"\"Convert a list of texts to a Numpy matrix.\n\n 
# Arguments\n texts: list of strings.\n mode: one of \"binary\", \"count\", \"tfidf\", \"freq\".\n\n # Returns\n A Numpy matrix.\n \"\"\"\n sequences = self.texts_to_sequences(texts)\n return self.sequences_to_matrix(sequences, mode=mode)\n\n def sequences_to_matrix(self, sequences, mode='binary'):\n \"\"\"Converts a list of sequences into a Numpy matrix.\n\n # Arguments\n sequences: list of sequences\n (a sequence is a list of integer word indices).\n mode: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n\n # Returns\n A Numpy matrix.\n\n # Raises\n ValueError: In case of invalid `mode` argument,\n or if the Tokenizer requires to be fit to sample data.\n \"\"\"\n if not self.num_words:\n if self.word_index:\n num_words = len(self.word_index) + 1\n else:\n raise ValueError('Specify a dimension (num_words argument), '\n 'or fit on some text data first.')\n else:\n num_words = self.num_words\n\n if mode == 'tfidf' and not self.document_count:\n raise ValueError('Fit the Tokenizer on some data '\n 'before using tfidf mode.')\n\n x = np.zeros((len(sequences), num_words))\n for i, seq in enumerate(sequences):\n if not seq:\n continue\n counts = {}\n for j in seq:\n if j >= num_words:\n continue\n if j not in counts:\n counts[j] = 1.\n else:\n counts[j] += 1\n for j, c in list(counts.items()):\n if mode == 'count':\n x[i][j] = c\n elif mode == 'freq':\n x[i][j] = c / len(seq)\n elif mode == 'binary':\n x[i][j] = 1\n elif mode == 'tfidf':\n # Use weighting scheme 2 in\n # https://en.wikipedia.org/wiki/Tf%E2%80%93idf\n tf = 1 + np.log(c)\n idf = np.log(1 + self.document_count /\n (1 + self.index_docs.get(j, 0)))\n x[i][j] = tf * idf\n else:\n raise ValueError('Unknown vectorization mode:', mode)\n return x\n", "path": "keras/preprocessing/text.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utilities for text input preprocessing.\n\nMay benefit from a fast Cython rewrite.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport string\nimport sys\nimport warnings\nfrom collections import OrderedDict\nfrom hashlib import md5\n\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\n\nif sys.version_info < (3,):\n maketrans = string.maketrans\nelse:\n maketrans = str.maketrans\n\n\ndef text_to_word_sequence(text,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True, split=\" \"):\n \"\"\"Converts a text to a sequence of words (or tokens).\n\n # Arguments\n text: Input text (string).\n filters: Sequence of characters to filter out.\n lower: Whether to convert the input to lowercase.\n split: Sentence split marker (string).\n\n # Returns\n A list of words (or tokens).\n \"\"\"\n if lower:\n text = text.lower()\n\n if sys.version_info < (3,) and isinstance(text, unicode):\n translate_map = dict((ord(c), unicode(split)) for c in filters)\n else:\n translate_map = maketrans(filters, split * len(filters))\n\n text = text.translate(translate_map)\n seq = text.split(split)\n return [i for i in seq if i]\n\n\ndef one_hot(text, n,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' '):\n \"\"\"One-hot encodes a text into a list of word indexes of size n.\n\n This is a wrapper to the `hashing_trick` function using `hash` as the\n hashing function, unicity of word to index mapping non-guaranteed.\n \"\"\"\n return hashing_trick(text, n,\n hash_function=hash,\n filters=filters,\n lower=lower,\n split=split)\n\n\ndef hashing_trick(text, n,\n hash_function=None,\n 
filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' '):\n \"\"\"Converts a text to a sequence of indexes in a fixed-size hashing space.\n\n # Arguments\n text: Input text (string).\n n: Dimension of the hashing space.\n hash_function: if `None` uses python `hash` function, can be 'md5' or\n any function that takes in input a string and returns a int.\n Note that `hash` is not a stable hashing function, so\n it is not consistent across different runs, while 'md5'\n is a stable hashing function.\n filters: Sequence of characters to filter out.\n lower: Whether to convert the input to lowercase.\n split: Sentence split marker (string).\n\n # Returns\n A list of integer word indices (unicity non-guaranteed).\n\n `0` is a reserved index that won't be assigned to any word.\n\n Two or more words may be assigned to the same index, due to possible\n collisions by the hashing function.\n The [probability](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)\n of a collision is in relation to the dimension of the hashing space and\n the number of distinct objects.\n \"\"\"\n if hash_function is None:\n hash_function = hash\n elif hash_function == 'md5':\n hash_function = lambda w: int(md5(w.encode()).hexdigest(), 16)\n\n seq = text_to_word_sequence(text,\n filters=filters,\n lower=lower,\n split=split)\n return [(hash_function(w) % (n - 1) + 1) for w in seq]\n\n\nclass Tokenizer(object):\n \"\"\"Text tokenization utility class.\n\n This class allows to vectorize a text corpus, by turning each\n text into either a sequence of integers (each integer being the index\n of a token in a dictionary) or into a vector where the coefficient\n for each token could be binary, based on word count, based on tf-idf...\n\n # Arguments\n num_words: the maximum number of words to keep, based\n on word frequency. Only the most common `num_words` words will\n be kept.\n filters: a string where each element is a character that will be\n filtered from the texts. The default is all punctuation, plus\n tabs and line breaks, minus the `'` character.\n lower: boolean. Whether to convert the texts to lowercase.\n split: character or string to use for token splitting.\n char_level: if True, every character will be treated as a token.\n\n By default, all punctuation is removed, turning the texts into\n space-separated sequences of words\n (words maybe include the `'` character). These sequences are then\n split into lists of tokens. 
They will then be indexed or vectorized.\n\n `0` is a reserved index that won't be assigned to any word.\n \"\"\"\n\n def __init__(self, num_words=None,\n filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n lower=True,\n split=' ',\n char_level=False,\n **kwargs):\n # Legacy support\n if 'nb_words' in kwargs:\n warnings.warn('The `nb_words` argument in `Tokenizer` '\n 'has been renamed `num_words`.')\n num_words = kwargs.pop('nb_words')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n self.word_counts = OrderedDict()\n self.word_docs = {}\n self.filters = filters\n self.split = split\n self.lower = lower\n self.num_words = num_words\n self.document_count = 0\n self.char_level = char_level\n\n def fit_on_texts(self, texts):\n \"\"\"Updates internal vocabulary based on a list of texts.\n\n Required before using `texts_to_sequences` or `texts_to_matrix`.\n\n # Arguments\n texts: can be a list of strings,\n or a generator of strings (for memory-efficiency)\n \"\"\"\n self.document_count = 0\n for text in texts:\n self.document_count += 1\n seq = text if self.char_level else text_to_word_sequence(text,\n self.filters,\n self.lower,\n self.split)\n for w in seq:\n if w in self.word_counts:\n self.word_counts[w] += 1\n else:\n self.word_counts[w] = 1\n for w in set(seq):\n if w in self.word_docs:\n self.word_docs[w] += 1\n else:\n self.word_docs[w] = 1\n\n wcounts = list(self.word_counts.items())\n wcounts.sort(key=lambda x: x[1], reverse=True)\n sorted_voc = [wc[0] for wc in wcounts]\n # note that index 0 is reserved, never assigned to an existing word\n self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))\n\n self.index_docs = {}\n for w, c in list(self.word_docs.items()):\n self.index_docs[self.word_index[w]] = c\n\n def fit_on_sequences(self, sequences):\n \"\"\"Updates internal vocabulary based on a list of sequences.\n\n Required before using `sequences_to_matrix`\n (if `fit_on_texts` was never called).\n\n # Arguments\n sequences: A list of sequence.\n A \"sequence\" is a list of integer word indices.\n \"\"\"\n self.document_count = len(sequences)\n self.index_docs = {}\n for seq in sequences:\n seq = set(seq)\n for i in seq:\n if i not in self.index_docs:\n self.index_docs[i] = 1\n else:\n self.index_docs[i] += 1\n\n def texts_to_sequences(self, texts):\n \"\"\"Transforms each text in texts in a sequence of integers.\n\n Only top \"num_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n # Arguments\n texts: A list of texts (strings).\n\n # Returns\n A list of sequences.\n \"\"\"\n res = []\n for vect in self.texts_to_sequences_generator(texts):\n res.append(vect)\n return res\n\n def texts_to_sequences_generator(self, texts):\n \"\"\"Transforms each text in texts in a sequence of integers.\n\n Only top \"num_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n # Arguments\n texts: A list of texts (strings).\n\n # Yields\n Yields individual sequences.\n \"\"\"\n num_words = self.num_words\n for text in texts:\n seq = text if self.char_level else text_to_word_sequence(text,\n self.filters,\n self.lower,\n self.split)\n vect = []\n for w in seq:\n i = self.word_index.get(w)\n if i is not None:\n if num_words and i >= num_words:\n continue\n else:\n vect.append(i)\n yield vect\n\n def texts_to_matrix(self, texts, mode='binary'):\n \"\"\"Convert a list of texts to a Numpy matrix.\n\n 
# Arguments\n texts: list of strings.\n mode: one of \"binary\", \"count\", \"tfidf\", \"freq\".\n\n # Returns\n A Numpy matrix.\n \"\"\"\n sequences = self.texts_to_sequences(texts)\n return self.sequences_to_matrix(sequences, mode=mode)\n\n def sequences_to_matrix(self, sequences, mode='binary'):\n \"\"\"Converts a list of sequences into a Numpy matrix.\n\n # Arguments\n sequences: list of sequences\n (a sequence is a list of integer word indices).\n mode: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n\n # Returns\n A Numpy matrix.\n\n # Raises\n ValueError: In case of invalid `mode` argument,\n or if the Tokenizer requires to be fit to sample data.\n \"\"\"\n if not self.num_words:\n if self.word_index:\n num_words = len(self.word_index) + 1\n else:\n raise ValueError('Specify a dimension (num_words argument), '\n 'or fit on some text data first.')\n else:\n num_words = self.num_words\n\n if mode == 'tfidf' and not self.document_count:\n raise ValueError('Fit the Tokenizer on some data '\n 'before using tfidf mode.')\n\n x = np.zeros((len(sequences), num_words))\n for i, seq in enumerate(sequences):\n if not seq:\n continue\n counts = {}\n for j in seq:\n if j >= num_words:\n continue\n if j not in counts:\n counts[j] = 1.\n else:\n counts[j] += 1\n for j, c in list(counts.items()):\n if mode == 'count':\n x[i][j] = c\n elif mode == 'freq':\n x[i][j] = c / len(seq)\n elif mode == 'binary':\n x[i][j] = 1\n elif mode == 'tfidf':\n # Use weighting scheme 2 in\n # https://en.wikipedia.org/wiki/Tf%E2%80%93idf\n tf = 1 + np.log(c)\n idf = np.log(1 + self.document_count /\n (1 + self.index_docs.get(j, 0)))\n x[i][j] = tf * idf\n else:\n raise ValueError('Unknown vectorization mode:', mode)\n return x\n", "path": "keras/preprocessing/text.py"}]}
| 3,984 | 165 |
gh_patches_debug_38807
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-2193
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/importexport.py`
Content:
```
1 import os
2 import io
3 import json
4 import tarfile
5 import tempfile
6 import logging
7
8 from django.conf import settings
9 from django.db.models.query import QuerySet
10
11 from pulpcore.app.apps import get_plugin_config
12 from pulpcore.app.models.progress import ProgressReport
13 from pulpcore.app.models.repository import Repository
14 from pulpcore.app.modelresource import (
15 ArtifactResource,
16 ContentArtifactResource,
17 RepositoryResource,
18 )
19 from pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE
20
21 log = logging.getLogger(__name__)
22
23
24 def _write_export(the_tarfile, resource, dest_dir=None):
25 """
26 Write the JSON export for the specified resource to the specified tarfile.
27
28 The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. If dest_dir
29 is None, the file will be added at the 'top level' of the_tarfile.
30
31 Export-files are UTF-8 encoded.
32
33 Args:
34 the_tarfile (tarfile.Tarfile): tarfile we are writing into
35 resource (import_export.resources.ModelResource): ModelResource to be exported
36 dest_dir str(directory-path): directory 'inside' the tarfile to write to
37 """
38 filename = "{}.{}.json".format(resource.__module__, type(resource).__name__)
39 if dest_dir:
40 dest_filename = os.path.join(dest_dir, filename)
41 else:
42 dest_filename = filename
43
44 # If the resource is the type of QuerySet, then export the data in batch to save memory.
45 # Otherwise, export all data in oneshot. This is because the underlying libraries
46 # (json; django-import-export) do not support to stream the output to file, we export
47 # the data in batches to memory and concatenate the json lists via string manipulation.
48 with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode="w", encoding="utf8") as temp_file:
49 if isinstance(resource.queryset, QuerySet):
50 temp_file.write("[")
51 total = resource.queryset.count()
52 for i in range(0, total, EXPORT_BATCH_SIZE):
53 current_batch = i + EXPORT_BATCH_SIZE
54 dataset = resource.export(resource.queryset[i:current_batch])
55 # Strip "[" and "]" as we are writing the dataset in batch
56 temp_file.write(dataset.json.lstrip("[").rstrip("]"))
57 if current_batch < total:
58 # Write "," if not last loop
59 temp_file.write(", ")
60 temp_file.write("]")
61 else:
62 dataset = resource.export(resource.queryset)
63 temp_file.write(dataset.json)
64
65 temp_file.flush()
66 info = tarfile.TarInfo(name=dest_filename)
67 info.size = os.path.getsize(temp_file.name)
68 with open(temp_file.name, "rb") as fd:
69 the_tarfile.addfile(info, fd)
70
71
72 def export_versions(export, version_info):
73 """
74 Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile
75
76 Output format is [{"component": "<pluginname>", "version": "<pluginversion>"},...]
77
78 Args:
79 export (django.db.models.PulpExport): export instance that's doing the export
80 version_info (set): set of (distribution-label,version) tuples for repos in this export
81 """
82 # build the version-list from the distributions for each component
83 versions = [{"component": label, "version": version} for (label, version) in version_info]
84
85 version_json = json.dumps(versions).encode("utf8")
86 info = tarfile.TarInfo(name="versions.json")
87 info.size = len(version_json)
88 export.tarfile.addfile(info, io.BytesIO(version_json))
89
90
91 def export_artifacts(export, artifacts):
92 """
93 Export a set of Artifacts, ArtifactResources, and RepositoryResources
94
95 Args:
96 export (django.db.models.PulpExport): export instance that's doing the export
97 artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
98
99 Raises:
100 ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
101 """
102 data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
103 with ProgressReport(**data) as pb:
104 for artifact in pb.iter(artifacts):
105 dest = artifact.file.name
106 if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
107 with tempfile.TemporaryDirectory() as temp_dir:
108 with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
109 temp_file.write(artifact.file.read())
110 temp_file.flush()
111 artifact.file.close()
112 export.tarfile.add(temp_file.name, dest)
113 else:
114 export.tarfile.add(artifact.file.path, dest)
115
116 resource = ArtifactResource()
117 resource.queryset = artifacts
118 _write_export(export.tarfile, resource)
119
120 resource = RepositoryResource()
121 resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())
122 _write_export(export.tarfile, resource)
123
124
125 def export_content(export, repository_version):
126 """
127 Export db-content, and the db-content of the owning repositories
128
129 Args:
130 export (django.db.models.PulpExport): export instance that's doing the export
131 repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported
132 """
133
134 def _combine_content_mappings(map1, map2):
135 """Combine two content mapping dicts into one by combining ids for for each key."""
136 result = {}
137 for key in map1.keys() | map2.keys():
138 result[key] = list(set(map1.get(key, []) + map2.get(key, [])))
139 return result
140
141 dest_dir = os.path.join(
142 "repository-{}_{}".format(
143 str(repository_version.repository.name), repository_version.number
144 )
145 )
146
147 # Export the connection between content and artifacts
148 resource = ContentArtifactResource(repository_version)
149 _write_export(export.tarfile, resource, dest_dir)
150
151 # content mapping is used by repo versions with subrepos (eg distribution tree repos)
152 content_mapping = {}
153
154 # find and export any ModelResource found in pulp_<repo-type>.app.modelresource
155 plugin_name = repository_version.repository.pulp_type.split(".")[0]
156 cfg = get_plugin_config(plugin_name)
157 if cfg.exportable_classes:
158 for cls in cfg.exportable_classes:
159 resource = cls(repository_version)
160 _write_export(export.tarfile, resource, dest_dir)
161
162 if hasattr(resource, "content_mapping") and resource.content_mapping:
163 content_mapping = _combine_content_mappings(
164 content_mapping, resource.content_mapping
165 )
166
167 msg = (
168 f"Exporting content for {plugin_name} "
169 f"repository-version {repository_version.repository.name}/{repository_version.number}"
170 )
171 content_count = repository_version.content.count()
172 data = dict(
173 message=msg,
174 code="export.repo.version.content",
175 total=content_count,
176 done=content_count,
177 state=TASK_STATES.COMPLETED,
178 )
179 pb = ProgressReport(**data)
180 pb.save()
181
182 if content_mapping:
183 # write the content mapping to tarfile
184 cm_json = json.dumps(content_mapping).encode("utf8")
185 info = tarfile.TarInfo(name=f"{dest_dir}/content_mapping.json")
186 info.size = len(cm_json)
187 export.tarfile.addfile(info, io.BytesIO(cm_json))
188
```
Path: `pulpcore/app/modelresource.py`
Content:
```
1 from import_export import fields
2 from import_export.widgets import ForeignKeyWidget
3 from logging import getLogger
4
5 from pulpcore.app.models.content import (
6 Artifact,
7 Content,
8 ContentArtifact,
9 )
10 from pulpcore.app.models.repository import Repository
11 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
12 from pulpcore.plugin.importexport import QueryModelResource
13
14
15 log = getLogger(__name__)
16
17
18 #
19 # Artifact and Repository are different from other import-export entities, in that they are not
20 # repo-version-specific.
21 #
22 class ArtifactResource(QueryModelResource):
23 """Resource for import/export of artifacts."""
24
25 def before_import_row(self, row, **kwargs):
26 """
27 Sets digests to None if they are blank strings.
28
29 Args:
30 row (tablib.Dataset row): incoming import-row representing a single Variant.
31 kwargs: args passed along from the import() call.
32
33 """
34 # the export converts None to blank strings but sha384 and sha512 have unique constraints
35 # that get triggered if they are blank. convert checksums back into None if they are blank.
36 for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:
37 if row[checksum] == "":
38 row[checksum] = None
39
40 class Meta:
41 model = Artifact
42 exclude = (
43 "pulp_id",
44 "pulp_created",
45 "pulp_last_updated",
46 )
47 import_id_fields = ("sha256",)
48
49
50 class RepositoryResource(QueryModelResource):
51 class Meta:
52 model = Repository
53 import_id_fields = ("name",)
54 exclude = (
55 "pulp_id",
56 "pulp_created",
57 "pulp_last_updated",
58 "content",
59 )
60
61
62 class ContentArtifactResource(QueryModelResource):
63 """
64 Handles import/export of the ContentArtifact model.
65
66 ContentArtifact is different from other import-export entities because it has no 'natural key'
67 other than a pulp_id, which aren't shared across instances. We do some magic to link up
68 ContentArtifacts to their matching (already-imported) Content.
69 """
70
71 artifact = fields.Field(
72 column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
73 )
74
75 def before_import_row(self, row, **kwargs):
76 """
77 Fixes the content-ptr of an incoming content-artifact row at import time.
78
79 Finds the 'original uuid' of the Content for this row, looks it up as the
80 'upstream_id' of imported Content, and then replaces the Content-pk with its
81 (new) uuid.
82
83 Args:
84 row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.
85 kwargs: args passed along from the import() call.
86
87 Returns:
88 (tablib.Dataset row): row that now points to the new downstream uuid for its content.
89 """
90
91 linked_content = Content.objects.get(upstream_id=row["content"])
92 row["content"] = str(linked_content.pulp_id)
93
94 def set_up_queryset(self):
95 return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
96 "content", "relative_path"
97 )
98
99 class Meta:
100 model = ContentArtifact
101 import_id_fields = (
102 "content",
103 "relative_path",
104 )
105 exclude = (
106 "pulp_created",
107 "pulp_last_updated",
108 "_artifacts",
109 "pulp_id",
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -144,10 +144,6 @@
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -164,6 +160,10 @@
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
|
{"golden_diff": "diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py\n--- a/pulpcore/app/importexport.py\n+++ b/pulpcore/app/importexport.py\n@@ -144,10 +144,6 @@\n )\n )\n \n- # Export the connection between content and artifacts\n- resource = ContentArtifactResource(repository_version)\n- _write_export(export.tarfile, resource, dest_dir)\n-\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n \n@@ -164,6 +160,10 @@\n content_mapping, resource.content_mapping\n )\n \n+ # Export the connection between content and artifacts\n+ resource = ContentArtifactResource(repository_version, content_mapping)\n+ _write_export(export.tarfile, resource, dest_dir)\n+\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\ndiff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py\n--- a/pulpcore/app/modelresource.py\n+++ b/pulpcore/app/modelresource.py\n@@ -66,12 +66,19 @@\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n+\n+ Some plugin-models have sub-repositories. We take advantage of the content-mapping\n+ machinery to account for those contentartifacts as well.\n \"\"\"\n \n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n \n+ def __init__(self, repo_version=None, content_mapping=None):\n+ self.content_mapping = content_mapping\n+ super().__init__(repo_version)\n+\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n@@ -92,9 +99,15 @@\n row[\"content\"] = str(linked_content.pulp_id)\n \n def set_up_queryset(self):\n- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(\n- \"content\", \"relative_path\"\n- )\n+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n+ if self.content_mapping:\n+ all_content = []\n+ for content_ids in self.content_mapping.values():\n+ all_content.extend(content_ids)\n+ vers_content = vers_content.union(\n+ ContentArtifact.objects.filter(content__in=all_content)\n+ )\n+ return vers_content.order_by(\"content\", \"relative_path\")\n \n class Meta:\n model = ContentArtifact\n", "issue": "PulpImport/Export of kickstart repos with subrepos broken\nSee https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.\n", "before_files": [{"content": "import os\nimport io\nimport json\nimport tarfile\nimport tempfile\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models.query import QuerySet\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models.progress import ProgressReport\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n RepositoryResource,\n)\nfrom pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE\n\nlog = logging.getLogger(__name__)\n\n\ndef _write_export(the_tarfile, resource, dest_dir=None):\n \"\"\"\n Write the JSON export for the specified resource to the specified tarfile.\n\n The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. 
If dest_dir\n is None, the file will be added at the 'top level' of the_tarfile.\n\n Export-files are UTF-8 encoded.\n\n Args:\n the_tarfile (tarfile.Tarfile): tarfile we are writing into\n resource (import_export.resources.ModelResource): ModelResource to be exported\n dest_dir str(directory-path): directory 'inside' the tarfile to write to\n \"\"\"\n filename = \"{}.{}.json\".format(resource.__module__, type(resource).__name__)\n if dest_dir:\n dest_filename = os.path.join(dest_dir, filename)\n else:\n dest_filename = filename\n\n # If the resource is the type of QuerySet, then export the data in batch to save memory.\n # Otherwise, export all data in oneshot. This is because the underlying libraries\n # (json; django-import-export) do not support to stream the output to file, we export\n # the data in batches to memory and concatenate the json lists via string manipulation.\n with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode=\"w\", encoding=\"utf8\") as temp_file:\n if isinstance(resource.queryset, QuerySet):\n temp_file.write(\"[\")\n total = resource.queryset.count()\n for i in range(0, total, EXPORT_BATCH_SIZE):\n current_batch = i + EXPORT_BATCH_SIZE\n dataset = resource.export(resource.queryset[i:current_batch])\n # Strip \"[\" and \"]\" as we are writing the dataset in batch\n temp_file.write(dataset.json.lstrip(\"[\").rstrip(\"]\"))\n if current_batch < total:\n # Write \",\" if not last loop\n temp_file.write(\", \")\n temp_file.write(\"]\")\n else:\n dataset = resource.export(resource.queryset)\n temp_file.write(dataset.json)\n\n temp_file.flush()\n info = tarfile.TarInfo(name=dest_filename)\n info.size = os.path.getsize(temp_file.name)\n with open(temp_file.name, \"rb\") as fd:\n the_tarfile.addfile(info, fd)\n\n\ndef export_versions(export, version_info):\n \"\"\"\n Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile\n\n Output format is [{\"component\": \"<pluginname>\", \"version\": \"<pluginversion>\"},...]\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n version_info (set): set of (distribution-label,version) tuples for repos in this export\n \"\"\"\n # build the version-list from the distributions for each component\n versions = [{\"component\": label, \"version\": version} for (label, version) in version_info]\n\n version_json = json.dumps(versions).encode(\"utf8\")\n info = tarfile.TarInfo(name=\"versions.json\")\n info.size = len(version_json)\n export.tarfile.addfile(info, io.BytesIO(version_json))\n\n\ndef export_artifacts(export, artifacts):\n \"\"\"\n Export a set of Artifacts, ArtifactResources, and RepositoryResources\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported\n\n Raises:\n ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting\n \"\"\"\n data = dict(message=\"Exporting Artifacts\", code=\"export.artifacts\", total=len(artifacts))\n with ProgressReport(**data) as pb:\n for artifact in pb.iter(artifacts):\n dest = artifact.file.name\n if settings.DEFAULT_FILE_STORAGE != \"pulpcore.app.models.storage.FileSystem\":\n with tempfile.TemporaryDirectory() as temp_dir:\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n artifact.file.close()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\n\n resource = 
ArtifactResource()\n resource.queryset = artifacts\n _write_export(export.tarfile, resource)\n\n resource = RepositoryResource()\n resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())\n _write_export(export.tarfile, resource)\n\n\ndef export_content(export, repository_version):\n \"\"\"\n Export db-content, and the db-content of the owning repositories\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported\n \"\"\"\n\n def _combine_content_mappings(map1, map2):\n \"\"\"Combine two content mapping dicts into one by combining ids for for each key.\"\"\"\n result = {}\n for key in map1.keys() | map2.keys():\n result[key] = list(set(map1.get(key, []) + map2.get(key, [])))\n return result\n\n dest_dir = os.path.join(\n \"repository-{}_{}\".format(\n str(repository_version.repository.name), repository_version.number\n )\n )\n\n # Export the connection between content and artifacts\n resource = ContentArtifactResource(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n\n # find and export any ModelResource found in pulp_<repo-type>.app.modelresource\n plugin_name = repository_version.repository.pulp_type.split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n if cfg.exportable_classes:\n for cls in cfg.exportable_classes:\n resource = cls(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n if hasattr(resource, \"content_mapping\") and resource.content_mapping:\n content_mapping = _combine_content_mappings(\n content_mapping, resource.content_mapping\n )\n\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\n )\n content_count = repository_version.content.count()\n data = dict(\n message=msg,\n code=\"export.repo.version.content\",\n total=content_count,\n done=content_count,\n state=TASK_STATES.COMPLETED,\n )\n pb = ProgressReport(**data)\n pb.save()\n\n if content_mapping:\n # write the content mapping to tarfile\n cm_json = json.dumps(content_mapping).encode(\"utf8\")\n info = tarfile.TarInfo(name=f\"{dest_dir}/content_mapping.json\")\n info.size = len(cm_json)\n export.tarfile.addfile(info, io.BytesIO(cm_json))\n", "path": "pulpcore/app/importexport.py"}, {"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered if they are 
blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(\n \"content\", \"relative_path\"\n )\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}], "after_files": [{"content": "import os\nimport io\nimport json\nimport tarfile\nimport tempfile\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models.query import QuerySet\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models.progress import ProgressReport\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n RepositoryResource,\n)\nfrom pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE\n\nlog = logging.getLogger(__name__)\n\n\ndef _write_export(the_tarfile, resource, dest_dir=None):\n \"\"\"\n Write the JSON export for the specified resource to the specified tarfile.\n\n The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. 
If dest_dir\n is None, the file will be added at the 'top level' of the_tarfile.\n\n Export-files are UTF-8 encoded.\n\n Args:\n the_tarfile (tarfile.Tarfile): tarfile we are writing into\n resource (import_export.resources.ModelResource): ModelResource to be exported\n dest_dir str(directory-path): directory 'inside' the tarfile to write to\n \"\"\"\n filename = \"{}.{}.json\".format(resource.__module__, type(resource).__name__)\n if dest_dir:\n dest_filename = os.path.join(dest_dir, filename)\n else:\n dest_filename = filename\n\n # If the resource is the type of QuerySet, then export the data in batch to save memory.\n # Otherwise, export all data in oneshot. This is because the underlying libraries\n # (json; django-import-export) do not support to stream the output to file, we export\n # the data in batches to memory and concatenate the json lists via string manipulation.\n with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode=\"w\", encoding=\"utf8\") as temp_file:\n if isinstance(resource.queryset, QuerySet):\n temp_file.write(\"[\")\n total = resource.queryset.count()\n for i in range(0, total, EXPORT_BATCH_SIZE):\n current_batch = i + EXPORT_BATCH_SIZE\n dataset = resource.export(resource.queryset[i:current_batch])\n # Strip \"[\" and \"]\" as we are writing the dataset in batch\n temp_file.write(dataset.json.lstrip(\"[\").rstrip(\"]\"))\n if current_batch < total:\n # Write \",\" if not last loop\n temp_file.write(\", \")\n temp_file.write(\"]\")\n else:\n dataset = resource.export(resource.queryset)\n temp_file.write(dataset.json)\n\n temp_file.flush()\n info = tarfile.TarInfo(name=dest_filename)\n info.size = os.path.getsize(temp_file.name)\n with open(temp_file.name, \"rb\") as fd:\n the_tarfile.addfile(info, fd)\n\n\ndef export_versions(export, version_info):\n \"\"\"\n Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile\n\n Output format is [{\"component\": \"<pluginname>\", \"version\": \"<pluginversion>\"},...]\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n version_info (set): set of (distribution-label,version) tuples for repos in this export\n \"\"\"\n # build the version-list from the distributions for each component\n versions = [{\"component\": label, \"version\": version} for (label, version) in version_info]\n\n version_json = json.dumps(versions).encode(\"utf8\")\n info = tarfile.TarInfo(name=\"versions.json\")\n info.size = len(version_json)\n export.tarfile.addfile(info, io.BytesIO(version_json))\n\n\ndef export_artifacts(export, artifacts):\n \"\"\"\n Export a set of Artifacts, ArtifactResources, and RepositoryResources\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported\n\n Raises:\n ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting\n \"\"\"\n data = dict(message=\"Exporting Artifacts\", code=\"export.artifacts\", total=len(artifacts))\n with ProgressReport(**data) as pb:\n for artifact in pb.iter(artifacts):\n dest = artifact.file.name\n if settings.DEFAULT_FILE_STORAGE != \"pulpcore.app.models.storage.FileSystem\":\n with tempfile.TemporaryDirectory() as temp_dir:\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n artifact.file.close()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\n\n resource = 
ArtifactResource()\n resource.queryset = artifacts\n _write_export(export.tarfile, resource)\n\n resource = RepositoryResource()\n resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())\n _write_export(export.tarfile, resource)\n\n\ndef export_content(export, repository_version):\n \"\"\"\n Export db-content, and the db-content of the owning repositories\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported\n \"\"\"\n\n def _combine_content_mappings(map1, map2):\n \"\"\"Combine two content mapping dicts into one by combining ids for for each key.\"\"\"\n result = {}\n for key in map1.keys() | map2.keys():\n result[key] = list(set(map1.get(key, []) + map2.get(key, [])))\n return result\n\n dest_dir = os.path.join(\n \"repository-{}_{}\".format(\n str(repository_version.repository.name), repository_version.number\n )\n )\n\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n\n # find and export any ModelResource found in pulp_<repo-type>.app.modelresource\n plugin_name = repository_version.repository.pulp_type.split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n if cfg.exportable_classes:\n for cls in cfg.exportable_classes:\n resource = cls(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n if hasattr(resource, \"content_mapping\") and resource.content_mapping:\n content_mapping = _combine_content_mappings(\n content_mapping, resource.content_mapping\n )\n\n # Export the connection between content and artifacts\n resource = ContentArtifactResource(repository_version, content_mapping)\n _write_export(export.tarfile, resource, dest_dir)\n\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\n )\n content_count = repository_version.content.count()\n data = dict(\n message=msg,\n code=\"export.repo.version.content\",\n total=content_count,\n done=content_count,\n state=TASK_STATES.COMPLETED,\n )\n pb = ProgressReport(**data)\n pb.save()\n\n if content_mapping:\n # write the content mapping to tarfile\n cm_json = json.dumps(content_mapping).encode(\"utf8\")\n info = tarfile.TarInfo(name=f\"{dest_dir}/content_mapping.json\")\n info.size = len(cm_json)\n export.tarfile.addfile(info, io.BytesIO(cm_json))\n", "path": "pulpcore/app/importexport.py"}, {"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered 
if they are blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n\n Some plugin-models have sub-repositories. We take advantage of the content-mapping\n machinery to account for those contentartifacts as well.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def __init__(self, repo_version=None, content_mapping=None):\n self.content_mapping = content_mapping\n super().__init__(repo_version)\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n if self.content_mapping:\n all_content = []\n for content_ids in self.content_mapping.values():\n all_content.extend(content_ids)\n vers_content = vers_content.union(\n ContentArtifact.objects.filter(content__in=all_content)\n )\n return vers_content.order_by(\"content\", \"relative_path\")\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}]}
| 3,303 | 624 |
gh_patches_debug_5645
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure OAuth redirect URLs are secure
We construct the OAuth redirect URLs using the Django [`reverse()`](https://docs.djangoproject.com/en/3.2/ref/urlresolvers/#reverse) helper and [`request.build_absolute_uri()`](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri).
This is done in, e.g., the [`login()` view](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py):
```python
ROUTE_AUTH = "oauth:authorize"
route = reverse(ROUTE_AUTH)
redirect_uri = request.build_absolute_uri(route)
```
The docs for `build_absolute_uri` say:
> Mixing HTTP and HTTPS on the same site is discouraged, therefore [build_absolute_uri()](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri) will always generate an absolute URI with the same scheme the current request has. If you need to redirect users to HTTPS, it’s best to let your Web server redirect all HTTP traffic to HTTPS.
When running in AWS, the Load Balancer sits in front of the application and receives all requests; this is the layer where HTTP --> HTTPS redirection happens and where the cert that the browser sees is installed.
Then the request gets to the application container, where it first hits `nginx`. This is where requests for static files end; application requests are forwarded through to `gunicorn` over a Unix socket. Strictly speaking, Django does not receive an HTTPS request.
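To make the symptom concrete, here is a small standalone sketch (hypothetical host, paths, and header name — not code from this repo) of how Django decides the scheme in that setup: a forwarded `X-Forwarded-Proto`-style header alone is ignored unless Django is explicitly told to trust it.
```python
# Hypothetical illustration of the behavior described above; the host name,
# paths, and header are stand-ins, not values taken from this repository.
from django.conf import settings

settings.configure(ALLOWED_HOSTS=["*"])  # minimal config so this runs standalone

from django.test import RequestFactory

# Simulate what gunicorn sees: a plain-HTTP WSGI request forwarded by nginx,
# carrying a header saying the original browser request used HTTPS.
request = RequestFactory().get(
    "/oauth/login",
    HTTP_HOST="benefits.example.com",
    HTTP_X_FORWARDED_PROTO="https",
)

# Without SECURE_PROXY_SSL_HEADER, Django only looks at the socket-level scheme.
print(request.is_secure())                             # False
print(request.build_absolute_uri("/oauth/authorize"))  # http://benefits.example.com/oauth/authorize
```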
## Potential solution
The [`SECURE_PROXY_SSL_HEADER`](https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header) Django setting seems to be aimed at exactly this situation: `nginx` sets a header with an agreed-upon value on the request it forwards into the application via gunicorn, telling the app whether the original request was secure or not.
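For reference, a minimal sketch of what enabling it could look like in `settings.py` — the header name here assumes the conventional `X-Forwarded-Proto` header and would need to match whatever `nginx.conf` actually sets:
```python
# Sketch only: trust the proxy-supplied header (assumed to be X-Forwarded-Proto)
# when deciding whether the original request was HTTPS. Only safe when every
# request is guaranteed to pass through nginx, which can overwrite the header.
if not DEBUG:
    SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```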
In the app's [`nginx.conf`](https://github.com/cal-itp/benefits/blob/dev/nginx.conf#L55) we do set that header on forwarded requests.
## Todo
* [x] Ensure the current `nginx.conf` setting overwrites any header value coming in with the request and/or clear that header first
* [x] Add the `SECURE_PROXY_SSL_HEADER` to `settings.py` when `DEBUG = False`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6
7 def _filter_empty(ls):
8 return [s for s in ls if s]
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14 # SECURITY WARNING: keep the secret key used in production secret!
15 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
19
20 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
21
22 ALLOWED_HOSTS = _filter_empty(os.environ["DJANGO_ALLOWED_HOSTS"].split(","))
23
24 # Application definition
25
26 INSTALLED_APPS = [
27 "django.contrib.messages",
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 "benefits.oauth",
34 ]
35
36 if ADMIN:
37 INSTALLED_APPS.extend(
38 [
39 "django.contrib.admin",
40 "django.contrib.auth",
41 "django.contrib.contenttypes",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.locale.LocaleMiddleware",
50 "benefits.core.middleware.Healthcheck",
51 "django.middleware.common.CommonMiddleware",
52 "django.middleware.csrf.CsrfViewMiddleware",
53 "django.middleware.clickjacking.XFrameOptionsMiddleware",
54 "csp.middleware.CSPMiddleware",
55 "benefits.core.middleware.ChangedLanguageEvent",
56 ]
57
58 if ADMIN:
59 MIDDLEWARE.extend(
60 [
61 "django.contrib.auth.middleware.AuthenticationMiddleware",
62 "django.contrib.messages.middleware.MessageMiddleware",
63 ]
64 )
65
66 if DEBUG:
67 MIDDLEWARE.extend(["benefits.core.middleware.DebugSession"])
68
69 CSRF_COOKIE_AGE = None
70 CSRF_COOKIE_SAMESITE = "Strict"
71 CSRF_COOKIE_HTTPONLY = True
72 CSRF_TRUSTED_ORIGINS = _filter_empty(os.environ["DJANGO_TRUSTED_ORIGINS"].split(","))
73
74 # With `Strict`, the user loses their Django session between leaving our app to
75 # sign in with OAuth, and coming back into our app from the OAuth redirect.
76 # This is because `Strict` disallows our cookie being sent from an external
77 # domain and so the session cookie is lost.
78 #
79 # `Lax` allows the cookie to travel with the user and be sent back to us by the
80 # OAuth server, as long as the request is "safe" i.e. GET
81 SESSION_COOKIE_SAMESITE = "Lax"
82 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
83 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
84 SESSION_COOKIE_NAME = "_benefitssessionid"
85
86 if not DEBUG:
87 CSRF_COOKIE_SECURE = True
88 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
89 SESSION_COOKIE_SECURE = True
90
91 SECURE_BROWSER_XSS_FILTER = True
92
93 ROOT_URLCONF = "benefits.urls"
94
95 template_ctx_processors = [
96 "django.template.context_processors.request",
97 "django.contrib.messages.context_processors.messages",
98 "benefits.core.context_processors.analytics",
99 "benefits.core.context_processors.recaptcha",
100 ]
101
102 if DEBUG:
103 template_ctx_processors.extend(
104 [
105 "django.template.context_processors.debug",
106 "benefits.core.context_processors.debug",
107 ]
108 )
109
110 if ADMIN:
111 template_ctx_processors.extend(
112 [
113 "django.contrib.auth.context_processors.auth",
114 "django.contrib.messages.context_processors.messages",
115 ]
116 )
117
118 TEMPLATES = [
119 {
120 "BACKEND": "django.template.backends.django.DjangoTemplates",
121 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
122 "APP_DIRS": True,
123 "OPTIONS": {
124 "context_processors": template_ctx_processors,
125 },
126 },
127 ]
128
129 WSGI_APPLICATION = "benefits.wsgi.application"
130
131 DATABASES = {
132 "default": {
133 "ENGINE": "django.db.backends.sqlite3",
134 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
135 }
136 }
137
138 # Password validation
139
140 AUTH_PASSWORD_VALIDATORS = []
141
142 if ADMIN:
143 AUTH_PASSWORD_VALIDATORS.extend(
144 [
145 {
146 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
147 },
148 {
149 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
150 },
151 {
152 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
153 },
154 {
155 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
156 },
157 ]
158 )
159
160 # OAuth configuration
161
162 OAUTH_CLIENT_NAME = os.environ.get("DJANGO_OAUTH_CLIENT_NAME")
163
164 if OAUTH_CLIENT_NAME:
165 AUTHLIB_OAUTH_CLIENTS = {
166 OAUTH_CLIENT_NAME: {
167 "client_id": os.environ.get("DJANGO_OAUTH_CLIENT_ID"),
168 "server_metadata_url": f"{os.environ.get('DJANGO_OAUTH_AUTHORITY')}/.well-known/openid-configuration",
169 "client_kwargs": {"code_challenge_method": "S256", "scope": os.environ.get("DJANGO_OAUTH_SCOPE")},
170 }
171 }
172
173 # Internationalization
174
175 LANGUAGE_CODE = "en"
176
177 LANGUAGE_COOKIE_HTTPONLY = True
178 LANGUAGE_COOKIE_SAMESITE = "Strict"
179 LANGUAGE_COOKIE_SECURE = True
180
181 LANGUAGES = [("en", "English"), ("es", "Español")]
182
183 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
184
185 USE_I18N = True
186 USE_L10N = True
187
188 TIME_ZONE = "UTC"
189 USE_TZ = True
190
191 # Static files (CSS, JavaScript, Images)
192
193 STATIC_URL = "/static/"
194 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
195 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
196 STATIC_ROOT = os.path.join(BASE_DIR, "static")
197
198 # Logging configuration
199
200 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
201 LOGGING = {
202 "version": 1,
203 "disable_existing_loggers": False,
204 "formatters": {
205 "default": {
206 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
207 "datefmt": "%d/%b/%Y %H:%M:%S",
208 "style": "{",
209 },
210 },
211 "handlers": {
212 "default": {"class": "logging.StreamHandler", "formatter": "default"},
213 },
214 "root": {
215 "handlers": ["default"],
216 "level": LOG_LEVEL,
217 },
218 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
219 }
220
221 # Analytics configuration
222
223 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
224
225 # Content Security Policy
226 # Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
227
228 # In particular, note that the inner single-quotes are required!
229 # https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
230
231 CSP_DEFAULT_SRC = ["'self'"]
232
233 env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
234 CSP_CONNECT_SRC = ["'self'"]
235 CSP_CONNECT_SRC.extend(env_connect_src)
236
237 env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
238 CSP_FONT_SRC = list(env_font_src)
239
240 CSP_FRAME_ANCESTORS = ["'none'"]
241 CSP_FRAME_SRC = ["'none'"]
242 env_frame_src = _filter_empty(os.environ.get("DJANGO_CSP_FRAME_SRC", "").split(","))
243 if any(env_frame_src):
244 CSP_FRAME_SRC = list(env_frame_src)
245
246 env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
247 CSP_SCRIPT_SRC = ["'unsafe-inline'"]
248 CSP_SCRIPT_SRC.extend(env_script_src)
249
250 env_style_src = _filter_empty(os.environ.get("DJANGO_CSP_STYLE_SRC", "").split(","))
251 CSP_STYLE_SRC = ["'self'", "'unsafe-inline'"]
252 CSP_STYLE_SRC.extend(env_style_src)
253
254 # rate limit configuration
255
256 # number of requests allowed in the given period
257 RATE_LIMIT = int(os.environ.get("DJANGO_RATE_LIMIT", 0))
258
259 # HTTP request methods to rate limit
260 RATE_LIMIT_METHODS = os.environ.get("DJANGO_RATE_LIMIT_METHODS", "").upper().split(",")
261
262 # number of seconds before additional requests are denied
263 RATE_LIMIT_PERIOD = int(os.environ.get("DJANGO_RATE_LIMIT_PERIOD", 0))
264
265 # reCAPTCHA configuration
266
267 RECAPTCHA_API_URL = os.environ.get("DJANGO_RECAPTCHA_API_URL")
268 RECAPTCHA_SITE_KEY = os.environ.get("DJANGO_RECAPTCHA_SITE_KEY")
269 RECAPTCHA_SECRET_KEY = os.environ.get("DJANGO_RECAPTCHA_SECRET_KEY")
270 RECAPTCHA_VERIFY_URL = os.environ.get("DJANGO_RECAPTCHA_VERIFY_URL")
271 RECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))
272
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -90,6 +90,14 @@
SECURE_BROWSER_XSS_FILTER = True
+# the NGINX reverse proxy sits in front of the application in deployed environments
+# SSL terminates before getting to Django, and NGINX adds this header to indicate
+# if the original request was secure or not
+#
+# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header
+if not DEBUG:
+ SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
+
ROOT_URLCONF = "benefits.urls"
template_ctx_processors = [
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -90,6 +90,14 @@\n \n SECURE_BROWSER_XSS_FILTER = True\n \n+# the NGINX reverse proxy sits in front of the application in deployed environments\n+# SSL terminates before getting to Django, and NGINX adds this header to indicate\n+# if the original request was secure or not\n+#\n+# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\n+if not DEBUG:\n+ SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n+\n ROOT_URLCONF = \"benefits.urls\"\n \n template_ctx_processors = [\n", "issue": "Ensure OAuth redirect URLs are secure\nWe construct the OAUth redirect URLs using the Django [`reverse()`](https://docs.djangoproject.com/en/3.2/ref/urlresolvers/#reverse) helper and [`request.build_absolute_uri()`](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri).\r\n\r\nThis is done in e.g. the [`login()` view](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py):\r\n\r\n```python\r\nROUTE_AUTH = \"oauth:authorize\"\r\n\r\nroute = reverse(ROUTE_AUTH)\r\n\r\nredirect_uri = request.build_absolute_uri(route)\r\n```\r\n\r\nThe docs for `build_absolute_uri` say:\r\n\r\n> Mixing HTTP and HTTPS on the same site is discouraged, therefore [build_absolute_uri()](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri) will always generate an absolute URI with the same scheme the current request has. If you need to redirect users to HTTPS, it\u2019s best to let your Web server redirect all HTTP traffic to HTTPS.\r\n\r\nWhen running in AWS, we have the Load Balancer sitting in front of all requests; this is the layer where HTTP --> HTTPS redirection happens and where the cert that the browser sees is installed.\r\n\r\nThen the request gets to the application container, where it first hits `nginx`. This is where requests for static files end; application requests are forwarded through to `gunicorn` over a Unix socket. 
Strictly speaking, Django does not receive an HTTPS request.\r\n\r\n## Potential solution\r\n\r\nThe [`SECURE_PROXY_SSL_HEADER`](https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header) Django setting seems to be aimed at helping this situation, where `nginx` sets a header with an indicated value on the request forwarded into the application via gunicorn, to tell the app if the request was originally secure or not.\r\n\r\nIn the app's [`nginx.conf`](https://github.com/cal-itp/benefits/blob/dev/nginx.conf#L55) we do set that header on forwarded requests.\r\n\r\n## Todo\r\n\r\n* [x] Ensure the current `nginx.conf` setting overwrites any header value coming in with the request and/or clear that header first\r\n* [x] Add the `SECURE_PROXY_SSL_HEADER` to `settings.py` when `DEBUG = False`\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ[\"DJANGO_TRUSTED_ORIGINS\"].split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. 
GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_CLIENT_NAME = os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\"),\n \"server_metadata_url\": f\"{os.environ.get('DJANGO_OAUTH_AUTHORITY')}/.well-known/openid-configuration\",\n \"client_kwargs\": {\"code_challenge_method\": \"S256\", \"scope\": os.environ.get(\"DJANGO_OAUTH_SCOPE\")},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": 
LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC = [\"'self'\"]\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC = list(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nif any(env_frame_src):\n CSP_FRAME_SRC = list(env_frame_src)\n\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC = [\"'unsafe-inline'\"]\nCSP_SCRIPT_SRC.extend(env_script_src)\n\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC = [\"'self'\", \"'unsafe-inline'\"]\nCSP_STYLE_SRC.extend(env_style_src)\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 0))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n 
\"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ[\"DJANGO_TRUSTED_ORIGINS\"].split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_CLIENT_NAME = 
os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\"),\n \"server_metadata_url\": f\"{os.environ.get('DJANGO_OAUTH_AUTHORITY')}/.well-known/openid-configuration\",\n \"client_kwargs\": {\"code_challenge_method\": \"S256\", \"scope\": os.environ.get(\"DJANGO_OAUTH_SCOPE\")},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC = [\"'self'\"]\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC = list(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nif any(env_frame_src):\n CSP_FRAME_SRC = list(env_frame_src)\n\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC = [\"'unsafe-inline'\"]\nCSP_SCRIPT_SRC.extend(env_script_src)\n\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC = [\"'self'\", \"'unsafe-inline'\"]\nCSP_STYLE_SRC.extend(env_style_src)\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 0))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = 
os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n", "path": "benefits/settings.py"}]}
| 3,412 | 162 |
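The patch in the row above hinges on Django honouring the `X-Forwarded-Proto` header that NGINX sets. As a quick, hypothetical sanity check (not part of the repository; the route path is illustrative), the effect of `SECURE_PROXY_SSL_HEADER` can be observed with Django's `RequestFactory`:

```python
# Hypothetical standalone check: with SECURE_PROXY_SSL_HEADER configured,
# a request carrying X-Forwarded-Proto: https is treated as secure, so
# request.build_absolute_uri() emits an https:// URI even though the
# connection Django itself sees is plain HTTP.
import django
from django.conf import settings

settings.configure(
    DEBUG=False,
    ALLOWED_HOSTS=["testserver"],
    SECURE_PROXY_SSL_HEADER=("HTTP_X_FORWARDED_PROTO", "https"),
)
django.setup()

from django.test import RequestFactory

request = RequestFactory().get("/oauth/authorize", HTTP_X_FORWARDED_PROTO="https")
print(request.is_secure())            # True
print(request.build_absolute_uri())   # https://testserver/oauth/authorize
```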
gh_patches_debug_34322
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-2525
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create bridges.py
Contains three simple functions for detection and retrieval of bridges or local bridges in an undirected network.
--- END ISSUE ---
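For orientation, here is a minimal sketch (illustrative only, not the library implementation shown below) of the two notions involved: an edge is a *local bridge* when its endpoints share no neighbours, i.e. it lies on no triangle, and a *bridge* when removing it disconnects its endpoints.

```python
# Illustrative brute-force checks of "local bridge" and "bridge".
import networkx as nx

G = nx.cycle_graph(5)   # every edge of a plain cycle is a local bridge
G.add_edge(0, 2)        # the chord puts (0, 1), (1, 2), (0, 2) on a triangle

# local bridges: endpoints with no common neighbour
local = [(u, v) for u, v in G.edges() if not set(G[u]) & set(G[v])]
print(local)            # e.g. [(0, 4), (2, 3), (3, 4)]

# bridges: removal disconnects the endpoints
def is_bridge(G, u, v):
    H = G.copy()
    H.remove_edge(u, v)
    return not nx.has_path(H, u, v)

print([(u, v) for u, v in G.edges() if is_bridge(G, u, v)])   # [] for a cycle
```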
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/bridges.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # bridges.py - bridge-finding algorithms
3 #
4 # Copyright 2004-2016 NetworkX developers.
5 #
6 # This file is part of NetworkX.
7 #
8 # NetworkX is distributed under a BSD license; see LICENSE.txt for more
9 # information.
10 """Bridge-finding algorithms."""
11 from itertools import chain
12
13 import networkx as nx
14 from networkx.utils import not_implemented_for
15
16 __all__ = ['bridges', 'has_bridges']
17
18
19 @not_implemented_for('multigraph')
20 @not_implemented_for('directed')
21 def bridges(G, root=None):
22 """Generate all bridges in a graph.
23
24 A *bridge* in a graph is an edge whose removal causes the number of
25 connected components of the graph to increase.
26
27 Parameters
28 ----------
29 G : undirected graph
30
31 root : node (optional)
32 A node in the graph `G`. If specified, only the bridges in the
33 connected component containing this node will be returned.
34
35 Yields
36 ------
37 e : edge
38 An edge in the graph whose removal disconnects the graph (or
39 causes the number of connected components to increase).
40
41 Raises
42 ------
43 NodeNotFound
44 If `root` is not in the graph `G`.
45
46 Examples
47 --------
48 The barbell graph with parameter zero has a single bridge::
49
50 >>> G = nx.barbell_graph(10, 0)
51 >>> list(nx.bridges(G))
52 [(9, 10)]
53
54 Notes
55 -----
56 This implementation uses the :func:`networkx.chain_decomposition`
57 function, so it shares its worst-case time complexity, :math:`O(m +
58 n)`, ignoring polylogarithmic factors, where *n* is the number of
59 nodes in the graph and *m* is the number of edges.
60
61 """
62 chains = nx.chain_decomposition(G, root=root)
63 chain_edges = set(chain.from_iterable(chains))
64 for u, v in G.edges():
65 if (u, v) not in chain_edges and (v, u) not in chain_edges:
66 yield u, v
67
68
69 @not_implemented_for('multigraph')
70 @not_implemented_for('directed')
71 def has_bridges(G, root=None):
72 """Decide whether a graph has any bridges.
73
74 A *bridge* in a graph is an edge whose removal causes the number of
75 connected components of the graph to increase.
76
77 Parameters
78 ----------
79 G : undirected graph
80
81 root : node (optional)
82 A node in the graph `G`. If specified, only the bridges in the
83 connected component containing this node will be considered.
84
85 Returns
86 -------
87 bool
88 Whether the graph (or the connected component containing `root`)
89 has any bridges.
90
91 Raises
92 ------
93 NodeNotFound
94 If `root` is not in the graph `G`.
95
96 Examples
97 --------
98 The barbell graph with parameter zero has a single bridge::
99
100 >>> G = nx.barbell_graph(10, 0)
101 >>> nx.has_bridges(G)
102 True
103
104 On the other hand, the cycle graph has no bridges::
105
106 >>> G = nx.cycle_graph(5)
107 >>> nx.has_bridges(G)
108 False
109
110 Notes
111 -----
112 This implementation uses the :func:`networkx.bridges` function, so
113 it shares its worst-case time complexity, :math:`O(m + n)`, ignoring
114 polylogarithmic factors, where *n* is the number of nodes in the
115 graph and *m* is the number of edges.
116
117 """
118 try:
119 next(bridges(G))
120 except StopIteration:
121 return False
122 else:
123 return True
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# bridges.py - bridge-finding algorithms
#
-# Copyright 2004-2016 NetworkX developers.
+# Copyright 2004-2017 NetworkX developers.
#
# This file is part of NetworkX.
#
@@ -13,7 +13,7 @@
import networkx as nx
from networkx.utils import not_implemented_for
-__all__ = ['bridges', 'has_bridges']
+__all__ = ['bridges', 'has_bridges', 'local_bridges']
@not_implemented_for('multigraph')
@@ -121,3 +121,61 @@
return False
else:
return True
+
+
+@not_implemented_for('multigraph')
+@not_implemented_for('directed')
+def local_bridges(G, with_span=True, weight=None):
+ """Iterate over local bridges of `G` optionally computing the span
+
+ A *local bridge* is an edge whose endpoints have no common neighbors.
+ That is, the edge is not part of a triangle in the graph.
+
+ The *span* of a *local bridge* is the shortest path length between
+ the endpoints if the local bridge is removed.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ with_span : bool
+ If True, yield a 3-tuple `(u, v, span)`
+
+ weight : function, string or None (default: None)
+ If function, used to compute edge weights for the span.
+ If string, the edge data attribute used in calculating span.
+ If None, all edges have weight 1.
+
+ Yields
+ ------
+ e : edge
+ The local bridges as an edge 2-tuple of nodes `(u, v)` or
+ as a 3-tuple `(u, v, span)` when `with_span is True`.
+
+ Examples
+ --------
+ A cycle graph has every edge a local bridge with span N-1.
+
+ >>> G = nx.cycle_graph(9)
+ >>> (0, 8, 8) in set(nx.local_bridges(G))
+ True
+ """
+ if with_span is not True:
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ yield u, v
+ else:
+ wt = nx.weighted._weight_function(G, weight)
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ enodes = {u, v}
+ def hide_edge(n, nbr, d):
+ if n not in enodes or nbr not in enodes:
+ return wt(n, nbr, d)
+ return None
+
+ try:
+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)
+ yield u, v, span
+ except nx.NetworkXNoPath:
+ yield u, v, float('inf')
|
{"golden_diff": "diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py\n--- a/networkx/algorithms/bridges.py\n+++ b/networkx/algorithms/bridges.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n # bridges.py - bridge-finding algorithms\n #\n-# Copyright 2004-2016 NetworkX developers.\n+# Copyright 2004-2017 NetworkX developers.\n #\n # This file is part of NetworkX.\n #\n@@ -13,7 +13,7 @@\n import networkx as nx\n from networkx.utils import not_implemented_for\n \n-__all__ = ['bridges', 'has_bridges']\n+__all__ = ['bridges', 'has_bridges', 'local_bridges']\n \n \n @not_implemented_for('multigraph')\n@@ -121,3 +121,61 @@\n return False\n else:\n return True\n+\n+\n+@not_implemented_for('multigraph')\n+@not_implemented_for('directed')\n+def local_bridges(G, with_span=True, weight=None):\n+ \"\"\"Iterate over local bridges of `G` optionally computing the span\n+\n+ A *local bridge* is an edge whose endpoints have no common neighbors.\n+ That is, the edge is not part of a triangle in the graph.\n+\n+ The *span* of a *local bridge* is the shortest path length between\n+ the endpoints if the local bridge is removed.\n+\n+ Parameters\n+ ----------\n+ G : undirected graph\n+\n+ with_span : bool\n+ If True, yield a 3-tuple `(u, v, span)`\n+\n+ weight : function, string or None (default: None)\n+ If function, used to compute edge weights for the span.\n+ If string, the edge data attribute used in calculating span.\n+ If None, all edges have weight 1.\n+\n+ Yields\n+ ------\n+ e : edge\n+ The local bridges as an edge 2-tuple of nodes `(u, v)` or\n+ as a 3-tuple `(u, v, span)` when `with_span is True`.\n+\n+ Examples\n+ --------\n+ A cycle graph has every edge a local bridge with span N-1.\n+\n+ >>> G = nx.cycle_graph(9)\n+ >>> (0, 8, 8) in set(nx.local_bridges(G))\n+ True\n+ \"\"\"\n+ if with_span is not True:\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ yield u, v\n+ else:\n+ wt = nx.weighted._weight_function(G, weight)\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ enodes = {u, v}\n+ def hide_edge(n, nbr, d):\n+ if n not in enodes or nbr not in enodes:\n+ return wt(n, nbr, d)\n+ return None\n+\n+ try:\n+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n+ yield u, v, span\n+ except nx.NetworkXNoPath:\n+ yield u, v, float('inf')\n", "issue": "Create bridges.py\nContains three simple functions for detection and retrieval of bridges or local bridges in a undirected network.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# bridges.py - bridge-finding algorithms\n#\n# Copyright 2004-2016 NetworkX developers.\n#\n# This file is part of NetworkX.\n#\n# NetworkX is distributed under a BSD license; see LICENSE.txt for more\n# information.\n\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['bridges', 'has_bridges']\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This implementation uses the :func:`networkx.chain_decomposition`\n function, so it shares its worst-case time complexity, :math:`O(m +\n n)`, ignoring polylogarithmic factors, where *n* is the number of\n nodes in the graph and *m* is the number of edges.\n\n \"\"\"\n chains = nx.chain_decomposition(G, root=root)\n chain_edges = set(chain.from_iterable(chains))\n for u, v in G.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n yield u, v\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, :math:`O(m + n)`, ignoring\n polylogarithmic factors, where *n* is the number of nodes in the\n graph and *m* is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G))\n except StopIteration:\n return False\n else:\n return True\n", "path": "networkx/algorithms/bridges.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# bridges.py - bridge-finding algorithms\n#\n# Copyright 2004-2017 NetworkX developers.\n#\n# This file is part of NetworkX.\n#\n# NetworkX is distributed under a BSD license; see LICENSE.txt for more\n# information.\n\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['bridges', 'has_bridges', 'local_bridges']\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This implementation uses the :func:`networkx.chain_decomposition`\n function, so it shares its worst-case time complexity, :math:`O(m +\n n)`, ignoring polylogarithmic factors, where *n* is the number of\n nodes in the graph and *m* is the number of edges.\n\n \"\"\"\n chains = nx.chain_decomposition(G, root=root)\n chain_edges = set(chain.from_iterable(chains))\n for u, v in G.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n yield u, v\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, :math:`O(m + n)`, ignoring\n polylogarithmic factors, where *n* is the number of nodes in the\n graph and *m* is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G))\n except StopIteration:\n return False\n else:\n return True\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef local_bridges(G, with_span=True, weight=None):\n \"\"\"Iterate over local bridges of `G` optionally computing the span\n\n A *local bridge* is an edge whose endpoints have no common neighbors.\n That is, the edge is not part of a triangle in the graph.\n\n The *span* of a *local bridge* is the shortest path length between\n the endpoints if the local bridge is removed.\n\n Parameters\n ----------\n G : undirected graph\n\n with_span : bool\n If True, yield a 3-tuple `(u, v, span)`\n\n weight : function, string or None (default: None)\n If function, used to compute edge weights for the span.\n If string, the edge data attribute used in calculating span.\n If None, all edges have weight 1.\n\n Yields\n ------\n e : edge\n The local bridges as an edge 2-tuple of nodes `(u, v)` or\n as a 3-tuple `(u, v, span)` when `with_span is True`.\n\n Examples\n --------\n A cycle graph has every edge a local bridge with span N-1.\n\n >>> G = nx.cycle_graph(9)\n >>> (0, 8, 8) in set(nx.local_bridges(G))\n True\n \"\"\"\n if with_span is not True:\n for u, v in G.edges:\n if not (set(G[u]) & set(G[v])):\n yield u, v\n else:\n wt = nx.weighted._weight_function(G, weight)\n for u, v in G.edges:\n if not 
(set(G[u]) & set(G[v])):\n enodes = {u, v}\n def hide_edge(n, nbr, d):\n if n not in enodes or nbr not in enodes:\n return wt(n, nbr, d)\n return None\n\n try:\n span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n yield u, v, span\n except nx.NetworkXNoPath:\n yield u, v, float('inf')\n", "path": "networkx/algorithms/bridges.py"}]}
| 1,377 | 744 |
gh_patches_debug_37021
|
rasdani/github-patches
|
git_diff
|
Cloud-CV__EvalAI-855
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add nullable attribute to the Team Model
Following changes are required in the Team model:
- [x] Convert the URLFields to CharField with URL
- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields
--- END ISSUE ---
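As context, a minimal sketch (illustrative field definitions only, not the project's actual `models.py`) of what "nullable" means for these Django fields: `null=True` lets the database column store NULL, while `blank=True` lets forms and the admin accept an empty value.

```python
# Sketch of the intended field style inside a configured Django app.
from django.db import models


class Team(models.Model):
    name = models.CharField(max_length=100)
    # nullable in the database and optional in forms
    email = models.EmailField(max_length=70, null=True, blank=True)
    # plain CharField holding a URL instead of URLField
    github_url = models.CharField(max_length=200, null=True, blank=True)

    class Meta:
        app_label = "web"
```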
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/web/models.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.db import models
4
5 from base.models import (TimeStampedModel, )
6
7
8 class Contact(TimeStampedModel):
9 """Model representing details of User submitting queries."""
10 name = models.CharField(max_length=100,)
11 email = models.EmailField(max_length=70,)
12 message = models.CharField(max_length=500,)
13
14 def __unicode__(self):
15 return "%s: %s: %s" % (self.name, self.email, self.message)
16
17 class Meta:
18 app_label = 'web'
19 db_table = 'contact'
20
21
22 class Team(models.Model):
23 """Model representing details of Team"""
24
25 # Team Type Options
26 CORE_TEAM = 'Core Team'
27 CONTRIBUTOR = 'Contributor'
28
29 TEAM_TYPE_OPTIONS = (
30 (CORE_TEAM, CORE_TEAM),
31 (CONTRIBUTOR, CONTRIBUTOR),
32 )
33
34 name = models.CharField(max_length=100)
35 email = models.EmailField(max_length=70, null=True)
36 description = models.TextField(null=True)
37 headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
38 visible = models.BooleanField(default=True)
39 github_url = models.CharField(max_length=200, null=True)
40 linkedin_url = models.CharField(max_length=200, null=True)
41 personal_website = models.CharField(max_length=200, null=True)
42 background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
43 team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
44
45 def __unicode__(self):
46 return self.name
47
48 class Meta:
49 app_label = 'web'
50 db_table = 'teams'
51
```
Path: `apps/web/migrations/0004_change_team_model_field_type.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.10.2 on 2017-03-19 21:06
3 from __future__ import unicode_literals
4
5 from django.db import migrations, models
6
7
8 class Migration(migrations.Migration):
9
10 dependencies = [
11 ('web', '0003_added_description_and_background_image_to_team_model'),
12 ]
13
14 operations = [
15 migrations.AlterField(
16 model_name='team',
17 name='email',
18 field=models.EmailField(max_length=70, null=True),
19 ),
20 migrations.AlterField(
21 model_name='team',
22 name='github_url',
23 field=models.CharField(max_length=200, null=True),
24 ),
25 migrations.AlterField(
26 model_name='team',
27 name='linkedin_url',
28 field=models.CharField(max_length=200, null=True),
29 ),
30 migrations.AlterField(
31 model_name='team',
32 name='personal_website',
33 field=models.CharField(max_length=200, null=True),
34 ),
35 ]
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py
--- a/apps/web/migrations/0004_change_team_model_field_type.py
+++ b/apps/web/migrations/0004_change_team_model_field_type.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Generated by Django 1.10.2 on 2017-03-19 21:06
+# Generated by Django 1.10.2 on 2017-03-19 21:58
from __future__ import unicode_literals
from django.db import migrations, models
@@ -15,21 +15,21 @@
migrations.AlterField(
model_name='team',
name='email',
- field=models.EmailField(max_length=70, null=True),
+ field=models.EmailField(blank=True, max_length=70, null=True),
),
migrations.AlterField(
model_name='team',
name='github_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='linkedin_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='personal_website',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
]
diff --git a/apps/web/models.py b/apps/web/models.py
--- a/apps/web/models.py
+++ b/apps/web/models.py
@@ -32,13 +32,13 @@
)
name = models.CharField(max_length=100)
- email = models.EmailField(max_length=70, null=True)
+ email = models.EmailField(max_length=70, null=True, blank=True)
description = models.TextField(null=True)
headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
visible = models.BooleanField(default=True)
- github_url = models.CharField(max_length=200, null=True)
- linkedin_url = models.CharField(max_length=200, null=True)
- personal_website = models.CharField(max_length=200, null=True)
+ github_url = models.CharField(max_length=200, null=True, blank=True)
+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)
+ personal_website = models.CharField(max_length=200, null=True, blank=True)
background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
|
{"golden_diff": "diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py\n--- a/apps/web/migrations/0004_change_team_model_field_type.py\n+++ b/apps/web/migrations/0004_change_team_model_field_type.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Generated by Django 1.10.2 on 2017-03-19 21:06\n+# Generated by Django 1.10.2 on 2017-03-19 21:58\n from __future__ import unicode_literals\n \n from django.db import migrations, models\n@@ -15,21 +15,21 @@\n migrations.AlterField(\n model_name='team',\n name='email',\n- field=models.EmailField(max_length=70, null=True),\n+ field=models.EmailField(blank=True, max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n ]\ndiff --git a/apps/web/models.py b/apps/web/models.py\n--- a/apps/web/models.py\n+++ b/apps/web/models.py\n@@ -32,13 +32,13 @@\n )\n \n name = models.CharField(max_length=100)\n- email = models.EmailField(max_length=70, null=True)\n+ email = models.EmailField(max_length=70, null=True, blank=True)\n description = models.TextField(null=True)\n headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n- github_url = models.CharField(max_length=200, null=True)\n- linkedin_url = models.CharField(max_length=200, null=True)\n- personal_website = models.CharField(max_length=200, null=True)\n+ github_url = models.CharField(max_length=200, null=True, blank=True)\n+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)\n+ personal_website = models.CharField(max_length=200, null=True, blank=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n", "issue": "Add nullable attribute to the Team Model\nFollowing changes are required in the Team model: \r\n\r\n- [x] Convert the URLFields to CharField with URL\r\n\r\n- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom base.models import (TimeStampedModel, )\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Model representing details of User submitting queries.\"\"\"\n name = models.CharField(max_length=100,)\n email = models.EmailField(max_length=70,)\n message = models.CharField(max_length=500,)\n\n def __unicode__(self):\n return \"%s: %s: %s\" % (self.name, self.email, self.message)\n\n class Meta:\n app_label = 'web'\n db_table = 'contact'\n\n\nclass Team(models.Model):\n \"\"\"Model representing details of Team\"\"\"\n\n # Team Type Options\n CORE_TEAM = 'Core Team'\n CONTRIBUTOR = 'Contributor'\n\n TEAM_TYPE_OPTIONS = (\n (CORE_TEAM, CORE_TEAM),\n (CONTRIBUTOR, CONTRIBUTOR),\n )\n\n name = models.CharField(max_length=100)\n email = models.EmailField(max_length=70, null=True)\n description = models.TextField(null=True)\n 
headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n github_url = models.CharField(max_length=200, null=True)\n linkedin_url = models.CharField(max_length=200, null=True)\n personal_website = models.CharField(max_length=200, null=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'web'\n db_table = 'teams'\n", "path": "apps/web/models.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-03-19 21:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0003_added_description_and_background_image_to_team_model'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='team',\n name='email',\n field=models.EmailField(max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n field=models.CharField(max_length=200, null=True),\n ),\n ]\n", "path": "apps/web/migrations/0004_change_team_model_field_type.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom base.models import (TimeStampedModel, )\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Model representing details of User submitting queries.\"\"\"\n name = models.CharField(max_length=100,)\n email = models.EmailField(max_length=70,)\n message = models.CharField(max_length=500,)\n\n def __unicode__(self):\n return \"%s: %s: %s\" % (self.name, self.email, self.message)\n\n class Meta:\n app_label = 'web'\n db_table = 'contact'\n\n\nclass Team(models.Model):\n \"\"\"Model representing details of Team\"\"\"\n\n # Team Type Options\n CORE_TEAM = 'Core Team'\n CONTRIBUTOR = 'Contributor'\n\n TEAM_TYPE_OPTIONS = (\n (CORE_TEAM, CORE_TEAM),\n (CONTRIBUTOR, CONTRIBUTOR),\n )\n\n name = models.CharField(max_length=100)\n email = models.EmailField(max_length=70, null=True, blank=True)\n description = models.TextField(null=True)\n headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n github_url = models.CharField(max_length=200, null=True, blank=True)\n linkedin_url = models.CharField(max_length=200, null=True, blank=True)\n personal_website = models.CharField(max_length=200, null=True, blank=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'web'\n db_table = 'teams'\n", "path": "apps/web/models.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-03-19 21:58\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0003_added_description_and_background_image_to_team_model'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='team',\n name='email',\n field=models.EmailField(blank=True, max_length=70, 
null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n field=models.CharField(blank=True, max_length=200, null=True),\n ),\n ]\n", "path": "apps/web/migrations/0004_change_team_model_field_type.py"}]}
| 1,098 | 665 |
gh_patches_debug_39809
|
rasdani/github-patches
|
git_diff
|
dask__distributed-246
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Network send/recv to web UI plot
It would be nice to see network activity over time.
We already capture this data in the same way we capture the CPU/Memory data that we already plot, so the job of figuring out how to move the data into the `ColumnDataSource` isn't hard. What _is_ tricky is that we don't have a percentage, but instead have actual MB/s bandwidth counts. We would have to add a second Y-axis (this is doable) and think a bit about how best to set the axes.
This might be an interesting task for anyone looking to get into the Bokeh Web UI, notably @martindurant
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/bokeh/worker_monitor.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 from collections import defaultdict
4 from itertools import chain
5
6 from toolz import pluck
7
8 from ..utils import ignoring
9
10 with ignoring(ImportError):
11 from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,
12 NumeralTickFormatter)
13 from bokeh.palettes import Spectral9
14 from bokeh.plotting import figure
15
16
17 def resource_profile_plot(width=600, height=300):
18 names = ['time', 'cpu', 'memory-percent']
19 source = ColumnDataSource({k: [] for k in names})
20
21 x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
22 y_range = Range1d(0, 1)
23 p = figure(width=width, height=height, x_axis_type='datetime',
24 responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',
25 x_range=x_range, y_range=y_range)
26 p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,
27 color=Spectral9[7], legend='Avg Memory Usage', source=source)
28 p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,
29 color=Spectral9[0], legend='Avg CPU Usage', source=source)
30 p.legend[0].location = 'top_left'
31 p.yaxis[0].formatter = NumeralTickFormatter(format="0 %")
32 p.min_border_right = 10
33
34 return source, p
35
36
37 def resource_profile_update(source, worker_buffer, times_buffer):
38 data = defaultdict(list)
39
40 workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
41
42 for name in ['cpu', 'memory-percent']:
43 data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
44 for msg in worker_buffer]
45 for w in workers]
46
47 data['workers'] = workers
48 data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'
49 for i, t in enumerate(times_buffer)]
50 for w in workers]
51
52 source.data.update(data)
53
54
55 def resource_append(lists, msg):
56 L = list(msg.values())
57 if not L:
58 return
59 for k in ['cpu', 'memory-percent']:
60 lists[k].append(mean(pluck(k, L)) / 100)
61
62 lists['time'].append(mean(pluck('time', L)) * 1000)
63
64
65 def mean(seq):
66 seq = list(seq)
67 return sum(seq) / len(seq)
68
```
Path: `distributed/bokeh/status/server_lifecycle.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function, division, absolute_import
3
4 from collections import deque
5 import json
6 import os
7 from time import time
8
9 from tornado import gen
10 from tornado.httpclient import AsyncHTTPClient
11 from tornado.iostream import StreamClosedError
12 from tornado.ioloop import IOLoop
13
14 from distributed.core import read
15 from distributed.diagnostics.progress_stream import progress_stream
16 from distributed.bokeh.worker_monitor import resource_append
17 import distributed.bokeh
18 from distributed.utils import log_errors
19
20 client = AsyncHTTPClient()
21
22 messages = distributed.bokeh.messages # monkey-patching
23
24 if os.path.exists('.dask-web-ui.json'):
25 with open('.dask-web-ui.json', 'r') as f:
26 options = json.load(f)
27 else:
28 options = {'host': '127.0.0.1',
29 'tcp-port': 8786,
30 'http-port': 9786}
31
32
33 @gen.coroutine
34 def http_get(route):
35 """ Get data from JSON route, store in messages deques """
36 with log_errors():
37 try:
38 response = yield client.fetch(
39 'http://%(host)s:%(http-port)d/' % options
40 + route + '.json')
41 except ConnectionRefusedError:
42 import sys; sys.exit(0)
43 msg = json.loads(response.body.decode())
44 messages[route]['deque'].append(msg)
45 messages[route]['times'].append(time())
46
47
48 last_index = [0]
49 @gen.coroutine
50 def workers():
51 """ Get data from JSON route, store in messages deques """
52 with log_errors():
53 response = yield client.fetch(
54 'http://%(host)s:%(http-port)d/workers.json' % options)
55 msg = json.loads(response.body.decode())
56 if msg:
57 messages['workers']['deque'].append(msg)
58 messages['workers']['times'].append(time())
59 resource_append(messages['workers']['plot-data'], msg)
60 index = messages['workers']['index']
61 index.append(last_index[0] + 1)
62 last_index[0] += 1
63
64
65 @gen.coroutine
66 def progress():
67 with log_errors():
68 stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)
69 while True:
70 try:
71 msg = yield read(stream)
72 except StreamClosedError:
73 break
74 else:
75 messages['progress'] = msg
76
77
78 def on_server_loaded(server_context):
79 n = 60
80 messages['workers'] = {'interval': 500,
81 'deque': deque(maxlen=n),
82 'times': deque(maxlen=n),
83 'index': deque(maxlen=n),
84 'plot-data': {'time': deque(maxlen=n),
85 'cpu': deque(maxlen=n),
86 'memory-percent': deque(maxlen=n)}}
87 server_context.add_periodic_callback(workers, 500)
88
89 messages['tasks'] = {'interval': 100,
90 'deque': deque(maxlen=100),
91 'times': deque(maxlen=100)}
92 server_context.add_periodic_callback(lambda: http_get('tasks'), 100)
93
94 messages['progress'] = {'all': {}, 'in_memory': {},
95 'erred': {}, 'released': {}}
96
97 IOLoop.current().add_callback(progress)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/bokeh/status/server_lifecycle.py b/distributed/bokeh/status/server_lifecycle.py
--- a/distributed/bokeh/status/server_lifecycle.py
+++ b/distributed/bokeh/status/server_lifecycle.py
@@ -83,7 +83,8 @@
'index': deque(maxlen=n),
'plot-data': {'time': deque(maxlen=n),
'cpu': deque(maxlen=n),
- 'memory-percent': deque(maxlen=n)}}
+ 'memory-percent': deque(maxlen=n),
+ 'network-send': deque(maxlen=n)}}
server_context.add_periodic_callback(workers, 500)
messages['tasks'] = {'interval': 100,
diff --git a/distributed/bokeh/worker_monitor.py b/distributed/bokeh/worker_monitor.py
--- a/distributed/bokeh/worker_monitor.py
+++ b/distributed/bokeh/worker_monitor.py
@@ -9,13 +9,13 @@
with ignoring(ImportError):
from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,
- NumeralTickFormatter)
+ NumeralTickFormatter, LinearAxis)
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
def resource_profile_plot(width=600, height=300):
- names = ['time', 'cpu', 'memory-percent']
+ names = ['time', 'cpu', 'memory-percent', 'network-send']
source = ColumnDataSource({k: [] for k in names})
x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
@@ -30,6 +30,14 @@
p.legend[0].location = 'top_left'
p.yaxis[0].formatter = NumeralTickFormatter(format="0 %")
p.min_border_right = 10
+ p.extra_y_ranges = {"send": DataRange1d(bounds=(0, None))}
+ p.add_layout(LinearAxis(y_range_name="send", axis_label="Throughput (MB/s)"),
+ 'right')
+ p.yaxis.axis_label_text_font_size = "10pt"
+
+ p.line(x='time', y='network-send', line_width=2, line_alpha=0.8,
+ color=Spectral9[2], legend='Network', source=source,
+ y_range_name="send")
return source, p
@@ -39,7 +47,7 @@
workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
- for name in ['cpu', 'memory-percent']:
+ for name in ['cpu', 'memory-percent', 'network-send']:
data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
for msg in worker_buffer]
for w in workers]
@@ -60,6 +68,13 @@
lists[k].append(mean(pluck(k, L)) / 100)
lists['time'].append(mean(pluck('time', L)) * 1000)
+ net = mean(pluck('network-send', L, 0))
+ if len(lists['time']) >= 2:
+ t1, t2 = lists['time'][-2], lists['time'][-1]
+ interval = (t2 - t1) / 1000
+ else:
+ interval = 0.5
+ lists['network-send'].append(net / 2**20 / interval)
def mean(seq):
|
{"golden_diff": "diff --git a/distributed/bokeh/status/server_lifecycle.py b/distributed/bokeh/status/server_lifecycle.py\n--- a/distributed/bokeh/status/server_lifecycle.py\n+++ b/distributed/bokeh/status/server_lifecycle.py\n@@ -83,7 +83,8 @@\n 'index': deque(maxlen=n),\n 'plot-data': {'time': deque(maxlen=n),\n 'cpu': deque(maxlen=n),\n- 'memory-percent': deque(maxlen=n)}}\n+ 'memory-percent': deque(maxlen=n),\n+ 'network-send': deque(maxlen=n)}}\n server_context.add_periodic_callback(workers, 500)\n \n messages['tasks'] = {'interval': 100,\ndiff --git a/distributed/bokeh/worker_monitor.py b/distributed/bokeh/worker_monitor.py\n--- a/distributed/bokeh/worker_monitor.py\n+++ b/distributed/bokeh/worker_monitor.py\n@@ -9,13 +9,13 @@\n \n with ignoring(ImportError):\n from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,\n- NumeralTickFormatter)\n+ NumeralTickFormatter, LinearAxis)\n from bokeh.palettes import Spectral9\n from bokeh.plotting import figure\n \n \n def resource_profile_plot(width=600, height=300):\n- names = ['time', 'cpu', 'memory-percent']\n+ names = ['time', 'cpu', 'memory-percent', 'network-send']\n source = ColumnDataSource({k: [] for k in names})\n \n x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)\n@@ -30,6 +30,14 @@\n p.legend[0].location = 'top_left'\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n p.min_border_right = 10\n+ p.extra_y_ranges = {\"send\": DataRange1d(bounds=(0, None))}\n+ p.add_layout(LinearAxis(y_range_name=\"send\", axis_label=\"Throughput (MB/s)\"),\n+ 'right')\n+ p.yaxis.axis_label_text_font_size = \"10pt\"\n+\n+ p.line(x='time', y='network-send', line_width=2, line_alpha=0.8,\n+ color=Spectral9[2], legend='Network', source=source,\n+ y_range_name=\"send\")\n \n return source, p\n \n@@ -39,7 +47,7 @@\n \n workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))\n \n- for name in ['cpu', 'memory-percent']:\n+ for name in ['cpu', 'memory-percent', 'network-send']:\n data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'\n for msg in worker_buffer]\n for w in workers]\n@@ -60,6 +68,13 @@\n lists[k].append(mean(pluck(k, L)) / 100)\n \n lists['time'].append(mean(pluck('time', L)) * 1000)\n+ net = mean(pluck('network-send', L, 0))\n+ if len(lists['time']) >= 2:\n+ t1, t2 = lists['time'][-2], lists['time'][-1]\n+ interval = (t2 - t1) / 1000\n+ else:\n+ interval = 0.5\n+ lists['network-send'].append(net / 2**20 / interval)\n \n \n def mean(seq):\n", "issue": "Add Network send/recv to web UI plot\nIt would be nice to see network activity over time.\n\nWe already capture this data in the same way we capture the CPU/Memory data that we already plot, so the job of figuring out how to move the data into the `ColumnDataSource` isn't hard. What _is_ tricky is that we don't have a percentage, but instead have actual MB/s bandwidth counts. 
We would have to add a second Y-axis (this is doable) and think a bit about how best to set the axes.\n\nThis might be an interesting task for anyone looking to get into the Bokeh Web UI, notably @martindurant \n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom collections import defaultdict\nfrom itertools import chain\n\nfrom toolz import pluck\n\nfrom ..utils import ignoring\n\nwith ignoring(ImportError):\n from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,\n NumeralTickFormatter)\n from bokeh.palettes import Spectral9\n from bokeh.plotting import figure\n\n\ndef resource_profile_plot(width=600, height=300):\n names = ['time', 'cpu', 'memory-percent']\n source = ColumnDataSource({k: [] for k in names})\n\n x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)\n y_range = Range1d(0, 1)\n p = figure(width=width, height=height, x_axis_type='datetime',\n responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',\n x_range=x_range, y_range=y_range)\n p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,\n color=Spectral9[7], legend='Avg Memory Usage', source=source)\n p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,\n color=Spectral9[0], legend='Avg CPU Usage', source=source)\n p.legend[0].location = 'top_left'\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n p.min_border_right = 10\n\n return source, p\n\n\ndef resource_profile_update(source, worker_buffer, times_buffer):\n data = defaultdict(list)\n\n workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))\n\n for name in ['cpu', 'memory-percent']:\n data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'\n for msg in worker_buffer]\n for w in workers]\n\n data['workers'] = workers\n data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'\n for i, t in enumerate(times_buffer)]\n for w in workers]\n\n source.data.update(data)\n\n\ndef resource_append(lists, msg):\n L = list(msg.values())\n if not L:\n return\n for k in ['cpu', 'memory-percent']:\n lists[k].append(mean(pluck(k, L)) / 100)\n\n lists['time'].append(mean(pluck('time', L)) * 1000)\n\n\ndef mean(seq):\n seq = list(seq)\n return sum(seq) / len(seq)\n", "path": "distributed/bokeh/worker_monitor.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function, division, absolute_import\n\nfrom collections import deque\nimport json\nimport os\nfrom time import time\n\nfrom tornado import gen\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.iostream import StreamClosedError\nfrom tornado.ioloop import IOLoop\n\nfrom distributed.core import read\nfrom distributed.diagnostics.progress_stream import progress_stream\nfrom distributed.bokeh.worker_monitor import resource_append\nimport distributed.bokeh\nfrom distributed.utils import log_errors\n\nclient = AsyncHTTPClient()\n\nmessages = distributed.bokeh.messages # monkey-patching\n\nif os.path.exists('.dask-web-ui.json'):\n with open('.dask-web-ui.json', 'r') as f:\n options = json.load(f)\nelse:\n options = {'host': '127.0.0.1',\n 'tcp-port': 8786,\n 'http-port': 9786}\n\n\[email protected]\ndef http_get(route):\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n try:\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/' % options\n + route + '.json')\n except ConnectionRefusedError:\n import sys; sys.exit(0)\n msg = json.loads(response.body.decode())\n 
messages[route]['deque'].append(msg)\n messages[route]['times'].append(time())\n\n\nlast_index = [0]\[email protected]\ndef workers():\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/workers.json' % options)\n msg = json.loads(response.body.decode())\n if msg:\n messages['workers']['deque'].append(msg)\n messages['workers']['times'].append(time())\n resource_append(messages['workers']['plot-data'], msg)\n index = messages['workers']['index']\n index.append(last_index[0] + 1)\n last_index[0] += 1\n\n\[email protected]\ndef progress():\n with log_errors():\n stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)\n while True:\n try:\n msg = yield read(stream)\n except StreamClosedError:\n break\n else:\n messages['progress'] = msg\n\n\ndef on_server_loaded(server_context):\n n = 60\n messages['workers'] = {'interval': 500,\n 'deque': deque(maxlen=n),\n 'times': deque(maxlen=n),\n 'index': deque(maxlen=n),\n 'plot-data': {'time': deque(maxlen=n),\n 'cpu': deque(maxlen=n),\n 'memory-percent': deque(maxlen=n)}}\n server_context.add_periodic_callback(workers, 500)\n\n messages['tasks'] = {'interval': 100,\n 'deque': deque(maxlen=100),\n 'times': deque(maxlen=100)}\n server_context.add_periodic_callback(lambda: http_get('tasks'), 100)\n\n messages['progress'] = {'all': {}, 'in_memory': {},\n 'erred': {}, 'released': {}}\n\n IOLoop.current().add_callback(progress)\n", "path": "distributed/bokeh/status/server_lifecycle.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom collections import defaultdict\nfrom itertools import chain\n\nfrom toolz import pluck\n\nfrom ..utils import ignoring\n\nwith ignoring(ImportError):\n from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,\n NumeralTickFormatter, LinearAxis)\n from bokeh.palettes import Spectral9\n from bokeh.plotting import figure\n\n\ndef resource_profile_plot(width=600, height=300):\n names = ['time', 'cpu', 'memory-percent', 'network-send']\n source = ColumnDataSource({k: [] for k in names})\n\n x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)\n y_range = Range1d(0, 1)\n p = figure(width=width, height=height, x_axis_type='datetime',\n responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',\n x_range=x_range, y_range=y_range)\n p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,\n color=Spectral9[7], legend='Avg Memory Usage', source=source)\n p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,\n color=Spectral9[0], legend='Avg CPU Usage', source=source)\n p.legend[0].location = 'top_left'\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n p.min_border_right = 10\n p.extra_y_ranges = {\"send\": DataRange1d(bounds=(0, None))}\n p.add_layout(LinearAxis(y_range_name=\"send\", axis_label=\"Throughput (MB/s)\"),\n 'right')\n p.yaxis.axis_label_text_font_size = \"10pt\"\n\n p.line(x='time', y='network-send', line_width=2, line_alpha=0.8,\n color=Spectral9[2], legend='Network', source=source,\n y_range_name=\"send\")\n\n return source, p\n\n\ndef resource_profile_update(source, worker_buffer, times_buffer):\n data = defaultdict(list)\n\n workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))\n\n for name in ['cpu', 'memory-percent', 'network-send']:\n data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'\n for msg in worker_buffer]\n for w in workers]\n\n 
data['workers'] = workers\n data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'\n for i, t in enumerate(times_buffer)]\n for w in workers]\n\n source.data.update(data)\n\n\ndef resource_append(lists, msg):\n L = list(msg.values())\n if not L:\n return\n for k in ['cpu', 'memory-percent']:\n lists[k].append(mean(pluck(k, L)) / 100)\n\n lists['time'].append(mean(pluck('time', L)) * 1000)\n net = mean(pluck('network-send', L, 0))\n if len(lists['time']) >= 2:\n t1, t2 = lists['time'][-2], lists['time'][-1]\n interval = (t2 - t1) / 1000\n else:\n interval = 0.5\n lists['network-send'].append(net / 2**20 / interval)\n\n\ndef mean(seq):\n seq = list(seq)\n return sum(seq) / len(seq)\n", "path": "distributed/bokeh/worker_monitor.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function, division, absolute_import\n\nfrom collections import deque\nimport json\nimport os\nfrom time import time\n\nfrom tornado import gen\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.iostream import StreamClosedError\nfrom tornado.ioloop import IOLoop\n\nfrom distributed.core import read\nfrom distributed.diagnostics.progress_stream import progress_stream\nfrom distributed.bokeh.worker_monitor import resource_append\nimport distributed.bokeh\nfrom distributed.utils import log_errors\n\nclient = AsyncHTTPClient()\n\nmessages = distributed.bokeh.messages # monkey-patching\n\nif os.path.exists('.dask-web-ui.json'):\n with open('.dask-web-ui.json', 'r') as f:\n options = json.load(f)\nelse:\n options = {'host': '127.0.0.1',\n 'tcp-port': 8786,\n 'http-port': 9786}\n\n\[email protected]\ndef http_get(route):\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n try:\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/' % options\n + route + '.json')\n except ConnectionRefusedError:\n import sys; sys.exit(0)\n msg = json.loads(response.body.decode())\n messages[route]['deque'].append(msg)\n messages[route]['times'].append(time())\n\n\nlast_index = [0]\[email protected]\ndef workers():\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/workers.json' % options)\n msg = json.loads(response.body.decode())\n if msg:\n messages['workers']['deque'].append(msg)\n messages['workers']['times'].append(time())\n resource_append(messages['workers']['plot-data'], msg)\n index = messages['workers']['index']\n index.append(last_index[0] + 1)\n last_index[0] += 1\n\n\[email protected]\ndef progress():\n with log_errors():\n stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)\n while True:\n try:\n msg = yield read(stream)\n except StreamClosedError:\n break\n else:\n messages['progress'] = msg\n\n\ndef on_server_loaded(server_context):\n n = 60\n messages['workers'] = {'interval': 500,\n 'deque': deque(maxlen=n),\n 'times': deque(maxlen=n),\n 'index': deque(maxlen=n),\n 'plot-data': {'time': deque(maxlen=n),\n 'cpu': deque(maxlen=n),\n 'memory-percent': deque(maxlen=n),\n 'network-send': deque(maxlen=n)}}\n server_context.add_periodic_callback(workers, 500)\n\n messages['tasks'] = {'interval': 100,\n 'deque': deque(maxlen=100),\n 'times': deque(maxlen=100)}\n server_context.add_periodic_callback(lambda: http_get('tasks'), 100)\n\n messages['progress'] = {'all': {}, 'in_memory': {},\n 'erred': {}, 'released': {}}\n\n IOLoop.current().add_callback(progress)\n", "path": "distributed/bokeh/status/server_lifecycle.py"}]}
| 2,062 | 806 |
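The fix in the row above relies on Bokeh's named extra y-ranges to plot an absolute MB/s series next to percentage series that live on the default axis. A stripped-down sketch of that twin-axis pattern, with illustrative sample data and range bounds:

```python
from bokeh.models import LinearAxis, Range1d
from bokeh.plotting import figure

p = figure()
# Default (left) axis: percentage-style series in the 0..1 range.
p.line([0, 1, 2], [0.2, 0.5, 0.4])

# Named extra range plus a second axis on the right for absolute values.
p.extra_y_ranges = {"mbps": Range1d(start=0, end=100)}
p.add_layout(LinearAxis(y_range_name="mbps", axis_label="Throughput (MB/s)"), "right")

# Series attached to the extra range by name.
p.line([0, 1, 2], [10, 60, 35], y_range_name="mbps")
```

Keeping the bandwidth series on its own range avoids rescaling it into a fake percentage, which is the design question raised in the issue text.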
gh_patches_debug_15190
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-490
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release Ariadne 0.13
# TODO
- [x] Update dependencies
- [x] Fix linters errors on repo
- [x] #483
- [x] Update changelog
- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)
- [x] Reach out to our amazing art team for tweet graphics
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Programming Language :: Python :: 3.8",
14 "Topic :: Software Development :: Libraries :: Python Modules",
15 ]
16
17 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
18 with open(README_PATH, "r", encoding="utf8") as f:
19 README = f.read()
20
21 setup(
22 name="ariadne",
23 author="Mirumee Software",
24 author_email="[email protected]",
25 description="Ariadne is a Python library for implementing GraphQL servers.",
26 long_description=README,
27 long_description_content_type="text/markdown",
28 license="BSD",
29 version="0.12.0",
30 url="https://github.com/mirumee/ariadne",
31 packages=["ariadne"],
32 include_package_data=True,
33 install_requires=[
34 "graphql-core>=3.1.0",
35 "starlette<0.15",
36 "typing_extensions>=3.6.0",
37 ],
38 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
39 classifiers=CLASSIFIERS,
40 platforms=["any"],
41 zip_safe=False,
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
]
@@ -26,7 +27,7 @@
long_description=README,
long_description_content_type="text/markdown",
license="BSD",
- version="0.12.0",
+ version="0.13.0",
url="https://github.com/mirumee/ariadne",
packages=["ariadne"],
include_package_data=True,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ]\n \n@@ -26,7 +27,7 @@\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n- version=\"0.12.0\",\n+ version=\"0.13.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n", "issue": "Release Ariadne 0.13\n# TODO\r\n\r\n- [x] Update dependencies\r\n- [x] Fix linters errors on repo\r\n- [x] #483 \r\n- [x] Update changelog\r\n- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)\r\n- [x] Reach to our amazing art team for tweet graphics\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.12.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.1.0\",\n \"starlette<0.15\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.13.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.1.0\",\n \"starlette<0.15\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 764 | 179 |
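The change in the row above is a routine release bump: the `version` string and the supported-Python classifiers in `setup.py` are updated together. A minimal, hypothetical `setup.py` showing only those two knobs:

```python
from setuptools import setup

setup(
    name="example-package",  # hypothetical name, not the real project
    version="0.13.0",
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",  # newly advertised support
    ],
)
```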
gh_patches_debug_25353
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-3759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement Negation operation for FV HE Scheme
## Feature Description
The negation operation of the FV scheme needs to be implemented.
It should negate a ciphertext object and return the result in ciphertext form.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/frameworks/torch/he/fv/evaluator.py`
Content:
```
1 import copy
2
3 from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
4 from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
5 from syft.frameworks.torch.he.fv.ciphertext import CipherText
6 from syft.frameworks.torch.he.fv.plaintext import PlainText
7
8
9 class Evaluator:
10 def __init__(self, context):
11 self.context = context
12 self.coeff_modulus = context.param.coeff_modulus
13 self.plain_modulus = context.param.plain_modulus
14
15 def add(self, op1, op2):
16 """Adds two operands using FV scheme.
17
18 Args:
19 op1 (Ciphertext/Plaintext): First argument.
20 op2 (Ciphertext/Plaintext): Second argument.
21
22 Returns:
23 If both arguments are Plaintext elements then the result will be a Plaintext object
24 otherwise a Ciphertext object with value equivalent to the result of addition
25 operation of two provided arguments.
26 """
27 if isinstance(op1, CipherText) and isinstance(op2, CipherText):
28 return self._add_cipher_cipher(op1, op2)
29
30 elif isinstance(op1, PlainText) and isinstance(op2, PlainText):
31 return self._add_plain_plain(op1, op2)
32
33 elif isinstance(op1, PlainText) and isinstance(op2, CipherText):
34 return self._add_plain_cipher(op1, op2)
35
36 elif isinstance(op1, CipherText) and isinstance(op2, PlainText):
37 return self._add_plain_cipher(op2, op1)
38
39 else:
40 raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
41
42 def _add_cipher_cipher(self, ct1, ct2):
43 """Adds two ciphertexts.
44
45 Args:
46 ct1 (Ciphertext): First argument.
47 ct2 (Ciphertext): Second argument.
48
49 Returns:
50 A Ciphertext object with value equivalent to result of addition of two provided
51 arguments.
52 """
53 ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)
54 result = ct2 if len(ct2) > len(ct1) else ct1
55
56 for i in range(min(len(ct1), len(ct2))):
57 for j in range(len(self.coeff_modulus)):
58 result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])
59
60 return CipherText(result)
61
62 def _add_plain_cipher(self, pt, ct):
63 """Adds a ciphertext and a plaintext.
64
65 Args:
66 pt (Plaintext): First argument.
67 ct (Ciphertext): Second argument.
68 Returns:
69 A Ciphertext object with value equivalent to result of addition of two provided
70 arguments.
71 """
72 ct = copy.deepcopy(ct)
73 return multiply_add_plain_with_delta(ct, pt, self.context)
74
75 def _add_plain_plain(self, pt1, pt2):
76 """Adds two plaintexts object.
77
78 Args:
79 pt1 (Plaintext): First argument.
80 pt2 (Plaintext): Second argument.
81
82 Returns:
83 A Plaintext object with value equivalent to result of addition of two provided
84 arguments.
85 """
86 pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)
87 return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py
--- a/syft/frameworks/torch/he/fv/evaluator.py
+++ b/syft/frameworks/torch/he/fv/evaluator.py
@@ -1,6 +1,7 @@
import copy
from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
+from syft.frameworks.torch.he.fv.util.operations import negate_mod
from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
from syft.frameworks.torch.he.fv.ciphertext import CipherText
from syft.frameworks.torch.he.fv.plaintext import PlainText
@@ -39,6 +40,24 @@
else:
raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
+ def negate(self, ct):
+ """Negate a cipher i.e -(ct_value)
+
+ Args:
+ ct (Ciphertext): Ciphertext to be negated.
+
+ Returns:
+ A Ciphertext object with value equivalent to result of -(ct_value).
+ """
+ result = copy.deepcopy(ct.data)
+
+ for i in range(len(result)):
+ for j in range(len(result[i])):
+ for k in range(len(result[i][j])):
+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])
+
+ return CipherText(result)
+
def _add_cipher_cipher(self, ct1, ct2):
"""Adds two ciphertexts.
|
{"golden_diff": "diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py\n--- a/syft/frameworks/torch/he/fv/evaluator.py\n+++ b/syft/frameworks/torch/he/fv/evaluator.py\n@@ -1,6 +1,7 @@\n import copy\n \n from syft.frameworks.torch.he.fv.util.operations import poly_add_mod\n+from syft.frameworks.torch.he.fv.util.operations import negate_mod\n from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\n from syft.frameworks.torch.he.fv.ciphertext import CipherText\n from syft.frameworks.torch.he.fv.plaintext import PlainText\n@@ -39,6 +40,24 @@\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n \n+ def negate(self, ct):\n+ \"\"\"Negate a cipher i.e -(ct_value)\n+\n+ Args:\n+ ct (Ciphertext): Ciphertext to be negated.\n+\n+ Returns:\n+ A Ciphertext object with value equivalent to result of -(ct_value).\n+ \"\"\"\n+ result = copy.deepcopy(ct.data)\n+\n+ for i in range(len(result)):\n+ for j in range(len(result[i])):\n+ for k in range(len(result[i][j])):\n+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])\n+\n+ return CipherText(result)\n+\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n", "issue": "Implement Negation operation for FV HE Scheme\n## Feature Description\r\nNegation operations of FV Scheme need to be implemented.\r\n\r\nIt should Negate a ciphertext object and return the result in ciphertext form.\n", "before_files": [{"content": "import copy\n\nfrom syft.frameworks.torch.he.fv.util.operations import poly_add_mod\nfrom syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\nfrom syft.frameworks.torch.he.fv.ciphertext import CipherText\nfrom syft.frameworks.torch.he.fv.plaintext import PlainText\n\n\nclass Evaluator:\n def __init__(self, context):\n self.context = context\n self.coeff_modulus = context.param.coeff_modulus\n self.plain_modulus = context.param.plain_modulus\n\n def add(self, op1, op2):\n \"\"\"Adds two operands using FV scheme.\n\n Args:\n op1 (Ciphertext/Plaintext): First argument.\n op2 (Ciphertext/Plaintext): Second argument.\n\n Returns:\n If both arguments are Plaintext elements then the result will be a Plaintext object\n otherwise a Ciphertext object with value equivalent to the result of addition\n operation of two provided arguments.\n \"\"\"\n if isinstance(op1, CipherText) and isinstance(op2, CipherText):\n return self._add_cipher_cipher(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, PlainText):\n return self._add_plain_plain(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, CipherText):\n return self._add_plain_cipher(op1, op2)\n\n elif isinstance(op1, CipherText) and isinstance(op2, PlainText):\n return self._add_plain_cipher(op2, op1)\n\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n\n Args:\n ct1 (Ciphertext): First argument.\n ct2 (Ciphertext): Second argument.\n\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)\n result = ct2 if len(ct2) > len(ct1) else ct1\n\n for i in range(min(len(ct1), len(ct2))):\n for j in range(len(self.coeff_modulus)):\n result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])\n\n return CipherText(result)\n\n def 
_add_plain_cipher(self, pt, ct):\n \"\"\"Adds a ciphertext and a plaintext.\n\n Args:\n pt (Plaintext): First argument.\n ct (Ciphertext): Second argument.\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct = copy.deepcopy(ct)\n return multiply_add_plain_with_delta(ct, pt, self.context)\n\n def _add_plain_plain(self, pt1, pt2):\n \"\"\"Adds two plaintexts object.\n\n Args:\n pt1 (Plaintext): First argument.\n pt2 (Plaintext): Second argument.\n\n Returns:\n A Plaintext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)\n return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))\n", "path": "syft/frameworks/torch/he/fv/evaluator.py"}], "after_files": [{"content": "import copy\n\nfrom syft.frameworks.torch.he.fv.util.operations import poly_add_mod\nfrom syft.frameworks.torch.he.fv.util.operations import negate_mod\nfrom syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\nfrom syft.frameworks.torch.he.fv.ciphertext import CipherText\nfrom syft.frameworks.torch.he.fv.plaintext import PlainText\n\n\nclass Evaluator:\n def __init__(self, context):\n self.context = context\n self.coeff_modulus = context.param.coeff_modulus\n self.plain_modulus = context.param.plain_modulus\n\n def add(self, op1, op2):\n \"\"\"Adds two operands using FV scheme.\n\n Args:\n op1 (Ciphertext/Plaintext): First argument.\n op2 (Ciphertext/Plaintext): Second argument.\n\n Returns:\n If both arguments are Plaintext elements then the result will be a Plaintext object\n otherwise a Ciphertext object with value equivalent to the result of addition\n operation of two provided arguments.\n \"\"\"\n if isinstance(op1, CipherText) and isinstance(op2, CipherText):\n return self._add_cipher_cipher(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, PlainText):\n return self._add_plain_plain(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, CipherText):\n return self._add_plain_cipher(op1, op2)\n\n elif isinstance(op1, CipherText) and isinstance(op2, PlainText):\n return self._add_plain_cipher(op2, op1)\n\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n\n def negate(self, ct):\n \"\"\"Negate a cipher i.e -(ct_value)\n\n Args:\n ct (Ciphertext): Ciphertext to be negated.\n\n Returns:\n A Ciphertext object with value equivalent to result of -(ct_value).\n \"\"\"\n result = copy.deepcopy(ct.data)\n\n for i in range(len(result)):\n for j in range(len(result[i])):\n for k in range(len(result[i][j])):\n result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])\n\n return CipherText(result)\n\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n\n Args:\n ct1 (Ciphertext): First argument.\n ct2 (Ciphertext): Second argument.\n\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)\n result = ct2 if len(ct2) > len(ct1) else ct1\n\n for i in range(min(len(ct1), len(ct2))):\n for j in range(len(self.coeff_modulus)):\n result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])\n\n return CipherText(result)\n\n def _add_plain_cipher(self, pt, ct):\n \"\"\"Adds a ciphertext and a plaintext.\n\n Args:\n pt (Plaintext): First argument.\n ct (Ciphertext): Second argument.\n Returns:\n A Ciphertext 
object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct = copy.deepcopy(ct)\n return multiply_add_plain_with_delta(ct, pt, self.context)\n\n def _add_plain_plain(self, pt1, pt2):\n \"\"\"Adds two plaintexts object.\n\n Args:\n pt1 (Plaintext): First argument.\n pt2 (Plaintext): Second argument.\n\n Returns:\n A Plaintext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)\n return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))\n", "path": "syft/frameworks/torch/he/fv/evaluator.py"}]}
| 1,217 | 368 |
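The negation added in the row above boils down to coefficient-wise modular negation: each coefficient c becomes (q - c) mod q for its coefficient modulus q, applied across the ciphertext's nested lists. A simplified stand-in for that arithmetic (the real helper lives in syft's `util.operations`; the function names here are illustrative):

```python
def negate_mod(value, modulus):
    """Return -value mod modulus, kept in the range [0, modulus)."""
    return (modulus - value) % modulus


def negate_poly(coefficients, modulus):
    """Negate a polynomial coefficient-wise."""
    return [negate_mod(c, modulus) for c in coefficients]


# -[0, 1, 5] mod 7 == [0, 6, 2]
assert negate_poly([0, 1, 5], 7) == [0, 6, 2]
```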
gh_patches_debug_28241
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2589
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
Sentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)
```
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
(5 additional frame(s) were not displayed)
...
File "django/contrib/admin/sites.py", line 242, in inner
return view(request, *args, **kwargs)
File "django/utils/decorators.py", line 46, in _wrapper
return bound_method(*args, **kwargs)
File "django/utils/decorators.py", line 133, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "django/contrib/admin/options.py", line 1959, in changelist_view
response = self.response_action(
File "django/contrib/admin/options.py", line 1588, in response_action
response = func(self, request, queryset)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/promotion/admin.py`
Content:
```
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from promotion.forms import PromotionRequestForm
6 from events.services import is_organiser
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and not is_organiser(request.member, obj.event):
27 return False
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 pass
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -27,25 +27,25 @@
return False
return super().has_change_permission(request, obj)
- def mark_not_started(self, queryset):
+ def mark_not_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.NOT_STARTED)
mark_not_started.short_description = "Mark requests as not started"
- def mark_started(self, queryset):
+ def mark_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.STARTED)
mark_started.short_description = "Mark requests as started"
- def mark_finished(self, queryset):
+ def mark_finished(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.FINISHED)
mark_finished.short_description = "Mark requests as finished"
- def mark_published(self, queryset):
+ def mark_published(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.PUBLISHED)
|
{"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -27,25 +27,25 @@\n return False\n return super().has_change_permission(request, obj)\n \n- def mark_not_started(self, queryset):\n+ def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n \n mark_not_started.short_description = \"Mark requests as not started\"\n \n- def mark_started(self, queryset):\n+ def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n \n mark_started.short_description = \"Mark requests as started\"\n \n- def mark_finished(self, queryset):\n+ def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n \n mark_finished.short_description = \"Mark requests as finished\"\n \n- def mark_published(self, queryset):\n+ def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n", "issue": "TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\nSentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)\n\n```\nTypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\n(5 additional frame(s) were not displayed)\n...\n File \"django/contrib/admin/sites.py\", line 242, in inner\n return view(request, *args, **kwargs)\n File \"django/utils/decorators.py\", line 46, in _wrapper\n return bound_method(*args, **kwargs)\n File \"django/utils/decorators.py\", line 133, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"django/contrib/admin/options.py\", line 1959, in changelist_view\n response = self.response_action(\n File \"django/contrib/admin/options.py\", line 1588, in response_action\n response = func(self, request, queryset)\n```\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom promotion.forms import PromotionRequestForm\nfrom events.services import is_organiser\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, queryset):\n \"\"\"Change the status of the 
event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom promotion.forms import PromotionRequestForm\nfrom events.services import is_organiser\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}]}
| 1,057 | 290 |
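The traceback in the row above comes down to the Django admin action protocol: custom actions are invoked as `action(modeladmin, request, queryset)`, so every action method must accept `request` in addition to `self` and `queryset`. A minimal, hypothetical illustration of the corrected signature:

```python
from django.contrib import admin


class RequestAdmin(admin.ModelAdmin):  # hypothetical admin, for illustration only
    actions = ["mark_finished"]

    def mark_finished(self, request, queryset):
        # Django passes the current request as the second positional argument.
        queryset.update(status="finished")

    mark_finished.short_description = "Mark requests as finished"
```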
gh_patches_debug_13577
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-1397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Service "s3" not yet available, retrying...
Hello there
After installing localstack and trying to start several services on my machine, s3 always failed to start
The command I am using to start them up is
`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`
(With `DEBUG=1` in place already for debugging)
First few lines of the output are:
```
2018-06-19T10:05:57:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:00:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:05:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:08:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:12:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:15:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:19:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 344, in check_infra
raise e
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 341, in check_infra
plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 80, in check
return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py", line 23, in check_s3
assert isinstance(out['Buckets'], list)
TypeError: 'NoneType' object is not subscriptable
During handling of the above exception, another exception occurred:
...
```
I have been trying to tackle this problem for a few hours already, without any success. I tried the latest version of localstack as well as 0.8.6.1, which works on another machine
I am installing it through pip (`pip install localstack`)
Thanks for any help in advance!
┆Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/s3/s3_starter.py`
Content:
```
1 import sys
2 import logging
3 import traceback
4 from moto.s3 import models as s3_models
5 from moto.server import main as moto_main
6 from localstack import config
7 from localstack.constants import DEFAULT_PORT_S3_BACKEND
8 from localstack.utils.aws import aws_stack
9 from localstack.utils.common import wait_for_port_open
10 from localstack.services.infra import (
11 get_service_protocol, start_proxy_for_service, do_run, setup_logging)
12
13 LOGGER = logging.getLogger(__name__)
14
15 # max file size for S3 objects (in MB)
16 S3_MAX_FILE_SIZE_MB = 128
17
18
19 def check_s3(expect_shutdown=False, print_error=False):
20 out = None
21 try:
22 # wait for port to be opened
23 wait_for_port_open(DEFAULT_PORT_S3_BACKEND)
24 # check S3
25 out = aws_stack.connect_to_service(service_name='s3').list_buckets()
26 except Exception as e:
27 if print_error:
28 LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))
29 if expect_shutdown:
30 assert out is None
31 else:
32 assert isinstance(out['Buckets'], list)
33
34
35 def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
36 port = port or config.PORT_S3
37 backend_port = DEFAULT_PORT_S3_BACKEND
38 cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
39 print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
40 start_proxy_for_service('s3', port, backend_port, update_listener)
41 env_vars = {'PYTHONPATH': ':'.join(sys.path)}
42 return do_run(cmd, asynchronous, env_vars=env_vars)
43
44
45 def apply_patches():
46 s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024
47
48 def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):
49 return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,
50 version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)
51
52 original_init = s3_models.FakeKey.__init__
53 s3_models.FakeKey.__init__ = init
54
55
56 def main():
57 setup_logging()
58 # patch moto implementation
59 apply_patches()
60 # start API
61 sys.exit(moto_main())
62
63
64 if __name__ == '__main__':
65 main()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py
--- a/localstack/services/s3/s3_starter.py
+++ b/localstack/services/s3/s3_starter.py
@@ -35,7 +35,7 @@
def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_S3
backend_port = DEFAULT_PORT_S3_BACKEND
- cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
+ cmd = '%s "%s" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)
print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
start_proxy_for_service('s3', port, backend_port, update_listener)
env_vars = {'PYTHONPATH': ':'.join(sys.path)}
|
{"golden_diff": "diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py\n--- a/localstack/services/s3/s3_starter.py\n+++ b/localstack/services/s3/s3_starter.py\n@@ -35,7 +35,7 @@\n def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n- cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n+ cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n", "issue": "Service \"s3\" not yet available, retrying...\nHello there\n\nAfter installing localstack and trying to start several services on my machine, s3 always failed to start\nThe command I am using to start them up is\n\n`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`\n(With `DEBUG=1` in place already for debugging)\n\nFirst few lines of the output are:\n\n```\n2018-06-19T10:05:57:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:00:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:05:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:08:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:12:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:15:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:19:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 344, in check_infra\n raise e\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 341, in check_infra\n plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 80, in check\n return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py\", line 23, in check_s3\n assert isinstance(out['Buckets'], list)\nTypeError: 'NoneType' object is not subscriptable\n\nDuring handling of the above exception, another exception occurred:\n...\n```\n\nI have been trying to tackle this problem for a few hours already, without any success, I tried the latest verion of localstack as well as 0.8.6.1 which works on another machine\n\nI am installing it trough pip (`pip install localstack`)\n\nThanks for any help in advance!\n\n\n\n\u2506Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)\n\n", "before_files": [{"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, 
start_proxy_for_service, do_run, setup_logging)\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 128\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n", "path": "localstack/services/s3/s3_starter.py"}], "after_files": [{"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, start_proxy_for_service, do_run, setup_logging)\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 128\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' 
% (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n", "path": "localstack/services/s3/s3_starter.py"}]}
| 1,669 | 226 |
gh_patches_debug_1000
|
rasdani/github-patches
|
git_diff
|
replicate__cog-653
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support lists of `BaseModel` for outputs
The following model fails on Replicate.com with a cryptic `Can't pickle <class 'predict.Output'>: it's not the same object as predict.Output` error:
```python
class Output(BaseModel):
foo: str
bar: str
class Predictor(BasePredictor):
def predict(self) -> List[Output]:
return [Output(foo="foo", bar="bar")]
```
This is [documented deep in Cog's Python layer](https://github.com/replicate/cog/blob/main/python/cog/server/runner.py#L238).
We should support this type of output.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/cog/server/runner.py`
Content:
```
1 import multiprocessing
2 import types
3 from enum import Enum
4 from multiprocessing.connection import Connection
5 from typing import Any, Dict, List, Optional
6
7 from pydantic import BaseModel
8
9 from ..predictor import load_config, load_predictor
10 from .log_capture import capture_log
11
12
13 class PredictionRunner:
14 PROCESSING_DONE = 1
15
16 class OutputType(Enum):
17 NOT_STARTED = 0
18 SINGLE = 1
19 GENERATOR = 2
20
21 def __init__(self) -> None:
22 self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(
23 duplex=False
24 )
25 (
26 self.prediction_input_pipe_reader,
27 self.prediction_input_pipe_writer,
28 ) = multiprocessing.Pipe(duplex=False)
29 self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(
30 duplex=False
31 )
32 self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(
33 duplex=False
34 )
35 self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(
36 duplex=False
37 )
38
39 def setup(self) -> None:
40 """
41 Sets up the predictor in a subprocess. Blocks until the predictor has
42 finished setup. To start a prediction after setup call `run()`.
43 """
44 # `multiprocessing.get_context("spawn")` returns the same API as
45 # `multiprocessing`, but will use the spawn method when creating any
46 # subprocess. Using the spawn method for the predictor subprocess is
47 # useful for compatibility with CUDA, which cannot run in a process
48 # that gets forked. If we can guarantee that all initialization happens
49 # within the subprocess, we could probably get away with using fork
50 # here instead.
51 self.predictor_process = multiprocessing.get_context("spawn").Process(
52 target=self._start_predictor_process
53 )
54
55 self._is_processing = True
56 self.predictor_process.start()
57
58 # poll with an infinite timeout to avoid burning resources in the loop
59 while self.done_pipe_reader.poll(timeout=None) and self.is_processing():
60 pass
61
62 def _start_predictor_process(self) -> None:
63 config = load_config()
64 self.predictor = load_predictor(config)
65 self.predictor.setup()
66
67 # tell the main process we've finished setup
68 self.done_pipe_writer.send(self.PROCESSING_DONE)
69
70 while True:
71 try:
72 prediction_input = self.prediction_input_pipe_reader.recv()
73 self._run_prediction(prediction_input)
74 except EOFError:
75 continue
76
77 def run(self, **prediction_input: Dict[str, Any]) -> None:
78 """
79 Starts running a prediction in the predictor subprocess, using the
80 inputs provided in `prediction_input`.
81
82 The subprocess will send prediction output and logs to pipes as soon as
83 they're available. You can check if the pipes have any data using
84 `has_output_waiting()` and `has_logs_waiting()`. You can read data from
85 the pipes using `read_output()` and `read_logs()`.
86
87 Use `is_processing()` to check whether more data is expected in the
88 pipe for prediction output.
89 """
90 # We're starting processing!
91 self._is_processing = True
92
93 # We don't know whether or not we've got a generator (progressive
94 # output) until we start getting output from the model
95 self._is_output_generator = self.OutputType.NOT_STARTED
96
97 # We haven't encountered an error yet
98 self._error = None
99
100 # Send prediction input through the pipe to the predictor subprocess
101 self.prediction_input_pipe_writer.send(prediction_input)
102
103 def is_processing(self) -> bool:
104 """
105 Returns True if the subprocess running the prediction is still
106 processing.
107 """
108 if self.done_pipe_reader.poll():
109 try:
110 if self.done_pipe_reader.recv() == self.PROCESSING_DONE:
111 self._is_processing = False
112 except EOFError:
113 pass
114
115 return self._is_processing
116
117 def has_output_waiting(self) -> bool:
118 return self.predictor_pipe_reader.poll()
119
120 def read_output(self) -> List[Any]:
121 if self._is_output_generator is self.OutputType.NOT_STARTED:
122 return []
123
124 output = []
125 while self.has_output_waiting():
126 try:
127 output.append(self.predictor_pipe_reader.recv())
128 except EOFError:
129 break
130 return output
131
132 def has_logs_waiting(self) -> bool:
133 return self.logs_pipe_reader.poll()
134
135 def read_logs(self) -> List[str]:
136 logs = []
137 while self.has_logs_waiting():
138 try:
139 logs.append(self.logs_pipe_reader.recv())
140 except EOFError:
141 break
142 return logs
143
144 def is_output_generator(self) -> Optional[bool]:
145 """
146 Returns `True` if the output is a generator, `False` if it's not, and
147 `None` if we don't know yet.
148 """
149 if self._is_output_generator is self.OutputType.NOT_STARTED:
150 if self.has_output_waiting():
151 # if there's output waiting use the first one to set whether
152 # we've got a generator, with a safety check
153 self._is_output_generator = self.predictor_pipe_reader.recv()
154 assert isinstance(self._is_output_generator, self.OutputType)
155
156 if self._is_output_generator is self.OutputType.NOT_STARTED:
157 return None
158 elif self._is_output_generator is self.OutputType.SINGLE:
159 return False
160 elif self._is_output_generator is self.OutputType.GENERATOR:
161 return True
162
163 def _run_prediction(self, prediction_input: Dict[str, Any]) -> None:
164 """
165 Sends a boolean first, to indicate whether the output is a generator.
166 After that it sends the output(s).
167
168 If the predictor raises an exception it'll send it to the error pipe
169 writer and then exit.
170
171 When the prediction is finished it'll send a token to the done pipe.
172 """
173 # Empty all the pipes before we start sending more messages to them
174 drain_pipe(self.logs_pipe_reader)
175 drain_pipe(self.predictor_pipe_reader)
176 drain_pipe(self.error_pipe_reader)
177 drain_pipe(self.done_pipe_reader)
178
179 with capture_log(self.logs_pipe_writer):
180 try:
181 output = self.predictor.predict(**prediction_input)
182
183 if isinstance(output, types.GeneratorType):
184 self.predictor_pipe_writer.send(self.OutputType.GENERATOR)
185 while True:
186 try:
187 self.predictor_pipe_writer.send(
188 next(make_pickleable(output))
189 )
190 except StopIteration:
191 break
192 else:
193 self.predictor_pipe_writer.send(self.OutputType.SINGLE)
194 self.predictor_pipe_writer.send(make_pickleable(output))
195 except Exception as e:
196 self.error_pipe_writer.send(e)
197
198 self.done_pipe_writer.send(self.PROCESSING_DONE)
199
200 def error(self) -> Optional[str]:
201 """
202 Returns the error encountered by the predictor, if one exists.
203 """
204 if self._error is None and self.error_pipe_reader.poll():
205 try:
206 self._error = self.error_pipe_reader.recv()
207 except EOFError:
208 # I don't know how this is reachable ¯\_(ツ)_/¯
209 pass
210
211 return self._error
212
213
214 def drain_pipe(pipe_reader: Connection) -> None:
215 """
216 Reads all available messages from a pipe and discards them. This serves to
217 clear the pipe for future usage.
218 """
219 while pipe_reader.poll():
220 try:
221 pipe_reader.recv()
222 except EOFError:
223 break
224
225
226 def make_pickleable(obj: Any) -> Any:
227 """
228 Returns a version of `obj` which can be pickled and therefore sent through
229 the pipe to the main process.
230
231 If the predictor uses a custom output like:
232
233 class Output(BaseModel):
234 text: str
235
236 then the output can't be sent through the pipe because:
237
238 > Can't pickle <class 'predict.Output'>: it's not the same object as
239 > 'predict.Output'
240
241 The way we're getting around this here will only work for singly-nested
242 outputs. If there's a complex object inside a complex object, it's likely
243 to fall over.
244
245 A better fix for this would be to work out why the pickling process is
246 getting a different class when loading `Output`, so the pickling Just
247 Works.
248 """
249 if isinstance(obj, BaseModel):
250 return obj.dict(exclude_unset=True)
251 else:
252 return obj
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/cog/server/runner.py b/python/cog/server/runner.py
--- a/python/cog/server/runner.py
+++ b/python/cog/server/runner.py
@@ -248,5 +248,7 @@
"""
if isinstance(obj, BaseModel):
return obj.dict(exclude_unset=True)
+ elif isinstance(obj, List):
+ return [make_pickleable(item) for item in obj]
else:
return obj
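The added `isinstance(obj, List)` branch recurses into list outputs, so a `List[Output]` prediction crosses the pipe as a plain list of dicts instead of tripping the pickling error from the issue. A sketch of how the same idea could be generalised to tuples and dicts as well — illustrative only, not the shipped cog implementation:
```python
from typing import Any

from pydantic import BaseModel


def make_pickleable(obj: Any) -> Any:
    """Recursively convert pydantic models into picklable built-in types."""
    if isinstance(obj, BaseModel):
        return obj.dict(exclude_unset=True)
    if isinstance(obj, (list, tuple)):
        # Preserve the container type while converting each element.
        return type(obj)(make_pickleable(item) for item in obj)
    if isinstance(obj, dict):
        return {key: make_pickleable(value) for key, value in obj.items()}
    return obj
```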
|
{"golden_diff": "diff --git a/python/cog/server/runner.py b/python/cog/server/runner.py\n--- a/python/cog/server/runner.py\n+++ b/python/cog/server/runner.py\n@@ -248,5 +248,7 @@\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n+ elif isinstance(obj, List):\n+ return [make_pickleable(item) for item in obj]\n else:\n return obj\n", "issue": "Support lists of `BaseModel` for outputs\nThe following model fails on Replicate.com with a cryptic `Can't pickle <class 'predict.Output'>: it's not the same object as predict.Output` error:\r\n\r\n```python\r\nclass Output(BaseModel):\r\n foo: str\r\n bar: str\r\n\r\nclass Predictor(BasePredictor):\r\n def predict(self) -> List[Output]:\r\n return [Output(foo=\"foo\", bar=\"bar\")]\r\n```\r\n\r\nThis is [documented deep in Cog's Python layer](https://github.com/replicate/cog/blob/main/python/cog/server/runner.py#L238).\r\n\r\nWe should support this type of output.\n", "before_files": [{"content": "import multiprocessing\nimport types\nfrom enum import Enum\nfrom multiprocessing.connection import Connection\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom ..predictor import load_config, load_predictor\nfrom .log_capture import capture_log\n\n\nclass PredictionRunner:\n PROCESSING_DONE = 1\n\n class OutputType(Enum):\n NOT_STARTED = 0\n SINGLE = 1\n GENERATOR = 2\n\n def __init__(self) -> None:\n self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n (\n self.prediction_input_pipe_reader,\n self.prediction_input_pipe_writer,\n ) = multiprocessing.Pipe(duplex=False)\n self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n\n def setup(self) -> None:\n \"\"\"\n Sets up the predictor in a subprocess. Blocks until the predictor has\n finished setup. To start a prediction after setup call `run()`.\n \"\"\"\n # `multiprocessing.get_context(\"spawn\")` returns the same API as\n # `multiprocessing`, but will use the spawn method when creating any\n # subprocess. Using the spawn method for the predictor subprocess is\n # useful for compatibility with CUDA, which cannot run in a process\n # that gets forked. 
If we can guarantee that all initialization happens\n # within the subprocess, we could probably get away with using fork\n # here instead.\n self.predictor_process = multiprocessing.get_context(\"spawn\").Process(\n target=self._start_predictor_process\n )\n\n self._is_processing = True\n self.predictor_process.start()\n\n # poll with an infinite timeout to avoid burning resources in the loop\n while self.done_pipe_reader.poll(timeout=None) and self.is_processing():\n pass\n\n def _start_predictor_process(self) -> None:\n config = load_config()\n self.predictor = load_predictor(config)\n self.predictor.setup()\n\n # tell the main process we've finished setup\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n while True:\n try:\n prediction_input = self.prediction_input_pipe_reader.recv()\n self._run_prediction(prediction_input)\n except EOFError:\n continue\n\n def run(self, **prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Starts running a prediction in the predictor subprocess, using the\n inputs provided in `prediction_input`.\n\n The subprocess will send prediction output and logs to pipes as soon as\n they're available. You can check if the pipes have any data using\n `has_output_waiting()` and `has_logs_waiting()`. You can read data from\n the pipes using `read_output()` and `read_logs()`.\n\n Use `is_processing()` to check whether more data is expected in the\n pipe for prediction output.\n \"\"\"\n # We're starting processing!\n self._is_processing = True\n\n # We don't know whether or not we've got a generator (progressive\n # output) until we start getting output from the model\n self._is_output_generator = self.OutputType.NOT_STARTED\n\n # We haven't encountered an error yet\n self._error = None\n\n # Send prediction input through the pipe to the predictor subprocess\n self.prediction_input_pipe_writer.send(prediction_input)\n\n def is_processing(self) -> bool:\n \"\"\"\n Returns True if the subprocess running the prediction is still\n processing.\n \"\"\"\n if self.done_pipe_reader.poll():\n try:\n if self.done_pipe_reader.recv() == self.PROCESSING_DONE:\n self._is_processing = False\n except EOFError:\n pass\n\n return self._is_processing\n\n def has_output_waiting(self) -> bool:\n return self.predictor_pipe_reader.poll()\n\n def read_output(self) -> List[Any]:\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return []\n\n output = []\n while self.has_output_waiting():\n try:\n output.append(self.predictor_pipe_reader.recv())\n except EOFError:\n break\n return output\n\n def has_logs_waiting(self) -> bool:\n return self.logs_pipe_reader.poll()\n\n def read_logs(self) -> List[str]:\n logs = []\n while self.has_logs_waiting():\n try:\n logs.append(self.logs_pipe_reader.recv())\n except EOFError:\n break\n return logs\n\n def is_output_generator(self) -> Optional[bool]:\n \"\"\"\n Returns `True` if the output is a generator, `False` if it's not, and\n `None` if we don't know yet.\n \"\"\"\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n if self.has_output_waiting():\n # if there's output waiting use the first one to set whether\n # we've got a generator, with a safety check\n self._is_output_generator = self.predictor_pipe_reader.recv()\n assert isinstance(self._is_output_generator, self.OutputType)\n\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return None\n elif self._is_output_generator is self.OutputType.SINGLE:\n return False\n elif self._is_output_generator is self.OutputType.GENERATOR:\n return True\n\n def 
_run_prediction(self, prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Sends a boolean first, to indicate whether the output is a generator.\n After that it sends the output(s).\n\n If the predictor raises an exception it'll send it to the error pipe\n writer and then exit.\n\n When the prediction is finished it'll send a token to the done pipe.\n \"\"\"\n # Empty all the pipes before we start sending more messages to them\n drain_pipe(self.logs_pipe_reader)\n drain_pipe(self.predictor_pipe_reader)\n drain_pipe(self.error_pipe_reader)\n drain_pipe(self.done_pipe_reader)\n\n with capture_log(self.logs_pipe_writer):\n try:\n output = self.predictor.predict(**prediction_input)\n\n if isinstance(output, types.GeneratorType):\n self.predictor_pipe_writer.send(self.OutputType.GENERATOR)\n while True:\n try:\n self.predictor_pipe_writer.send(\n next(make_pickleable(output))\n )\n except StopIteration:\n break\n else:\n self.predictor_pipe_writer.send(self.OutputType.SINGLE)\n self.predictor_pipe_writer.send(make_pickleable(output))\n except Exception as e:\n self.error_pipe_writer.send(e)\n\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n def error(self) -> Optional[str]:\n \"\"\"\n Returns the error encountered by the predictor, if one exists.\n \"\"\"\n if self._error is None and self.error_pipe_reader.poll():\n try:\n self._error = self.error_pipe_reader.recv()\n except EOFError:\n # I don't know how this is reachable \u00af\\_(\u30c4)_/\u00af\n pass\n\n return self._error\n\n\ndef drain_pipe(pipe_reader: Connection) -> None:\n \"\"\"\n Reads all available messages from a pipe and discards them. This serves to\n clear the pipe for future usage.\n \"\"\"\n while pipe_reader.poll():\n try:\n pipe_reader.recv()\n except EOFError:\n break\n\n\ndef make_pickleable(obj: Any) -> Any:\n \"\"\"\n Returns a version of `obj` which can be pickled and therefore sent through\n the pipe to the main process.\n\n If the predictor uses a custom output like:\n\n class Output(BaseModel):\n text: str\n\n then the output can't be sent through the pipe because:\n\n > Can't pickle <class 'predict.Output'>: it's not the same object as\n > 'predict.Output'\n\n The way we're getting around this here will only work for singly-nested\n outputs. 
If there's a complex object inside a complex object, it's likely\n to fall over.\n\n A better fix for this would be to work out why the pickling process is\n getting a different class when loading `Output`, so the pickling Just\n Works.\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n else:\n return obj\n", "path": "python/cog/server/runner.py"}], "after_files": [{"content": "import multiprocessing\nimport types\nfrom enum import Enum\nfrom multiprocessing.connection import Connection\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom ..predictor import load_config, load_predictor\nfrom .log_capture import capture_log\n\n\nclass PredictionRunner:\n PROCESSING_DONE = 1\n\n class OutputType(Enum):\n NOT_STARTED = 0\n SINGLE = 1\n GENERATOR = 2\n\n def __init__(self) -> None:\n self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n (\n self.prediction_input_pipe_reader,\n self.prediction_input_pipe_writer,\n ) = multiprocessing.Pipe(duplex=False)\n self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n\n def setup(self) -> None:\n \"\"\"\n Sets up the predictor in a subprocess. Blocks until the predictor has\n finished setup. To start a prediction after setup call `run()`.\n \"\"\"\n # `multiprocessing.get_context(\"spawn\")` returns the same API as\n # `multiprocessing`, but will use the spawn method when creating any\n # subprocess. Using the spawn method for the predictor subprocess is\n # useful for compatibility with CUDA, which cannot run in a process\n # that gets forked. If we can guarantee that all initialization happens\n # within the subprocess, we could probably get away with using fork\n # here instead.\n self.predictor_process = multiprocessing.get_context(\"spawn\").Process(\n target=self._start_predictor_process\n )\n\n self._is_processing = True\n self.predictor_process.start()\n\n # poll with an infinite timeout to avoid burning resources in the loop\n while self.done_pipe_reader.poll(timeout=None) and self.is_processing():\n pass\n\n def _start_predictor_process(self) -> None:\n config = load_config()\n self.predictor = load_predictor(config)\n self.predictor.setup()\n\n # tell the main process we've finished setup\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n while True:\n try:\n prediction_input = self.prediction_input_pipe_reader.recv()\n self._run_prediction(prediction_input)\n except EOFError:\n continue\n\n def run(self, **prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Starts running a prediction in the predictor subprocess, using the\n inputs provided in `prediction_input`.\n\n The subprocess will send prediction output and logs to pipes as soon as\n they're available. You can check if the pipes have any data using\n `has_output_waiting()` and `has_logs_waiting()`. 
You can read data from\n the pipes using `read_output()` and `read_logs()`.\n\n Use `is_processing()` to check whether more data is expected in the\n pipe for prediction output.\n \"\"\"\n # We're starting processing!\n self._is_processing = True\n\n # We don't know whether or not we've got a generator (progressive\n # output) until we start getting output from the model\n self._is_output_generator = self.OutputType.NOT_STARTED\n\n # We haven't encountered an error yet\n self._error = None\n\n # Send prediction input through the pipe to the predictor subprocess\n self.prediction_input_pipe_writer.send(prediction_input)\n\n def is_processing(self) -> bool:\n \"\"\"\n Returns True if the subprocess running the prediction is still\n processing.\n \"\"\"\n if self.done_pipe_reader.poll():\n try:\n if self.done_pipe_reader.recv() == self.PROCESSING_DONE:\n self._is_processing = False\n except EOFError:\n pass\n\n return self._is_processing\n\n def has_output_waiting(self) -> bool:\n return self.predictor_pipe_reader.poll()\n\n def read_output(self) -> List[Any]:\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return []\n\n output = []\n while self.has_output_waiting():\n try:\n output.append(self.predictor_pipe_reader.recv())\n except EOFError:\n break\n return output\n\n def has_logs_waiting(self) -> bool:\n return self.logs_pipe_reader.poll()\n\n def read_logs(self) -> List[str]:\n logs = []\n while self.has_logs_waiting():\n try:\n logs.append(self.logs_pipe_reader.recv())\n except EOFError:\n break\n return logs\n\n def is_output_generator(self) -> Optional[bool]:\n \"\"\"\n Returns `True` if the output is a generator, `False` if it's not, and\n `None` if we don't know yet.\n \"\"\"\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n if self.has_output_waiting():\n # if there's output waiting use the first one to set whether\n # we've got a generator, with a safety check\n self._is_output_generator = self.predictor_pipe_reader.recv()\n assert isinstance(self._is_output_generator, self.OutputType)\n\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return None\n elif self._is_output_generator is self.OutputType.SINGLE:\n return False\n elif self._is_output_generator is self.OutputType.GENERATOR:\n return True\n\n def _run_prediction(self, prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Sends a boolean first, to indicate whether the output is a generator.\n After that it sends the output(s).\n\n If the predictor raises an exception it'll send it to the error pipe\n writer and then exit.\n\n When the prediction is finished it'll send a token to the done pipe.\n \"\"\"\n # Empty all the pipes before we start sending more messages to them\n drain_pipe(self.logs_pipe_reader)\n drain_pipe(self.predictor_pipe_reader)\n drain_pipe(self.error_pipe_reader)\n drain_pipe(self.done_pipe_reader)\n\n with capture_log(self.logs_pipe_writer):\n try:\n output = self.predictor.predict(**prediction_input)\n\n if isinstance(output, types.GeneratorType):\n self.predictor_pipe_writer.send(self.OutputType.GENERATOR)\n while True:\n try:\n self.predictor_pipe_writer.send(\n next(make_pickleable(output))\n )\n except StopIteration:\n break\n else:\n self.predictor_pipe_writer.send(self.OutputType.SINGLE)\n self.predictor_pipe_writer.send(make_pickleable(output))\n except Exception as e:\n self.error_pipe_writer.send(e)\n\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n def error(self) -> Optional[str]:\n \"\"\"\n Returns the error encountered by the predictor, 
if one exists.\n \"\"\"\n if self._error is None and self.error_pipe_reader.poll():\n try:\n self._error = self.error_pipe_reader.recv()\n except EOFError:\n # I don't know how this is reachable \u00af\\_(\u30c4)_/\u00af\n pass\n\n return self._error\n\n\ndef drain_pipe(pipe_reader: Connection) -> None:\n \"\"\"\n Reads all available messages from a pipe and discards them. This serves to\n clear the pipe for future usage.\n \"\"\"\n while pipe_reader.poll():\n try:\n pipe_reader.recv()\n except EOFError:\n break\n\n\ndef make_pickleable(obj: Any) -> Any:\n \"\"\"\n Returns a version of `obj` which can be pickled and therefore sent through\n the pipe to the main process.\n\n If the predictor uses a custom output like:\n\n class Output(BaseModel):\n text: str\n\n then the output can't be sent through the pipe because:\n\n > Can't pickle <class 'predict.Output'>: it's not the same object as\n > 'predict.Output'\n\n The way we're getting around this here will only work for singly-nested\n outputs. If there's a complex object inside a complex object, it's likely\n to fall over.\n\n A better fix for this would be to work out why the pickling process is\n getting a different class when loading `Output`, so the pickling Just\n Works.\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n elif isinstance(obj, List):\n return [make_pickleable(item) for item in obj]\n else:\n return obj\n", "path": "python/cog/server/runner.py"}]}
| 2,864 | 101 |
gh_patches_debug_28231
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1197
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Distributed model initialisation - CIFAR 10 example
## ❓ Questions/Help/Support
In the cifar10 example, when a [model](https://github.com/pytorch/ignite/blob/master/examples/contrib/cifar10/main.py#L51) is defined inside the process, is the model updated and shared between the `nproc_per_node` processes?
In most of the TPU examples I have seen, the model is defined before the `Parallel` process.
- [tpu-training-super-fast-xlmroberta](https://www.kaggle.com/shonenkov/tpu-training-super-fast-xlmroberta)
- [i-like-clean-tpu-training-kernels-i-can-not-lie](https://www.kaggle.com/abhishek/i-like-clean-tpu-training-kernels-i-can-not-lie)
Does the way ignite handles the model ensure the same model is used throughout the training?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/distributed/auto.py`
Content:
```
1 import warnings
2
3 import torch
4 import torch.nn as nn
5 from torch.optim.optimizer import Optimizer
6 from torch.utils.data import DataLoader, Dataset
7 from torch.utils.data.distributed import DistributedSampler
8 from torch.utils.data.sampler import Sampler
9
10 from ignite.distributed import utils as idist
11 from ignite.distributed.comp_models import native as idist_native
12 from ignite.distributed.comp_models import xla as idist_xla
13 from ignite.utils import setup_logger
14
15 __all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"]
16
17
18 def auto_dataloader(dataset, **kwargs):
19 """Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting
20 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
21
22 Internally, we create a dataloader with provided kwargs while applying the following updates:
23
24 - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.
25 - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.
26 - if no sampler provided by user, `torch DistributedSampler` is setup.
27 - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.
28 - if the default device is 'cuda', `pin_memory` is automatically set to `True`.
29
30 .. warning::
31
32 Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch
33 sampler is compatible with distributed configuration.
34
35 Examples:
36
37 .. code-block:: python
38
39 import ignite.distribted as idist
40
41 train_loader = idist.auto_dataloader(
42 train_dataset,
43 batch_size=32,
44 num_workers=4,
45 shuffle=True,
46 pin_memory="cuda" in idist.device().type,
47 drop_last=True,
48 )
49
50 Args:
51 dataset (Dataset): input torch dataset
52 **kwargs: keyword arguments for `torch DataLoader`_.
53
54 Returns:
55 `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices
56
57 .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
58 .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178
59 .. _torch DistributedSampler:
60 https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
61 """
62 rank = idist.get_rank()
63 world_size = idist.get_world_size()
64
65 logger = setup_logger(__name__ + ".auto_dataloader")
66 if world_size > 1:
67 if "batch_size" in kwargs and kwargs["batch_size"] >= world_size:
68 kwargs["batch_size"] //= world_size
69
70 nproc = idist.get_nproc_per_node()
71 if "num_workers" in kwargs and kwargs["num_workers"] >= nproc:
72 kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc
73
74 if "batch_sampler" not in kwargs:
75 if kwargs.get("sampler", None) is not None:
76 sampler = DistributedProxySampler(kwargs["sampler"], num_replicas=world_size, rank=rank)
77 else:
78 sampler = DistributedSampler(
79 dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get("shuffle", True)
80 )
81 # we need to remove "shuffle" from kwargs if sampler is used
82 if "shuffle" in kwargs:
83 del kwargs["shuffle"]
84
85 kwargs["sampler"] = sampler
86 else:
87 warnings.warn(
88 "Found batch_sampler in provided kwargs. Please, make sure that it is compatible "
89 "with distributed configuration"
90 )
91
92 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False):
93 # TODO: How about XLA GPU ?
94 warnings.warn(
95 "Found incompatible options: xla support and pin_memory args equal True. "
96 "Argument `pin_memory=False` will be used to construct data loader."
97 )
98 kwargs["pin_memory"] = False
99 else:
100 kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type)
101
102 logger.info("Use data loader kwargs for dataset '{}': \n\t{}".format(repr(dataset)[:20].strip(), kwargs))
103 dataloader = DataLoader(dataset, **kwargs)
104
105 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:
106
107 logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA")
108
109 mp_device_loader_cls = _MpDeviceLoader
110 try:
111 from torch_xla.distributed.parallel_loader import MpDeviceLoader
112
113 mp_device_loader_cls = MpDeviceLoader
114 except ImportError:
115 pass
116
117 sampler = dataloader.sampler
118 dataloader = mp_device_loader_cls(dataloader, idist.device())
119 dataloader.sampler = sampler
120
121 return dataloader
122
123
124 def auto_model(model: nn.Module) -> nn.Module:
125 """Helper method to adapt provided model for non-distributed and distributed configurations (supporting
126 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
127
128 Internally, we perform to following:
129
130 - send model to current :meth:`~ignite.distributed.utils.device()`.
131 - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1
132 - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
133
134 Examples:
135
136 .. code-block:: python
137
138 import ignite.distribted as idist
139
140 model = idist.auto_model(model)
141
142 Args:
143 model (torch.nn.Module): model to adapt.
144
145 Returns:
146 torch.nn.Module
147
148 .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel
149 .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel
150 """
151 logger = setup_logger(__name__ + ".auto_model")
152
153 model.to(idist.device())
154
155 # distributed data parallel model
156 if idist.get_world_size() > 1:
157 if idist.backend() == idist_native.NCCL:
158 lrank = idist.get_local_rank()
159 logger.info("Apply torch DistributedDataParallel on model, device id: {}".format(lrank))
160 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])
161 elif idist.backend() == idist_native.GLOO:
162 logger.info("Apply torch DistributedDataParallel on model")
163 model = torch.nn.parallel.DistributedDataParallel(model)
164
165 # not distributed but multiple GPUs reachable so data parallel model
166 elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type:
167 logger.info("Apply torch DataParallel on model")
168 model = torch.nn.parallel.DataParallel(model)
169
170 return model
171
172
173 def auto_optim(optimizer: Optimizer) -> Optimizer:
174 """Helper method to adapt optimizer for non-distributed and distributed configurations (supporting
175 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
176
177 Internally, this method is no-op for non-distributed and torch native distributed configuration.
178 For XLA distributed configuration, we create a new class that inherits from provided optimizer.
179 The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.
180
181 Examples:
182
183 .. code-block:: python
184
185 import ignite.distribted as idist
186
187 optimizer = idist.auto_optim(optimizer)
188
189
190 Args:
191 optimizer (Optimizer): input torch optimizer
192
193 Returns:
194 Optimizer
195
196 .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step
197
198 """
199 if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):
200 return optimizer
201
202 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))
203 return cls(optimizer)
204
205
206 class DistributedProxySampler(DistributedSampler):
207 """Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.
208
209 Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407
210
211
212 .. note::
213 Input sampler is assumed to have a constant size.
214
215 Args:
216 sampler (Sampler): Input torch data sampler.
217 num_replicas (int, optional): Number of processes participating in distributed training.
218 rank (int, optional): Rank of the current process within ``num_replicas``.
219
220 """
221
222 def __init__(self, sampler: Sampler, num_replicas=None, rank=None):
223
224 if not isinstance(sampler, Sampler):
225 raise TypeError("Argument sampler should be instance of torch Sampler, but given: {}".format(type(sampler)))
226
227 if not hasattr(sampler, "__len__"):
228 raise TypeError("Argument sampler should have length")
229
230 super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)
231 self.sampler = sampler
232
233 def __iter__(self):
234 # deterministically shuffle based on epoch
235 torch.manual_seed(self.epoch)
236
237 indices = []
238 while len(indices) < self.total_size:
239 indices += list(self.sampler)
240
241 if len(indices) > self.total_size:
242 indices = indices[: self.total_size]
243
244 # subsample
245 indices = indices[self.rank : self.total_size : self.num_replicas]
246 if len(indices) != self.num_samples:
247 raise RuntimeError("{} vs {}".format(len(indices), self.num_samples))
248
249 return iter(indices)
250
251
252 if idist.has_xla_support:
253
254 import torch_xla.core.xla_model as xm
255 from torch_xla.distributed.parallel_loader import ParallelLoader
256
257 class _MpDeviceLoader:
258 # https://github.com/pytorch/xla/pull/2117
259 # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available
260 def __init__(self, loader, device, **kwargs):
261 self._loader = loader
262 self._device = device
263 self._parallel_loader_kwargs = kwargs
264
265 def __iter__(self):
266 parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)
267 return parallel_loader.per_device_loader(self._device)
268
269 def __len__(self):
270 return len(self._loader)
271
272 class _XLADistributedOptimizer(Optimizer):
273 def __init__(self, optimizer):
274 super(self.__class__, self).__init__(optimizer.param_groups)
275 self.wrapped_optimizer = optimizer
276
277 def step(self, closure=None):
278 xm.optimizer_step(self.wrapped_optimizer, barrier=True)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py
--- a/ignite/distributed/auto.py
+++ b/ignite/distributed/auto.py
@@ -127,8 +127,8 @@
Internally, we perform to following:
- - send model to current :meth:`~ignite.distributed.utils.device()`.
- - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1
+ - send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.
+ - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.
- wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
Examples:
@@ -139,6 +139,15 @@
model = idist.auto_model(model)
+ In addition with NVidia/Apex, it can be used in the following way:
+
+ .. code-block:: python
+
+ import ignite.distribted as idist
+
+ model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
+ model = idist.auto_model(model)
+
Args:
model (torch.nn.Module): model to adapt.
@@ -150,7 +159,10 @@
"""
logger = setup_logger(__name__ + ".auto_model")
- model.to(idist.device())
+ # Put model's parameters to device if its parameters are not on the device
+ device = idist.device()
+ if not all([p.device == device for p in model.parameters()]):
+ model.to(device)
# distributed data parallel model
if idist.get_world_size() > 1:
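The patched `auto_model` only moves parameters that are not already on the target device, and the docstring now shows the Apex `amp.initialize` ordering. It also answers the question in the issue: each process spawned by `idist.Parallel` builds its own model instance, and wrapping it in `DistributedDataParallel` keeps the replicas consistent (parameters are broadcast from rank 0 at construction, gradients are all-reduced on every step). A usage sketch assuming the `idist.Parallel`/`auto_*` API from ignite 0.4; the `training`/`config` names follow the cifar10 example's conventions:
```python
import torch
import torch.nn as nn

import ignite.distributed as idist


def training(local_rank, config):
    model = nn.Linear(10, 2)           # each spawned process builds its own instance
    model = idist.auto_model(model)    # moved to idist.device() only if needed, then
                                       # wrapped in DDP / DataParallel as appropriate
    optimizer = idist.auto_optim(torch.optim.SGD(model.parameters(), lr=config["lr"]))
    # ... build loaders with idist.auto_dataloader and run the trainer here ...


if __name__ == "__main__":
    with idist.Parallel(backend="gloo", nproc_per_node=2) as parallel:
        parallel.run(training, {"lr": 0.1})
```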
|
{"golden_diff": "diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\n--- a/ignite/distributed/auto.py\n+++ b/ignite/distributed/auto.py\n@@ -127,8 +127,8 @@\n \n Internally, we perform to following:\n \n- - send model to current :meth:`~ignite.distributed.utils.device()`.\n- - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n+ - send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.\n+ - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n \n Examples:\n@@ -139,6 +139,15 @@\n \n model = idist.auto_model(model)\n \n+ In addition with NVidia/Apex, it can be used in the following way:\n+\n+ .. code-block:: python\n+\n+ import ignite.distribted as idist\n+\n+ model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)\n+ model = idist.auto_model(model)\n+\n Args:\n model (torch.nn.Module): model to adapt.\n \n@@ -150,7 +159,10 @@\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n \n- model.to(idist.device())\n+ # Put model's parameters to device if its parameters are not on the device\n+ device = idist.device()\n+ if not all([p.device == device for p in model.parameters()]):\n+ model.to(device)\n \n # distributed data parallel model\n if idist.get_world_size() > 1:\n", "issue": "Distributed model initialisation - CIFAR 10 example\n## \u2753 Questions/Help/Support\r\n\r\nIn the cifar10 example when a [model](https://github.com/pytorch/ignite/blob/master/examples/contrib/cifar10/main.py#L51) is defined inside the process, is the model updated and shared between the `nproc_per_node`?\r\n\r\nIn most of the TPU examples I have seen they define the model before the `Parallel` process.\r\n- [tpu-training-super-fast-xlmroberta](https://www.kaggle.com/shonenkov/tpu-training-super-fast-xlmroberta)\r\n- [i-like-clean-tpu-training-kernels-i-can-not-lie](https://www.kaggle.com/abhishek/i-like-clean-tpu-training-kernels-i-can-not-lie)\r\n\r\nDoes the way ignite handles the model ensure the same model is used throughout the training?\n", "before_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is 
provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. _torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. 
\"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()`.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n model.to(idist.device())\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. 
_xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) > self.total_size:\n indices = indices[: self.total_size]\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}], "after_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for 
non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. _torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. 
\"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n In addition with NVidia/Apex, it can be used in the following way:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. 
_torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n # Put model's parameters to device if its parameters are not on the device\n device = idist.device()\n if not all([p.device == device for p in model.parameters()]):\n model.to(device)\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. 
note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) > self.total_size:\n indices = indices[: self.total_size]\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}]}
| 3,624 | 407 |
gh_patches_debug_9033
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-3408
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide beta UI for record merging
### Summary of Requirements
- [x] GETing /libtools/merge?olids=OL1W,OL2W displays the beta merging UI
- Used /works/merge instead
- [ ] UI will not merge works list backreferences (but allow the merge to continue for the other selected items)
- [ ] UI will not merge works if works are on reading logs (but will allow the merge to continue for the other selected items)
- [x] Should only be available to librarians
- [ ] Log to stats.ol.edits.action.merge-works when a merge occurs
- [x] There is a means of selecting items from the search page
- [ ] Batch edit is created for the merge, with action = merge-works
- [x] Pressing "Do merge" does a merge
### Relationships
- Part of #805
- Depends on #2963
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/code.py`
Content:
```
1 """Upstream customizations."""
2
3 import datetime
4 import hashlib
5 import io
6 import os.path
7 import random
8
9 import web
10
11 from infogami import config
12 from infogami.core import code as core
13 from infogami.infobase import client
14 from infogami.utils import delegate, app, types
15 from infogami.utils.view import public, safeint, render
16 from infogami.utils.view import render_template # noqa: F401 used for its side effects
17 from infogami.utils.context import context
18
19 from openlibrary import accounts
20
21 from openlibrary.plugins.upstream import addbook, covers, merge_authors, models, utils
22 from openlibrary.plugins.upstream import spamcheck
23 from openlibrary.plugins.upstream import borrow, recentchanges # TODO: unused imports?
24 from openlibrary.plugins.upstream.utils import render_component
25
26 if not config.get('coverstore_url'):
27 config.coverstore_url = "https://covers.openlibrary.org"
28
29
30 class static(delegate.page):
31 path = "/images/.*"
32
33 def GET(self):
34 host = 'https://%s' % web.ctx.host if 'openlibrary.org' in web.ctx.host else ''
35 raise web.seeother(host + '/static' + web.ctx.path)
36
37
38 class edit(core.edit):
39 """Overwrite ?m=edit behaviour for author, book, work, and people pages."""
40
41 def GET(self, key):
42 page = web.ctx.site.get(key)
43
44 if web.re_compile('/(authors|books|works)/OL.*').match(key):
45 if page is None:
46 raise web.seeother(key)
47 else:
48 raise web.seeother(page.url(suffix="/edit"))
49 else:
50 return core.edit.GET(self, key)
51
52 def POST(self, key):
53 if web.re_compile('/(people/[^/]+)').match(key) and spamcheck.is_spam():
54 return render_template(
55 'message.html', 'Oops', 'Something went wrong. Please try again later.'
56 )
57 return core.edit.POST(self, key)
58
59
60 # handlers for change photo and change cover
61
62
63 class change_cover(delegate.mode):
64 path = r"(/books/OL\d+M)/cover"
65
66 def GET(self, key):
67 page = web.ctx.site.get(key)
68 if page is None or page.type.key not in ['/type/edition', '/type/author']:
69 raise web.seeother(key)
70 return render.change_cover(page)
71
72
73 class change_photo(change_cover):
74 path = r"(/authors/OL\d+A)/photo"
75
76
77 del delegate.modes[
78 'change_cover'
79 ] # delete change_cover mode added by openlibrary plugin
80
81
82 class components_test(delegate.page):
83 path = "/_dev/components/HelloWorld"
84
85 def GET(self):
86 return render_component('HelloWorld') + render_component('HelloWorld')
87
88
89 class library_explorer(delegate.page):
90 path = "/explore"
91
92 def GET(self):
93 return render_template('library_explorer')
94
95
96 class merge_work(delegate.page):
97 path = r"(/works/OL\d+W)/merge"
98
99 def GET(self, key):
100 return "This looks like a good place for a merge UI!"
101
102 def POST(self, key):
103 pass
104
105
106 @web.memoize
107 @public
108 def vendor_js():
109 pardir = os.path.pardir
110 path = os.path.abspath(
111 os.path.join(
112 __file__,
113 pardir,
114 pardir,
115 pardir,
116 pardir,
117 'static',
118 'upstream',
119 'js',
120 'vendor.js',
121 )
122 )
123 with open(path, 'rb') as in_file:
124 digest = hashlib.md5(in_file.read()).hexdigest()
125 return '/static/upstream/js/vendor.js?v=' + digest
126
127
128 @web.memoize
129 @public
130 def static_url(path):
131 """Takes path relative to static/ and constructs url to that resource with hash."""
132 pardir = os.path.pardir
133 fullpath = os.path.abspath(
134 os.path.join(__file__, pardir, pardir, pardir, pardir, "static", path)
135 )
136 with open(fullpath, 'rb') as in_file:
137 digest = hashlib.md5(in_file.read()).hexdigest()
138 return f"/static/{path}?v={digest}"
139
140
141 class DynamicDocument:
142 """Dynamic document is created by concatinating various rawtext documents in the DB.
143 Used to generate combined js/css using multiple js/css files in the system.
144 """
145
146 def __init__(self, root):
147 self.root = web.rstrips(root, '/')
148 self.docs = None
149 self._text = None
150 self.last_modified = None
151
152 def update(self):
153 keys = web.ctx.site.things({'type': '/type/rawtext', 'key~': self.root + '/*'})
154 docs = sorted(web.ctx.site.get_many(keys), key=lambda doc: doc.key)
155 if docs:
156 self.last_modified = min(doc.last_modified for doc in docs)
157 self._text = "\n\n".join(doc.get('body', '') for doc in docs)
158 else:
159 self.last_modified = datetime.datetime.utcnow()
160 self._text = ""
161
162 def get_text(self):
163 """Returns text of the combined documents"""
164 if self._text is None:
165 self.update()
166 return self._text
167
168 def md5(self):
169 """Returns md5 checksum of the combined documents"""
170 return hashlib.md5(self.get_text().encode('utf-8')).hexdigest()
171
172
173 def create_dynamic_document(url, prefix):
174 """Creates a handler for `url` for servering combined js/css for `prefix/*` pages"""
175 doc = DynamicDocument(prefix)
176
177 if url.endswith('.js'):
178 content_type = "text/javascript"
179 elif url.endswith(".css"):
180 content_type = "text/css"
181 else:
182 content_type = "text/plain"
183
184 class page(delegate.page):
185 """Handler for serving the combined content."""
186
187 path = "__registered_later_without_using_this__"
188
189 def GET(self):
190 i = web.input(v=None)
191 v = doc.md5()
192 if v != i.v:
193 raise web.seeother(web.changequery(v=v))
194
195 if web.modified(etag=v):
196 oneyear = 365 * 24 * 3600
197 web.header("Content-Type", content_type)
198 web.header("Cache-Control", "Public, max-age=%d" % oneyear)
199 web.lastmodified(doc.last_modified)
200 web.expires(oneyear)
201 return delegate.RawText(doc.get_text())
202
203 def url(self):
204 return url + "?v=" + doc.md5()
205
206 def reload(self):
207 doc.update()
208
209 class hook(client.hook):
210 """Hook to update the DynamicDocument when any of the source pages is updated."""
211
212 def on_new_version(self, page):
213 if page.key.startswith(doc.root):
214 doc.update()
215
216 # register the special page
217 delegate.pages[url] = {}
218 delegate.pages[url][None] = page
219 return page
220
221
222 all_js = create_dynamic_document("/js/all.js", config.get("js_root", "/js"))
223 web.template.Template.globals['all_js'] = all_js()
224
225 all_css = create_dynamic_document("/css/all.css", config.get("css_root", "/css"))
226 web.template.Template.globals['all_css'] = all_css()
227
228
229 def reload():
230 """Reload all.css and all.js"""
231 all_css().reload()
232 all_js().reload()
233
234
235 def setup_jquery_urls():
236 if config.get('use_google_cdn', True):
237 jquery_url = "http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js"
238 jqueryui_url = (
239 "http://ajax.googleapis.com/ajax/libs/jqueryui/1.7.2/jquery-ui.min.js"
240 )
241 else:
242 jquery_url = "/static/upstream/js/jquery-1.3.2.min.js"
243 jqueryui_url = "/static/upstream/js/jquery-ui-1.7.2.min.js"
244
245 web.template.Template.globals['jquery_url'] = jquery_url
246 web.template.Template.globals['jqueryui_url'] = jqueryui_url
247 web.template.Template.globals['use_google_cdn'] = config.get('use_google_cdn', True)
248
249
250 def user_is_admin_or_librarian():
251 user = web.ctx.site.get_user()
252 return user and (user.is_admin() or user.is_librarian())
253
254
255 @public
256 def get_document(key, limit_redirs=5):
257 doc = None
258 for i in range(limit_redirs):
259 doc = web.ctx.site.get(key)
260 if doc is None:
261 return None
262 if doc.type.key == "/type/redirect":
263 key = doc.location
264 else:
265 return doc
266 return doc
267
268
269 class revert(delegate.mode):
270 def GET(self, key):
271 raise web.seeother(web.changequery(m=None))
272
273 def POST(self, key):
274 i = web.input("v", _comment=None)
275 v = i.v and safeint(i.v, None)
276
277 if v is None:
278 raise web.seeother(web.changequery({}))
279
280 if not web.ctx.site.can_write(key) or not user_is_admin_or_librarian():
281 return render.permission_denied(
282 web.ctx.fullpath, "Permission denied to edit " + key + "."
283 )
284
285 thing = web.ctx.site.get(key, i.v)
286
287 if not thing:
288 raise web.notfound()
289
290 def revert(thing):
291 if thing.type.key == "/type/delete" and thing.revision > 1:
292 prev = web.ctx.site.get(thing.key, thing.revision - 1)
293 if prev.type.key in ["/type/delete", "/type/redirect"]:
294 return revert(prev)
295 else:
296 prev._save("revert to revision %d" % prev.revision)
297 return prev
298 elif thing.type.key == "/type/redirect":
299 redirect = web.ctx.site.get(thing.location)
300 if redirect and redirect.type.key not in [
301 "/type/delete",
302 "/type/redirect",
303 ]:
304 return redirect
305 else:
306 # bad redirect. Try the previous revision
307 prev = web.ctx.site.get(thing.key, thing.revision - 1)
308 return revert(prev)
309 else:
310 return thing
311
312 def process(value):
313 if isinstance(value, list):
314 return [process(v) for v in value]
315 elif isinstance(value, client.Thing):
316 if value.key:
317 if value.type.key in ['/type/delete', '/type/revert']:
318 return revert(value)
319 else:
320 return value
321 else:
322 for k in value:
323 value[k] = process(value[k])
324 return value
325 else:
326 return value
327
328 for k in thing:
329 thing[k] = process(thing[k])
330
331 comment = i._comment or "reverted to revision %d" % v
332 thing._save(comment)
333 raise web.seeother(key)
334
335
336 def setup():
337 """Setup for upstream plugin"""
338 models.setup()
339 utils.setup()
340 addbook.setup()
341 covers.setup()
342 merge_authors.setup()
343
344 from openlibrary.plugins.upstream import data, jsdef
345
346 data.setup()
347
348 # setup template globals
349 from openlibrary.i18n import ugettext, ungettext, gettext_territory
350
351 web.template.Template.globals.update(
352 {
353 "gettext": ugettext,
354 "ugettext": ugettext,
355 "_": ugettext,
356 "ungettext": ungettext,
357 "gettext_territory": gettext_territory,
358 "random": random.Random(),
359 "commify": web.commify,
360 "group": web.group,
361 "storage": web.storage,
362 "all": all,
363 "any": any,
364 "locals": locals,
365 }
366 )
367
368 web.template.STATEMENT_NODES["jsdef"] = jsdef.JSDefNode
369
370 setup_jquery_urls()
371
372
373 setup()
374
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
|
diff --git a/openlibrary/plugins/upstream/code.py b/openlibrary/plugins/upstream/code.py
--- a/openlibrary/plugins/upstream/code.py
+++ b/openlibrary/plugins/upstream/code.py
@@ -94,13 +94,17 @@
 
 
 class merge_work(delegate.page):
-    path = r"(/works/OL\d+W)/merge"
+    path = "/works/merge"
 
-    def GET(self, key):
-        return "This looks like a good place for a merge UI!"
-
-    def POST(self, key):
-        pass
+    def GET(self):
+        user = web.ctx.site.get_user()
+        has_access = user and (
+            (user.is_admin() or user.is_librarian())
+            and user.is_usergroup_member('/usergroup/librarian-work-merge')
+        )
+        if not has_access:
+            raise web.HTTPError('403 Forbidden')
+        return render_template('merge/works')
 
 
 @web.memoize
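As a design note, the access check in this patch is inlined in `GET()`; the same gate can be expressed once and reused by other librarian-only pages. The sketch below is illustrative only (the `require_work_merge_access` decorator is an assumed helper, not part of the patch) and relies solely on the calls already shown in the diff plus the module-level imports of `code.py` (`web`, `delegate`, `render_template`):

```python
# Illustrative sketch, not part of the patch: the librarian gate from the
# diff factored into a reusable decorator for delegate.page handlers.
def require_work_merge_access(handler):
    def wrapper(self, *args, **kwargs):
        user = web.ctx.site.get_user()
        has_access = user and (
            (user.is_admin() or user.is_librarian())
            and user.is_usergroup_member('/usergroup/librarian-work-merge')
        )
        if not has_access:
            raise web.HTTPError('403 Forbidden')
        return handler(self, *args, **kwargs)
    return wrapper


class merge_work(delegate.page):
    path = "/works/merge"

    @require_work_merge_access
    def GET(self):
        return render_template('merge/works')
```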
|
{"golden_diff": "diff --git a/openlibrary/plugins/upstream/code.py b/openlibrary/plugins/upstream/code.py\n--- a/openlibrary/plugins/upstream/code.py\n+++ b/openlibrary/plugins/upstream/code.py\n@@ -94,13 +94,17 @@\n \n \n class merge_work(delegate.page):\n- path = r\"(/works/OL\\d+W)/merge\"\n+ path = \"/works/merge\"\n \n- def GET(self, key):\n- return \"This looks like a good place for a merge UI!\"\n-\n- def POST(self, key):\n- pass\n+ def GET(self):\n+ user = web.ctx.site.get_user()\n+ has_access = user and (\n+ (user.is_admin() or user.is_librarian())\n+ and user.is_usergroup_member('/usergroup/librarian-work-merge')\n+ )\n+ if not has_access:\n+ raise web.HTTPError('403 Forbidden')\n+ return render_template('merge/works')\n \n \n @web.memoize\n", "issue": "Provide beta UI for record merging\n### Summary of Requirements\r\n- [x] GETing /libtools/merge?olids=OL1W,OL2W displays the beta merging UI\r\n - Used /works/merge instead\r\n- [ ] UI will not merge works list backreferences (but allow the merge to continue for the other selected items)\r\n- [ ] UI will not merge works if works are on reading logs (but will allow the merge to continue for the other selected items)\r\n- [x] Should only be available to librarians\r\n- [ ] Log to stats.ol.edits.action.merge-works when a merge occurs\r\n- [x] There is a means of selecting items from the search page\r\n- [ ] Batch edit is created for the merge, with action = merge-works\r\n- [x] Pressing \"Do merge\" does a merge\r\n\r\n### Relationships\r\n- Part of #805\r\n- Depends on #2963\n", "before_files": [{"content": "\"\"\"Upstream customizations.\"\"\"\n\nimport datetime\nimport hashlib\nimport io\nimport os.path\nimport random\n\nimport web\n\nfrom infogami import config\nfrom infogami.core import code as core\nfrom infogami.infobase import client\nfrom infogami.utils import delegate, app, types\nfrom infogami.utils.view import public, safeint, render\nfrom infogami.utils.view import render_template # noqa: F401 used for its side effects\nfrom infogami.utils.context import context\n\nfrom openlibrary import accounts\n\nfrom openlibrary.plugins.upstream import addbook, covers, merge_authors, models, utils\nfrom openlibrary.plugins.upstream import spamcheck\nfrom openlibrary.plugins.upstream import borrow, recentchanges # TODO: unused imports?\nfrom openlibrary.plugins.upstream.utils import render_component\n\nif not config.get('coverstore_url'):\n config.coverstore_url = \"https://covers.openlibrary.org\"\n\n\nclass static(delegate.page):\n path = \"/images/.*\"\n\n def GET(self):\n host = 'https://%s' % web.ctx.host if 'openlibrary.org' in web.ctx.host else ''\n raise web.seeother(host + '/static' + web.ctx.path)\n\n\nclass edit(core.edit):\n \"\"\"Overwrite ?m=edit behaviour for author, book, work, and people pages.\"\"\"\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n\n if web.re_compile('/(authors|books|works)/OL.*').match(key):\n if page is None:\n raise web.seeother(key)\n else:\n raise web.seeother(page.url(suffix=\"/edit\"))\n else:\n return core.edit.GET(self, key)\n\n def POST(self, key):\n if web.re_compile('/(people/[^/]+)').match(key) and spamcheck.is_spam():\n return render_template(\n 'message.html', 'Oops', 'Something went wrong. 
Please try again later.'\n )\n return core.edit.POST(self, key)\n\n\n# handlers for change photo and change cover\n\n\nclass change_cover(delegate.mode):\n path = r\"(/books/OL\\d+M)/cover\"\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if page is None or page.type.key not in ['/type/edition', '/type/author']:\n raise web.seeother(key)\n return render.change_cover(page)\n\n\nclass change_photo(change_cover):\n path = r\"(/authors/OL\\d+A)/photo\"\n\n\ndel delegate.modes[\n 'change_cover'\n] # delete change_cover mode added by openlibrary plugin\n\n\nclass components_test(delegate.page):\n path = \"/_dev/components/HelloWorld\"\n\n def GET(self):\n return render_component('HelloWorld') + render_component('HelloWorld')\n\n\nclass library_explorer(delegate.page):\n path = \"/explore\"\n\n def GET(self):\n return render_template('library_explorer')\n\n\nclass merge_work(delegate.page):\n path = r\"(/works/OL\\d+W)/merge\"\n\n def GET(self, key):\n return \"This looks like a good place for a merge UI!\"\n\n def POST(self, key):\n pass\n\n\[email protected]\n@public\ndef vendor_js():\n pardir = os.path.pardir\n path = os.path.abspath(\n os.path.join(\n __file__,\n pardir,\n pardir,\n pardir,\n pardir,\n 'static',\n 'upstream',\n 'js',\n 'vendor.js',\n )\n )\n with open(path, 'rb') as in_file:\n digest = hashlib.md5(in_file.read()).hexdigest()\n return '/static/upstream/js/vendor.js?v=' + digest\n\n\[email protected]\n@public\ndef static_url(path):\n \"\"\"Takes path relative to static/ and constructs url to that resource with hash.\"\"\"\n pardir = os.path.pardir\n fullpath = os.path.abspath(\n os.path.join(__file__, pardir, pardir, pardir, pardir, \"static\", path)\n )\n with open(fullpath, 'rb') as in_file:\n digest = hashlib.md5(in_file.read()).hexdigest()\n return f\"/static/{path}?v={digest}\"\n\n\nclass DynamicDocument:\n \"\"\"Dynamic document is created by concatinating various rawtext documents in the DB.\n Used to generate combined js/css using multiple js/css files in the system.\n \"\"\"\n\n def __init__(self, root):\n self.root = web.rstrips(root, '/')\n self.docs = None\n self._text = None\n self.last_modified = None\n\n def update(self):\n keys = web.ctx.site.things({'type': '/type/rawtext', 'key~': self.root + '/*'})\n docs = sorted(web.ctx.site.get_many(keys), key=lambda doc: doc.key)\n if docs:\n self.last_modified = min(doc.last_modified for doc in docs)\n self._text = \"\\n\\n\".join(doc.get('body', '') for doc in docs)\n else:\n self.last_modified = datetime.datetime.utcnow()\n self._text = \"\"\n\n def get_text(self):\n \"\"\"Returns text of the combined documents\"\"\"\n if self._text is None:\n self.update()\n return self._text\n\n def md5(self):\n \"\"\"Returns md5 checksum of the combined documents\"\"\"\n return hashlib.md5(self.get_text().encode('utf-8')).hexdigest()\n\n\ndef create_dynamic_document(url, prefix):\n \"\"\"Creates a handler for `url` for servering combined js/css for `prefix/*` pages\"\"\"\n doc = DynamicDocument(prefix)\n\n if url.endswith('.js'):\n content_type = \"text/javascript\"\n elif url.endswith(\".css\"):\n content_type = \"text/css\"\n else:\n content_type = \"text/plain\"\n\n class page(delegate.page):\n \"\"\"Handler for serving the combined content.\"\"\"\n\n path = \"__registered_later_without_using_this__\"\n\n def GET(self):\n i = web.input(v=None)\n v = doc.md5()\n if v != i.v:\n raise web.seeother(web.changequery(v=v))\n\n if web.modified(etag=v):\n oneyear = 365 * 24 * 3600\n web.header(\"Content-Type\", content_type)\n 
web.header(\"Cache-Control\", \"Public, max-age=%d\" % oneyear)\n web.lastmodified(doc.last_modified)\n web.expires(oneyear)\n return delegate.RawText(doc.get_text())\n\n def url(self):\n return url + \"?v=\" + doc.md5()\n\n def reload(self):\n doc.update()\n\n class hook(client.hook):\n \"\"\"Hook to update the DynamicDocument when any of the source pages is updated.\"\"\"\n\n def on_new_version(self, page):\n if page.key.startswith(doc.root):\n doc.update()\n\n # register the special page\n delegate.pages[url] = {}\n delegate.pages[url][None] = page\n return page\n\n\nall_js = create_dynamic_document(\"/js/all.js\", config.get(\"js_root\", \"/js\"))\nweb.template.Template.globals['all_js'] = all_js()\n\nall_css = create_dynamic_document(\"/css/all.css\", config.get(\"css_root\", \"/css\"))\nweb.template.Template.globals['all_css'] = all_css()\n\n\ndef reload():\n \"\"\"Reload all.css and all.js\"\"\"\n all_css().reload()\n all_js().reload()\n\n\ndef setup_jquery_urls():\n if config.get('use_google_cdn', True):\n jquery_url = \"http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js\"\n jqueryui_url = (\n \"http://ajax.googleapis.com/ajax/libs/jqueryui/1.7.2/jquery-ui.min.js\"\n )\n else:\n jquery_url = \"/static/upstream/js/jquery-1.3.2.min.js\"\n jqueryui_url = \"/static/upstream/js/jquery-ui-1.7.2.min.js\"\n\n web.template.Template.globals['jquery_url'] = jquery_url\n web.template.Template.globals['jqueryui_url'] = jqueryui_url\n web.template.Template.globals['use_google_cdn'] = config.get('use_google_cdn', True)\n\n\ndef user_is_admin_or_librarian():\n user = web.ctx.site.get_user()\n return user and (user.is_admin() or user.is_librarian())\n\n\n@public\ndef get_document(key, limit_redirs=5):\n doc = None\n for i in range(limit_redirs):\n doc = web.ctx.site.get(key)\n if doc is None:\n return None\n if doc.type.key == \"/type/redirect\":\n key = doc.location\n else:\n return doc\n return doc\n\n\nclass revert(delegate.mode):\n def GET(self, key):\n raise web.seeother(web.changequery(m=None))\n\n def POST(self, key):\n i = web.input(\"v\", _comment=None)\n v = i.v and safeint(i.v, None)\n\n if v is None:\n raise web.seeother(web.changequery({}))\n\n if not web.ctx.site.can_write(key) or not user_is_admin_or_librarian():\n return render.permission_denied(\n web.ctx.fullpath, \"Permission denied to edit \" + key + \".\"\n )\n\n thing = web.ctx.site.get(key, i.v)\n\n if not thing:\n raise web.notfound()\n\n def revert(thing):\n if thing.type.key == \"/type/delete\" and thing.revision > 1:\n prev = web.ctx.site.get(thing.key, thing.revision - 1)\n if prev.type.key in [\"/type/delete\", \"/type/redirect\"]:\n return revert(prev)\n else:\n prev._save(\"revert to revision %d\" % prev.revision)\n return prev\n elif thing.type.key == \"/type/redirect\":\n redirect = web.ctx.site.get(thing.location)\n if redirect and redirect.type.key not in [\n \"/type/delete\",\n \"/type/redirect\",\n ]:\n return redirect\n else:\n # bad redirect. 
Try the previous revision\n prev = web.ctx.site.get(thing.key, thing.revision - 1)\n return revert(prev)\n else:\n return thing\n\n def process(value):\n if isinstance(value, list):\n return [process(v) for v in value]\n elif isinstance(value, client.Thing):\n if value.key:\n if value.type.key in ['/type/delete', '/type/revert']:\n return revert(value)\n else:\n return value\n else:\n for k in value:\n value[k] = process(value[k])\n return value\n else:\n return value\n\n for k in thing:\n thing[k] = process(thing[k])\n\n comment = i._comment or \"reverted to revision %d\" % v\n thing._save(comment)\n raise web.seeother(key)\n\n\ndef setup():\n \"\"\"Setup for upstream plugin\"\"\"\n models.setup()\n utils.setup()\n addbook.setup()\n covers.setup()\n merge_authors.setup()\n\n from openlibrary.plugins.upstream import data, jsdef\n\n data.setup()\n\n # setup template globals\n from openlibrary.i18n import ugettext, ungettext, gettext_territory\n\n web.template.Template.globals.update(\n {\n \"gettext\": ugettext,\n \"ugettext\": ugettext,\n \"_\": ugettext,\n \"ungettext\": ungettext,\n \"gettext_territory\": gettext_territory,\n \"random\": random.Random(),\n \"commify\": web.commify,\n \"group\": web.group,\n \"storage\": web.storage,\n \"all\": all,\n \"any\": any,\n \"locals\": locals,\n }\n )\n\n web.template.STATEMENT_NODES[\"jsdef\"] = jsdef.JSDefNode\n\n setup_jquery_urls()\n\n\nsetup()\n", "path": "openlibrary/plugins/upstream/code.py"}], "after_files": [{"content": "\"\"\"Upstream customizations.\"\"\"\n\nimport datetime\nimport hashlib\nimport io\nimport os.path\nimport random\n\nimport web\n\nfrom infogami import config\nfrom infogami.core import code as core\nfrom infogami.infobase import client\nfrom infogami.utils import delegate, app, types\nfrom infogami.utils.view import public, safeint, render\nfrom infogami.utils.view import render_template # noqa: F401 used for its side effects\nfrom infogami.utils.context import context\n\nfrom openlibrary import accounts\n\nfrom openlibrary.plugins.upstream import addbook, covers, merge_authors, models, utils\nfrom openlibrary.plugins.upstream import spamcheck\nfrom openlibrary.plugins.upstream import borrow, recentchanges # TODO: unused imports?\nfrom openlibrary.plugins.upstream.utils import render_component\n\nif not config.get('coverstore_url'):\n config.coverstore_url = \"https://covers.openlibrary.org\"\n\n\nclass static(delegate.page):\n path = \"/images/.*\"\n\n def GET(self):\n host = 'https://%s' % web.ctx.host if 'openlibrary.org' in web.ctx.host else ''\n raise web.seeother(host + '/static' + web.ctx.path)\n\n\nclass edit(core.edit):\n \"\"\"Overwrite ?m=edit behaviour for author, book, work, and people pages.\"\"\"\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n\n if web.re_compile('/(authors|books|works)/OL.*').match(key):\n if page is None:\n raise web.seeother(key)\n else:\n raise web.seeother(page.url(suffix=\"/edit\"))\n else:\n return core.edit.GET(self, key)\n\n def POST(self, key):\n if web.re_compile('/(people/[^/]+)').match(key) and spamcheck.is_spam():\n return render_template(\n 'message.html', 'Oops', 'Something went wrong. 
Please try again later.'\n )\n return core.edit.POST(self, key)\n\n\n# handlers for change photo and change cover\n\n\nclass change_cover(delegate.mode):\n path = r\"(/books/OL\\d+M)/cover\"\n\n def GET(self, key):\n page = web.ctx.site.get(key)\n if page is None or page.type.key not in ['/type/edition', '/type/author']:\n raise web.seeother(key)\n return render.change_cover(page)\n\n\nclass change_photo(change_cover):\n path = r\"(/authors/OL\\d+A)/photo\"\n\n\ndel delegate.modes[\n 'change_cover'\n] # delete change_cover mode added by openlibrary plugin\n\n\nclass components_test(delegate.page):\n path = \"/_dev/components/HelloWorld\"\n\n def GET(self):\n return render_component('HelloWorld') + render_component('HelloWorld')\n\n\nclass library_explorer(delegate.page):\n path = \"/explore\"\n\n def GET(self):\n return render_template('library_explorer')\n\n\nclass merge_work(delegate.page):\n path = \"/works/merge\"\n\n def GET(self):\n user = web.ctx.site.get_user()\n has_access = user and (\n (user.is_admin() or user.is_librarian())\n and user.is_usergroup_member('/usergroup/librarian-work-merge')\n )\n if not has_access:\n raise web.HTTPError('403 Forbidden')\n return render_template('merge/works')\n\n\[email protected]\n@public\ndef vendor_js():\n pardir = os.path.pardir\n path = os.path.abspath(\n os.path.join(\n __file__,\n pardir,\n pardir,\n pardir,\n pardir,\n 'static',\n 'upstream',\n 'js',\n 'vendor.js',\n )\n )\n with open(path, 'rb') as in_file:\n digest = hashlib.md5(in_file.read()).hexdigest()\n return '/static/upstream/js/vendor.js?v=' + digest\n\n\[email protected]\n@public\ndef static_url(path):\n \"\"\"Takes path relative to static/ and constructs url to that resource with hash.\"\"\"\n pardir = os.path.pardir\n fullpath = os.path.abspath(\n os.path.join(__file__, pardir, pardir, pardir, pardir, \"static\", path)\n )\n with open(fullpath, 'rb') as in_file:\n digest = hashlib.md5(in_file.read()).hexdigest()\n return f\"/static/{path}?v={digest}\"\n\n\nclass DynamicDocument:\n \"\"\"Dynamic document is created by concatinating various rawtext documents in the DB.\n Used to generate combined js/css using multiple js/css files in the system.\n \"\"\"\n\n def __init__(self, root):\n self.root = web.rstrips(root, '/')\n self.docs = None\n self._text = None\n self.last_modified = None\n\n def update(self):\n keys = web.ctx.site.things({'type': '/type/rawtext', 'key~': self.root + '/*'})\n docs = sorted(web.ctx.site.get_many(keys), key=lambda doc: doc.key)\n if docs:\n self.last_modified = min(doc.last_modified for doc in docs)\n self._text = \"\\n\\n\".join(doc.get('body', '') for doc in docs)\n else:\n self.last_modified = datetime.datetime.utcnow()\n self._text = \"\"\n\n def get_text(self):\n \"\"\"Returns text of the combined documents\"\"\"\n if self._text is None:\n self.update()\n return self._text\n\n def md5(self):\n \"\"\"Returns md5 checksum of the combined documents\"\"\"\n return hashlib.md5(self.get_text().encode('utf-8')).hexdigest()\n\n\ndef create_dynamic_document(url, prefix):\n \"\"\"Creates a handler for `url` for servering combined js/css for `prefix/*` pages\"\"\"\n doc = DynamicDocument(prefix)\n\n if url.endswith('.js'):\n content_type = \"text/javascript\"\n elif url.endswith(\".css\"):\n content_type = \"text/css\"\n else:\n content_type = \"text/plain\"\n\n class page(delegate.page):\n \"\"\"Handler for serving the combined content.\"\"\"\n\n path = \"__registered_later_without_using_this__\"\n\n def GET(self):\n i = web.input(v=None)\n v = 
doc.md5()\n if v != i.v:\n raise web.seeother(web.changequery(v=v))\n\n if web.modified(etag=v):\n oneyear = 365 * 24 * 3600\n web.header(\"Content-Type\", content_type)\n web.header(\"Cache-Control\", \"Public, max-age=%d\" % oneyear)\n web.lastmodified(doc.last_modified)\n web.expires(oneyear)\n return delegate.RawText(doc.get_text())\n\n def url(self):\n return url + \"?v=\" + doc.md5()\n\n def reload(self):\n doc.update()\n\n class hook(client.hook):\n \"\"\"Hook to update the DynamicDocument when any of the source pages is updated.\"\"\"\n\n def on_new_version(self, page):\n if page.key.startswith(doc.root):\n doc.update()\n\n # register the special page\n delegate.pages[url] = {}\n delegate.pages[url][None] = page\n return page\n\n\nall_js = create_dynamic_document(\"/js/all.js\", config.get(\"js_root\", \"/js\"))\nweb.template.Template.globals['all_js'] = all_js()\n\nall_css = create_dynamic_document(\"/css/all.css\", config.get(\"css_root\", \"/css\"))\nweb.template.Template.globals['all_css'] = all_css()\n\n\ndef reload():\n \"\"\"Reload all.css and all.js\"\"\"\n all_css().reload()\n all_js().reload()\n\n\ndef setup_jquery_urls():\n if config.get('use_google_cdn', True):\n jquery_url = \"http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js\"\n jqueryui_url = (\n \"http://ajax.googleapis.com/ajax/libs/jqueryui/1.7.2/jquery-ui.min.js\"\n )\n else:\n jquery_url = \"/static/upstream/js/jquery-1.3.2.min.js\"\n jqueryui_url = \"/static/upstream/js/jquery-ui-1.7.2.min.js\"\n\n web.template.Template.globals['jquery_url'] = jquery_url\n web.template.Template.globals['jqueryui_url'] = jqueryui_url\n web.template.Template.globals['use_google_cdn'] = config.get('use_google_cdn', True)\n\n\ndef user_is_admin_or_librarian():\n user = web.ctx.site.get_user()\n return user and (user.is_admin() or user.is_librarian())\n\n\n@public\ndef get_document(key, limit_redirs=5):\n doc = None\n for i in range(limit_redirs):\n doc = web.ctx.site.get(key)\n if doc is None:\n return None\n if doc.type.key == \"/type/redirect\":\n key = doc.location\n else:\n return doc\n return doc\n\n\nclass revert(delegate.mode):\n def GET(self, key):\n raise web.seeother(web.changequery(m=None))\n\n def POST(self, key):\n i = web.input(\"v\", _comment=None)\n v = i.v and safeint(i.v, None)\n\n if v is None:\n raise web.seeother(web.changequery({}))\n\n if not web.ctx.site.can_write(key) or not user_is_admin_or_librarian():\n return render.permission_denied(\n web.ctx.fullpath, \"Permission denied to edit \" + key + \".\"\n )\n\n thing = web.ctx.site.get(key, i.v)\n\n if not thing:\n raise web.notfound()\n\n def revert(thing):\n if thing.type.key == \"/type/delete\" and thing.revision > 1:\n prev = web.ctx.site.get(thing.key, thing.revision - 1)\n if prev.type.key in [\"/type/delete\", \"/type/redirect\"]:\n return revert(prev)\n else:\n prev._save(\"revert to revision %d\" % prev.revision)\n return prev\n elif thing.type.key == \"/type/redirect\":\n redirect = web.ctx.site.get(thing.location)\n if redirect and redirect.type.key not in [\n \"/type/delete\",\n \"/type/redirect\",\n ]:\n return redirect\n else:\n # bad redirect. 
Try the previous revision\n prev = web.ctx.site.get(thing.key, thing.revision - 1)\n return revert(prev)\n else:\n return thing\n\n def process(value):\n if isinstance(value, list):\n return [process(v) for v in value]\n elif isinstance(value, client.Thing):\n if value.key:\n if value.type.key in ['/type/delete', '/type/revert']:\n return revert(value)\n else:\n return value\n else:\n for k in value:\n value[k] = process(value[k])\n return value\n else:\n return value\n\n for k in thing:\n thing[k] = process(thing[k])\n\n comment = i._comment or \"reverted to revision %d\" % v\n thing._save(comment)\n raise web.seeother(key)\n\n\ndef setup():\n \"\"\"Setup for upstream plugin\"\"\"\n models.setup()\n utils.setup()\n addbook.setup()\n covers.setup()\n merge_authors.setup()\n\n from openlibrary.plugins.upstream import data, jsdef\n\n data.setup()\n\n # setup template globals\n from openlibrary.i18n import ugettext, ungettext, gettext_territory\n\n web.template.Template.globals.update(\n {\n \"gettext\": ugettext,\n \"ugettext\": ugettext,\n \"_\": ugettext,\n \"ungettext\": ungettext,\n \"gettext_territory\": gettext_territory,\n \"random\": random.Random(),\n \"commify\": web.commify,\n \"group\": web.group,\n \"storage\": web.storage,\n \"all\": all,\n \"any\": any,\n \"locals\": locals,\n }\n )\n\n web.template.STATEMENT_NODES[\"jsdef\"] = jsdef.JSDefNode\n\n setup_jquery_urls()\n\n\nsetup()\n", "path": "openlibrary/plugins/upstream/code.py"}]}
| 4,055 | 215 |
gh_patches_debug_6625
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-2784
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[xray] Users get no warning for infeasible tasks.
Start Ray with
```
RAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0
```
Then start `RAY_USE_XRAY=1 ipython` and run
```python
import ray
ray.init(redis_address='localhost:6379')
@ray.remote(num_gpus=1)
def f():
    return 1
f.remote()
```
`f` will never execute because it is infeasible, and yet the user will get no warning.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/ray_constants.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 """Ray constants used in the Python code."""
5
6 import os
7
8 import ray
9
10
11 def env_integer(key, default):
12     if key in os.environ:
13         return int(os.environ[key])
14     return default
15
16
17 ID_SIZE = 20
18 NIL_JOB_ID = ray.ObjectID(ID_SIZE * b"\x00")
19
20 # If a remote function or actor (or some other export) has serialized size
21 # greater than this quantity, print an warning.
22 PICKLE_OBJECT_WARNING_SIZE = 10**7
23
24 # The maximum resource quantity that is allowed. TODO(rkn): This could be
25 # relaxed, but the current implementation of the node manager will be slower
26 # for large resource quantities due to bookkeeping of specific resource IDs.
27 MAX_RESOURCE_QUANTITY = 512
28
29 # Different types of Ray errors that can be pushed to the driver.
30 # TODO(rkn): These should be defined in flatbuffers and must be synced with
31 # the existing C++ definitions.
32 WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
33 PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
34 WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
35 TASK_PUSH_ERROR = "task"
36 REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
37 FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
38 VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
39 CHECKPOINT_PUSH_ERROR = "checkpoint"
40 REGISTER_ACTOR_PUSH_ERROR = "register_actor"
41 WORKER_CRASH_PUSH_ERROR = "worker_crash"
42 WORKER_DIED_PUSH_ERROR = "worker_died"
43 PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
44 HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
45
46 # Abort autoscaling if more than this number of errors are encountered. This
47 # is a safety feature to prevent e.g. runaway node launches.
48 AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
49
50 # The maximum number of nodes to launch in a single request.
51 # Multiple requests may be made for this batch size, up to
52 # the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
53 AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
54
55 # Max number of nodes to launch at a time.
56 AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
57 "AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
58
59 # Interval at which to perform autoscaling updates.
60 AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
61
62 # The autoscaler will attempt to restart Ray on nodes it hasn't heard from
63 # in more than this interval.
64 AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
65 30)
66
67 # Max number of retries to AWS (default is 5, time increases exponentially)
68 BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
69
70 # Default logger format: only contains the message.
71 LOGGER_FORMAT = "%(message)s"
72 LOGGER_FORMAT_HELP = "The logging format. default='%(message)s'"
73 LOGGER_LEVEL = "info"
74 LOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']
75 LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
76 " 'warning', 'error', 'critical'], default='info'")
77
```
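The `env_integer()` helper above is what makes these limits overridable from the environment; the values are read once, when `ray.ray_constants` is first imported. A small usage sketch (the override value is illustrative):

```python
import os

# Illustrative only: the override must be set before ray.ray_constants is
# first imported, because env_integer() reads os.environ at import time.
os.environ["AUTOSCALER_MAX_NUM_FAILURES"] = "10"

import ray.ray_constants as ray_constants

print(ray_constants.AUTOSCALER_MAX_NUM_FAILURES)  # prints 10
```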
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
|
diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -42,6 +42,7 @@
 WORKER_DIED_PUSH_ERROR = "worker_died"
 PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
 HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
+INFEASIBLE_TASK_ERROR = "infeasible_task"
 
 # Abort autoscaling if more than this number of errors are encountered. This
 # is a safety feature to prevent e.g. runaway node launches.
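The patch itself only registers the new error-type string next to the existing push-error constants; the backend scheduler is what detects infeasibility and publishes a warning under this type. A hypothetical sketch of how such a message could be composed from the constant (the helper name and wording are assumptions, and the actual publishing path in the node manager is not shown):

```python
import ray.ray_constants as ray_constants


def format_infeasible_task_warning(function_name, required_resources):
    # Hypothetical helper, not part of the patch: builds the warning text that
    # a monitor process could push to the driver under the new error type.
    return (
        "The remote function {} requires {} for execution, but no node in the "
        "cluster currently provides these resources (error type: '{}').".format(
            function_name, required_resources,
            ray_constants.INFEASIBLE_TASK_ERROR,
        )
    )


# e.g. format_infeasible_task_warning("f", {"GPU": 1})
```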
|
{"golden_diff": "diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py\n--- a/python/ray/ray_constants.py\n+++ b/python/ray/ray_constants.py\n@@ -42,6 +42,7 @@\n WORKER_DIED_PUSH_ERROR = \"worker_died\"\n PUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\n HASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n+INFEASIBLE_TASK_ERROR = \"infeasible_task\"\n \n # Abort autoscaling if more than this number of errors are encountered. This\n # is a safety feature to prevent e.g. runaway node launches.\n", "issue": "[xray] Users get no warning for infeasible tasks.\nStart Ray with \r\n\r\n```\r\nRAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0\r\n```\r\n\r\nThen start `RAY_USE_XRAY=1 ipython` and run\r\n\r\n```python\r\nimport ray\r\n\r\nray.init(redis_address='localhost:6379')\r\n\r\[email protected](num_gpus=1)\r\ndef f():\r\n return 1\r\n\r\nf.remote()\r\n```\r\n\r\n`f` will never execute because it is infeasible, and yet the user will get no warning.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"Ray constants used in the Python code.\"\"\"\n\nimport os\n\nimport ray\n\n\ndef env_integer(key, default):\n if key in os.environ:\n return int(os.environ[key])\n return default\n\n\nID_SIZE = 20\nNIL_JOB_ID = ray.ObjectID(ID_SIZE * b\"\\x00\")\n\n# If a remote function or actor (or some other export) has serialized size\n# greater than this quantity, print an warning.\nPICKLE_OBJECT_WARNING_SIZE = 10**7\n\n# The maximum resource quantity that is allowed. TODO(rkn): This could be\n# relaxed, but the current implementation of the node manager will be slower\n# for large resource quantities due to bookkeeping of specific resource IDs.\nMAX_RESOURCE_QUANTITY = 512\n\n# Different types of Ray errors that can be pushed to the driver.\n# TODO(rkn): These should be defined in flatbuffers and must be synced with\n# the existing C++ definitions.\nWAIT_FOR_CLASS_PUSH_ERROR = \"wait_for_class\"\nPICKLING_LARGE_OBJECT_PUSH_ERROR = \"pickling_large_object\"\nWAIT_FOR_FUNCTION_PUSH_ERROR = \"wait_for_function\"\nTASK_PUSH_ERROR = \"task\"\nREGISTER_REMOTE_FUNCTION_PUSH_ERROR = \"register_remote_function\"\nFUNCTION_TO_RUN_PUSH_ERROR = \"function_to_run\"\nVERSION_MISMATCH_PUSH_ERROR = \"version_mismatch\"\nCHECKPOINT_PUSH_ERROR = \"checkpoint\"\nREGISTER_ACTOR_PUSH_ERROR = \"register_actor\"\nWORKER_CRASH_PUSH_ERROR = \"worker_crash\"\nWORKER_DIED_PUSH_ERROR = \"worker_died\"\nPUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\nHASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n\n# Abort autoscaling if more than this number of errors are encountered. This\n# is a safety feature to prevent e.g. 
runaway node launches.\nAUTOSCALER_MAX_NUM_FAILURES = env_integer(\"AUTOSCALER_MAX_NUM_FAILURES\", 5)\n\n# The maximum number of nodes to launch in a single request.\n# Multiple requests may be made for this batch size, up to\n# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.\nAUTOSCALER_MAX_LAUNCH_BATCH = env_integer(\"AUTOSCALER_MAX_LAUNCH_BATCH\", 5)\n\n# Max number of nodes to launch at a time.\nAUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(\n \"AUTOSCALER_MAX_CONCURRENT_LAUNCHES\", 10)\n\n# Interval at which to perform autoscaling updates.\nAUTOSCALER_UPDATE_INTERVAL_S = env_integer(\"AUTOSCALER_UPDATE_INTERVAL_S\", 5)\n\n# The autoscaler will attempt to restart Ray on nodes it hasn't heard from\n# in more than this interval.\nAUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer(\"AUTOSCALER_HEARTBEAT_TIMEOUT_S\",\n 30)\n\n# Max number of retries to AWS (default is 5, time increases exponentially)\nBOTO_MAX_RETRIES = env_integer(\"BOTO_MAX_RETRIES\", 12)\n\n# Default logger format: only contains the message.\nLOGGER_FORMAT = \"%(message)s\"\nLOGGER_FORMAT_HELP = \"The logging format. default='%(message)s'\"\nLOGGER_LEVEL = \"info\"\nLOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']\nLOGGER_LEVEL_HELP = (\"The logging level threshold, choices=['debug', 'info',\"\n \" 'warning', 'error', 'critical'], default='info'\")\n", "path": "python/ray/ray_constants.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"Ray constants used in the Python code.\"\"\"\n\nimport os\n\nimport ray\n\n\ndef env_integer(key, default):\n if key in os.environ:\n return int(os.environ[key])\n return default\n\n\nID_SIZE = 20\nNIL_JOB_ID = ray.ObjectID(ID_SIZE * b\"\\x00\")\n\n# If a remote function or actor (or some other export) has serialized size\n# greater than this quantity, print an warning.\nPICKLE_OBJECT_WARNING_SIZE = 10**7\n\n# The maximum resource quantity that is allowed. TODO(rkn): This could be\n# relaxed, but the current implementation of the node manager will be slower\n# for large resource quantities due to bookkeeping of specific resource IDs.\nMAX_RESOURCE_QUANTITY = 512\n\n# Different types of Ray errors that can be pushed to the driver.\n# TODO(rkn): These should be defined in flatbuffers and must be synced with\n# the existing C++ definitions.\nWAIT_FOR_CLASS_PUSH_ERROR = \"wait_for_class\"\nPICKLING_LARGE_OBJECT_PUSH_ERROR = \"pickling_large_object\"\nWAIT_FOR_FUNCTION_PUSH_ERROR = \"wait_for_function\"\nTASK_PUSH_ERROR = \"task\"\nREGISTER_REMOTE_FUNCTION_PUSH_ERROR = \"register_remote_function\"\nFUNCTION_TO_RUN_PUSH_ERROR = \"function_to_run\"\nVERSION_MISMATCH_PUSH_ERROR = \"version_mismatch\"\nCHECKPOINT_PUSH_ERROR = \"checkpoint\"\nREGISTER_ACTOR_PUSH_ERROR = \"register_actor\"\nWORKER_CRASH_PUSH_ERROR = \"worker_crash\"\nWORKER_DIED_PUSH_ERROR = \"worker_died\"\nPUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\nHASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\nINFEASIBLE_TASK_ERROR = \"infeasible_task\"\n\n# Abort autoscaling if more than this number of errors are encountered. This\n# is a safety feature to prevent e.g. 
runaway node launches.\nAUTOSCALER_MAX_NUM_FAILURES = env_integer(\"AUTOSCALER_MAX_NUM_FAILURES\", 5)\n\n# The maximum number of nodes to launch in a single request.\n# Multiple requests may be made for this batch size, up to\n# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.\nAUTOSCALER_MAX_LAUNCH_BATCH = env_integer(\"AUTOSCALER_MAX_LAUNCH_BATCH\", 5)\n\n# Max number of nodes to launch at a time.\nAUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(\n \"AUTOSCALER_MAX_CONCURRENT_LAUNCHES\", 10)\n\n# Interval at which to perform autoscaling updates.\nAUTOSCALER_UPDATE_INTERVAL_S = env_integer(\"AUTOSCALER_UPDATE_INTERVAL_S\", 5)\n\n# The autoscaler will attempt to restart Ray on nodes it hasn't heard from\n# in more than this interval.\nAUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer(\"AUTOSCALER_HEARTBEAT_TIMEOUT_S\",\n 30)\n\n# Max number of retries to AWS (default is 5, time increases exponentially)\nBOTO_MAX_RETRIES = env_integer(\"BOTO_MAX_RETRIES\", 12)\n\n# Default logger format: only contains the message.\nLOGGER_FORMAT = \"%(message)s\"\nLOGGER_FORMAT_HELP = \"The logging format. default='%(message)s'\"\nLOGGER_LEVEL = \"info\"\nLOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']\nLOGGER_LEVEL_HELP = (\"The logging level threshold, choices=['debug', 'info',\"\n \" 'warning', 'error', 'critical'], default='info'\")\n", "path": "python/ray/ray_constants.py"}]}
| 1,281 | 133 |
gh_patches_debug_29014
|
rasdani/github-patches
|
git_diff
|
inventree__InvenTree-3829
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Confirmation Email uses local ip for link URL
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find similar issue
### Describe the bug*
When sending a verification email, it shows the local IP not the set base URL
Eg. mywebsite.com
Hello from InvenTree!
You're receiving this e-mail because user kyle has given your e-mail address to register an account on https://mywebsite.com
To confirm this is correct, go to http://192.168.1.111:83/accounts/confirm-email/MQ:1olnC0:UXQyFESkK7GxGnQOOEJcP8AXY1gkd-bbeIK02tDoxoo/
Thank you for using InvenTree!
https://mywebsite.com
### Steps to Reproduce
1. Set base URL
2. Add an email
3. Send a verification email
### Expected behavior
URL should respect the base URL setting
### Deployment Method
- [X] Docker
- [ ] Bare metal
### Version Information
# Version Information:
InvenTree-Version: 0.8.3
Django Version: 3.2.15
Commit Hash: f9d2b14
Commit Date: 2022-09-23
Database: postgresql
Debug-Mode: False
Deployed using Docker: True
### Relevant log output
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/InvenTree/forms.py`
Content:
```
1 """Helper forms which subclass Django forms to provide additional functionality."""
2
3 import logging
4 from urllib.parse import urlencode
5
6 from django import forms
7 from django.conf import settings
8 from django.contrib.auth.models import Group, User
9 from django.http import HttpResponseRedirect
10 from django.urls import reverse
11 from django.utils.translation import gettext_lazy as _
12
13 from allauth.account.adapter import DefaultAccountAdapter
14 from allauth.account.forms import SignupForm, set_form_field_order
15 from allauth.exceptions import ImmediateHttpResponse
16 from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
17 from allauth_2fa.adapter import OTPAdapter
18 from allauth_2fa.utils import user_has_valid_totp_device
19 from crispy_forms.bootstrap import (AppendedText, PrependedAppendedText,
20 PrependedText)
21 from crispy_forms.helper import FormHelper
22 from crispy_forms.layout import Field, Layout
23
24 from common.models import InvenTreeSetting
25
26 logger = logging.getLogger('inventree')
27
28
29 class HelperForm(forms.ModelForm):
30 """Provides simple integration of crispy_forms extension."""
31
32 # Custom field decorations can be specified here, per form class
33 field_prefix = {}
34 field_suffix = {}
35 field_placeholder = {}
36
37 def __init__(self, *args, **kwargs):
38 """Setup layout."""
39 super(forms.ModelForm, self).__init__(*args, **kwargs)
40 self.helper = FormHelper()
41
42 self.helper.form_tag = False
43 self.helper.form_show_errors = True
44
45 """
46 Create a default 'layout' for this form.
47 Ref: https://django-crispy-forms.readthedocs.io/en/latest/layouts.html
48 This is required to do fancy things later (like adding PrependedText, etc).
49
50 Simply create a 'blank' layout for each available field.
51 """
52
53 self.rebuild_layout()
54
55 def rebuild_layout(self):
56 """Build crispy layout out of current fields."""
57 layouts = []
58
59 for field in self.fields:
60 prefix = self.field_prefix.get(field, None)
61 suffix = self.field_suffix.get(field, None)
62 placeholder = self.field_placeholder.get(field, '')
63
64 # Look for font-awesome icons
65 if prefix and prefix.startswith('fa-'):
66 prefix = r"<i class='fas {fa}'/>".format(fa=prefix)
67
68 if suffix and suffix.startswith('fa-'):
69 suffix = r"<i class='fas {fa}'/>".format(fa=suffix)
70
71 if prefix and suffix:
72 layouts.append(
73 Field(
74 PrependedAppendedText(
75 field,
76 prepended_text=prefix,
77 appended_text=suffix,
78 placeholder=placeholder
79 )
80 )
81 )
82
83 elif prefix:
84 layouts.append(
85 Field(
86 PrependedText(
87 field,
88 prefix,
89 placeholder=placeholder
90 )
91 )
92 )
93
94 elif suffix:
95 layouts.append(
96 Field(
97 AppendedText(
98 field,
99 suffix,
100 placeholder=placeholder
101 )
102 )
103 )
104
105 else:
106 layouts.append(Field(field, placeholder=placeholder))
107
108 self.helper.layout = Layout(*layouts)
109
110
111 class EditUserForm(HelperForm):
112 """Form for editing user information."""
113
114 class Meta:
115 """Metaclass options."""
116
117 model = User
118 fields = [
119 'first_name',
120 'last_name',
121 ]
122
123
124 class SetPasswordForm(HelperForm):
125 """Form for setting user password."""
126
127 enter_password = forms.CharField(
128 max_length=100,
129 min_length=8,
130 required=True,
131 initial='',
132 widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),
133 label=_('Enter password'),
134 help_text=_('Enter new password')
135 )
136
137 confirm_password = forms.CharField(
138 max_length=100,
139 min_length=8,
140 required=True,
141 initial='',
142 widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),
143 label=_('Confirm password'),
144 help_text=_('Confirm new password')
145 )
146
147 old_password = forms.CharField(
148 label=_("Old password"),
149 strip=False,
150 widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}),
151 )
152
153 class Meta:
154 """Metaclass options."""
155
156 model = User
157 fields = [
158 'enter_password',
159 'confirm_password',
160 'old_password',
161 ]
162
163
164 # override allauth
165 class CustomSignupForm(SignupForm):
166 """Override to use dynamic settings."""
167
168 def __init__(self, *args, **kwargs):
169 """Check settings to influence which fields are needed."""
170 kwargs['email_required'] = InvenTreeSetting.get_setting('LOGIN_MAIL_REQUIRED')
171
172 super().__init__(*args, **kwargs)
173
174 # check for two mail fields
175 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):
176 self.fields["email2"] = forms.EmailField(
177 label=_("Email (again)"),
178 widget=forms.TextInput(
179 attrs={
180 "type": "email",
181 "placeholder": _("Email address confirmation"),
182 }
183 ),
184 )
185
186 # check for two password fields
187 if not InvenTreeSetting.get_setting('LOGIN_SIGNUP_PWD_TWICE'):
188 self.fields.pop("password2")
189
190 # reorder fields
191 set_form_field_order(self, ["username", "email", "email2", "password1", "password2", ])
192
193 def clean(self):
194 """Make sure the supllied emails match if enabled in settings."""
195 cleaned_data = super().clean()
196
197 # check for two mail fields
198 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):
199 email = cleaned_data.get("email")
200 email2 = cleaned_data.get("email2")
201 if (email and email2) and email != email2:
202 self.add_error("email2", _("You must type the same email each time."))
203
204 return cleaned_data
205
206
207 class RegistratonMixin:
208 """Mixin to check if registration should be enabled."""
209
210 def is_open_for_signup(self, request, *args, **kwargs):
211 """Check if signup is enabled in settings."""
212 if settings.EMAIL_HOST and InvenTreeSetting.get_setting('LOGIN_ENABLE_REG', True):
213 return super().is_open_for_signup(request, *args, **kwargs)
214 return False
215
216 def save_user(self, request, user, form, commit=True):
217 """Check if a default group is set in settings."""
218 user = super().save_user(request, user, form)
219 start_group = InvenTreeSetting.get_setting('SIGNUP_GROUP')
220 if start_group:
221 try:
222 group = Group.objects.get(id=start_group)
223 user.groups.add(group)
224 except Group.DoesNotExist:
225 logger.error('The setting `SIGNUP_GROUP` contains an non existant group', start_group)
226 user.save()
227 return user
228
229
230 class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
231 """Override of adapter to use dynamic settings."""
232 def send_mail(self, template_prefix, email, context):
233 """Only send mail if backend configured."""
234 if settings.EMAIL_HOST:
235 return super().send_mail(template_prefix, email, context)
236 return False
237
238
239 class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):
240 """Override of adapter to use dynamic settings."""
241
242 def is_auto_signup_allowed(self, request, sociallogin):
243 """Check if auto signup is enabled in settings."""
244 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_SSO_AUTO', True):
245 return super().is_auto_signup_allowed(request, sociallogin)
246 return False
247
248 # from OTPAdapter
249 def has_2fa_enabled(self, user):
250 """Returns True if the user has 2FA configured."""
251 return user_has_valid_totp_device(user)
252
253 def login(self, request, user):
254 """Ensure user is send to 2FA before login if enabled."""
255 # Require two-factor authentication if it has been configured.
256 if self.has_2fa_enabled(user):
257 # Cast to string for the case when this is not a JSON serializable
258 # object, e.g. a UUID.
259 request.session['allauth_2fa_user_id'] = str(user.id)
260
261 redirect_url = reverse('two-factor-authenticate')
262 # Add GET parameters to the URL if they exist.
263 if request.GET:
264 redirect_url += '?' + urlencode(request.GET)
265
266 raise ImmediateHttpResponse(
267 response=HttpResponseRedirect(redirect_url)
268 )
269
270 # Otherwise defer to the original allauth adapter.
271 return super().login(request, user)
272
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/InvenTree/InvenTree/forms.py b/InvenTree/InvenTree/forms.py
--- a/InvenTree/InvenTree/forms.py
+++ b/InvenTree/InvenTree/forms.py
@@ -6,6 +6,7 @@
from django import forms
from django.conf import settings
from django.contrib.auth.models import Group, User
+from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
@@ -227,7 +228,16 @@
     return user
 
 
-class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
+class CustomUrlMixin:
+ """Mixin to set urls."""
+
+ def get_email_confirmation_url(self, request, emailconfirmation):
+ """Custom email confirmation (activation) url."""
+ url = reverse("account_confirm_email", args=[emailconfirmation.key])
+ return Site.objects.get_current().domain + url
+
+
+class CustomAccountAdapter(CustomUrlMixin, RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
"""Override of adapter to use dynamic settings."""
def send_mail(self, template_prefix, email, context):
"""Only send mail if backend configured."""
@@ -236,7 +246,7 @@
     return False
 
 
-class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):
+class CustomSocialAccountAdapter(CustomUrlMixin, RegistratonMixin, DefaultSocialAccountAdapter):
     """Override of adapter to use dynamic settings."""
 
     def is_auto_signup_allowed(self, request, sociallogin):
|
{"golden_diff": "diff --git a/InvenTree/InvenTree/forms.py b/InvenTree/InvenTree/forms.py\n--- a/InvenTree/InvenTree/forms.py\n+++ b/InvenTree/InvenTree/forms.py\n@@ -6,6 +6,7 @@\n from django import forms\n from django.conf import settings\n from django.contrib.auth.models import Group, User\n+from django.contrib.sites.models import Site\n from django.http import HttpResponseRedirect\n from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n@@ -227,7 +228,16 @@\n return user\n \n \n-class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n+class CustomUrlMixin:\n+ \"\"\"Mixin to set urls.\"\"\"\n+\n+ def get_email_confirmation_url(self, request, emailconfirmation):\n+ \"\"\"Custom email confirmation (activation) url.\"\"\"\n+ url = reverse(\"account_confirm_email\", args=[emailconfirmation.key])\n+ return Site.objects.get_current().domain + url\n+\n+\n+class CustomAccountAdapter(CustomUrlMixin, RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n def send_mail(self, template_prefix, email, context):\n \"\"\"Only send mail if backend configured.\"\"\"\n@@ -236,7 +246,7 @@\n return False\n \n \n-class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):\n+class CustomSocialAccountAdapter(CustomUrlMixin, RegistratonMixin, DefaultSocialAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n \n def is_auto_signup_allowed(self, request, sociallogin):\n", "issue": "[BUG] Confirmation Email uses local ip for link URL\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find similar issue\n\n### Describe the bug*\n\nWhen sending a verification email, it shows the local IP not the set base URL\r\n\r\nEg. mywebsite.com\r\n\r\nHello from InvenTree!\r\n\r\nYou're receiving this e-mail because user kyle has given your e-mail address to register an account on https://mywebsite.com\r\n\r\nTo confirm this is correct, go to http://192.168.1.111:83/accounts/confirm-email/MQ:1olnC0:UXQyFESkK7GxGnQOOEJcP8AXY1gkd-bbeIK02tDoxoo/\r\n\r\nThank you for using InvenTree!\r\nhttps://mywebsite.com\n\n### Steps to Reproduce\n\n1. Set base URL\r\n2. Add an email\r\n3. 
Send a verification email\n\n### Expected behavior\n\nURL should respect the base URL setting\n\n### Deployment Method\n\n- [X] Docker\n- [ ] Bare metal\n\n### Version Information\n\n# Version Information:\r\nInvenTree-Version: 0.8.3\r\nDjango Version: 3.2.15\r\nCommit Hash: f9d2b14\r\nCommit Date: 2022-09-23\r\nDatabase: postgresql\r\nDebug-Mode: False\r\nDeployed using Docker: True\r\n\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "\"\"\"Helper forms which subclass Django forms to provide additional functionality.\"\"\"\n\nimport logging\nfrom urllib.parse import urlencode\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom allauth.account.forms import SignupForm, set_form_field_order\nfrom allauth.exceptions import ImmediateHttpResponse\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom allauth_2fa.adapter import OTPAdapter\nfrom allauth_2fa.utils import user_has_valid_totp_device\nfrom crispy_forms.bootstrap import (AppendedText, PrependedAppendedText,\n PrependedText)\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout\n\nfrom common.models import InvenTreeSetting\n\nlogger = logging.getLogger('inventree')\n\n\nclass HelperForm(forms.ModelForm):\n \"\"\"Provides simple integration of crispy_forms extension.\"\"\"\n\n # Custom field decorations can be specified here, per form class\n field_prefix = {}\n field_suffix = {}\n field_placeholder = {}\n\n def __init__(self, *args, **kwargs):\n \"\"\"Setup layout.\"\"\"\n super(forms.ModelForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n\n self.helper.form_tag = False\n self.helper.form_show_errors = True\n\n \"\"\"\n Create a default 'layout' for this form.\n Ref: https://django-crispy-forms.readthedocs.io/en/latest/layouts.html\n This is required to do fancy things later (like adding PrependedText, etc).\n\n Simply create a 'blank' layout for each available field.\n \"\"\"\n\n self.rebuild_layout()\n\n def rebuild_layout(self):\n \"\"\"Build crispy layout out of current fields.\"\"\"\n layouts = []\n\n for field in self.fields:\n prefix = self.field_prefix.get(field, None)\n suffix = self.field_suffix.get(field, None)\n placeholder = self.field_placeholder.get(field, '')\n\n # Look for font-awesome icons\n if prefix and prefix.startswith('fa-'):\n prefix = r\"<i class='fas {fa}'/>\".format(fa=prefix)\n\n if suffix and suffix.startswith('fa-'):\n suffix = r\"<i class='fas {fa}'/>\".format(fa=suffix)\n\n if prefix and suffix:\n layouts.append(\n Field(\n PrependedAppendedText(\n field,\n prepended_text=prefix,\n appended_text=suffix,\n placeholder=placeholder\n )\n )\n )\n\n elif prefix:\n layouts.append(\n Field(\n PrependedText(\n field,\n prefix,\n placeholder=placeholder\n )\n )\n )\n\n elif suffix:\n layouts.append(\n Field(\n AppendedText(\n field,\n suffix,\n placeholder=placeholder\n )\n )\n )\n\n else:\n layouts.append(Field(field, placeholder=placeholder))\n\n self.helper.layout = Layout(*layouts)\n\n\nclass EditUserForm(HelperForm):\n \"\"\"Form for editing user information.\"\"\"\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'first_name',\n 'last_name',\n ]\n\n\nclass SetPasswordForm(HelperForm):\n \"\"\"Form for setting 
user password.\"\"\"\n\n enter_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Enter password'),\n help_text=_('Enter new password')\n )\n\n confirm_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Confirm password'),\n help_text=_('Confirm new password')\n )\n\n old_password = forms.CharField(\n label=_(\"Old password\"),\n strip=False,\n widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}),\n )\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'enter_password',\n 'confirm_password',\n 'old_password',\n ]\n\n\n# override allauth\nclass CustomSignupForm(SignupForm):\n \"\"\"Override to use dynamic settings.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Check settings to influence which fields are needed.\"\"\"\n kwargs['email_required'] = InvenTreeSetting.get_setting('LOGIN_MAIL_REQUIRED')\n\n super().__init__(*args, **kwargs)\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n self.fields[\"email2\"] = forms.EmailField(\n label=_(\"Email (again)\"),\n widget=forms.TextInput(\n attrs={\n \"type\": \"email\",\n \"placeholder\": _(\"Email address confirmation\"),\n }\n ),\n )\n\n # check for two password fields\n if not InvenTreeSetting.get_setting('LOGIN_SIGNUP_PWD_TWICE'):\n self.fields.pop(\"password2\")\n\n # reorder fields\n set_form_field_order(self, [\"username\", \"email\", \"email2\", \"password1\", \"password2\", ])\n\n def clean(self):\n \"\"\"Make sure the supllied emails match if enabled in settings.\"\"\"\n cleaned_data = super().clean()\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n email = cleaned_data.get(\"email\")\n email2 = cleaned_data.get(\"email2\")\n if (email and email2) and email != email2:\n self.add_error(\"email2\", _(\"You must type the same email each time.\"))\n\n return cleaned_data\n\n\nclass RegistratonMixin:\n \"\"\"Mixin to check if registration should be enabled.\"\"\"\n\n def is_open_for_signup(self, request, *args, **kwargs):\n \"\"\"Check if signup is enabled in settings.\"\"\"\n if settings.EMAIL_HOST and InvenTreeSetting.get_setting('LOGIN_ENABLE_REG', True):\n return super().is_open_for_signup(request, *args, **kwargs)\n return False\n\n def save_user(self, request, user, form, commit=True):\n \"\"\"Check if a default group is set in settings.\"\"\"\n user = super().save_user(request, user, form)\n start_group = InvenTreeSetting.get_setting('SIGNUP_GROUP')\n if start_group:\n try:\n group = Group.objects.get(id=start_group)\n user.groups.add(group)\n except Group.DoesNotExist:\n logger.error('The setting `SIGNUP_GROUP` contains an non existant group', start_group)\n user.save()\n return user\n\n\nclass CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n def send_mail(self, template_prefix, email, context):\n \"\"\"Only send mail if backend configured.\"\"\"\n if settings.EMAIL_HOST:\n return super().send_mail(template_prefix, email, context)\n return False\n\n\nclass CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n\n def is_auto_signup_allowed(self, request, sociallogin):\n \"\"\"Check 
if auto signup is enabled in settings.\"\"\"\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_SSO_AUTO', True):\n return super().is_auto_signup_allowed(request, sociallogin)\n return False\n\n # from OTPAdapter\n def has_2fa_enabled(self, user):\n \"\"\"Returns True if the user has 2FA configured.\"\"\"\n return user_has_valid_totp_device(user)\n\n def login(self, request, user):\n \"\"\"Ensure user is send to 2FA before login if enabled.\"\"\"\n # Require two-factor authentication if it has been configured.\n if self.has_2fa_enabled(user):\n # Cast to string for the case when this is not a JSON serializable\n # object, e.g. a UUID.\n request.session['allauth_2fa_user_id'] = str(user.id)\n\n redirect_url = reverse('two-factor-authenticate')\n # Add GET parameters to the URL if they exist.\n if request.GET:\n redirect_url += '?' + urlencode(request.GET)\n\n raise ImmediateHttpResponse(\n response=HttpResponseRedirect(redirect_url)\n )\n\n # Otherwise defer to the original allauth adapter.\n return super().login(request, user)\n", "path": "InvenTree/InvenTree/forms.py"}], "after_files": [{"content": "\"\"\"Helper forms which subclass Django forms to provide additional functionality.\"\"\"\n\nimport logging\nfrom urllib.parse import urlencode\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom allauth.account.forms import SignupForm, set_form_field_order\nfrom allauth.exceptions import ImmediateHttpResponse\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom allauth_2fa.adapter import OTPAdapter\nfrom allauth_2fa.utils import user_has_valid_totp_device\nfrom crispy_forms.bootstrap import (AppendedText, PrependedAppendedText,\n PrependedText)\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout\n\nfrom common.models import InvenTreeSetting\n\nlogger = logging.getLogger('inventree')\n\n\nclass HelperForm(forms.ModelForm):\n \"\"\"Provides simple integration of crispy_forms extension.\"\"\"\n\n # Custom field decorations can be specified here, per form class\n field_prefix = {}\n field_suffix = {}\n field_placeholder = {}\n\n def __init__(self, *args, **kwargs):\n \"\"\"Setup layout.\"\"\"\n super(forms.ModelForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n\n self.helper.form_tag = False\n self.helper.form_show_errors = True\n\n \"\"\"\n Create a default 'layout' for this form.\n Ref: https://django-crispy-forms.readthedocs.io/en/latest/layouts.html\n This is required to do fancy things later (like adding PrependedText, etc).\n\n Simply create a 'blank' layout for each available field.\n \"\"\"\n\n self.rebuild_layout()\n\n def rebuild_layout(self):\n \"\"\"Build crispy layout out of current fields.\"\"\"\n layouts = []\n\n for field in self.fields:\n prefix = self.field_prefix.get(field, None)\n suffix = self.field_suffix.get(field, None)\n placeholder = self.field_placeholder.get(field, '')\n\n # Look for font-awesome icons\n if prefix and prefix.startswith('fa-'):\n prefix = r\"<i class='fas {fa}'/>\".format(fa=prefix)\n\n if suffix and suffix.startswith('fa-'):\n suffix = r\"<i class='fas {fa}'/>\".format(fa=suffix)\n\n if prefix and suffix:\n layouts.append(\n Field(\n PrependedAppendedText(\n field,\n 
prepended_text=prefix,\n appended_text=suffix,\n placeholder=placeholder\n )\n )\n )\n\n elif prefix:\n layouts.append(\n Field(\n PrependedText(\n field,\n prefix,\n placeholder=placeholder\n )\n )\n )\n\n elif suffix:\n layouts.append(\n Field(\n AppendedText(\n field,\n suffix,\n placeholder=placeholder\n )\n )\n )\n\n else:\n layouts.append(Field(field, placeholder=placeholder))\n\n self.helper.layout = Layout(*layouts)\n\n\nclass EditUserForm(HelperForm):\n \"\"\"Form for editing user information.\"\"\"\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'first_name',\n 'last_name',\n ]\n\n\nclass SetPasswordForm(HelperForm):\n \"\"\"Form for setting user password.\"\"\"\n\n enter_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Enter password'),\n help_text=_('Enter new password')\n )\n\n confirm_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Confirm password'),\n help_text=_('Confirm new password')\n )\n\n old_password = forms.CharField(\n label=_(\"Old password\"),\n strip=False,\n widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}),\n )\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'enter_password',\n 'confirm_password',\n 'old_password',\n ]\n\n\n# override allauth\nclass CustomSignupForm(SignupForm):\n \"\"\"Override to use dynamic settings.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Check settings to influence which fields are needed.\"\"\"\n kwargs['email_required'] = InvenTreeSetting.get_setting('LOGIN_MAIL_REQUIRED')\n\n super().__init__(*args, **kwargs)\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n self.fields[\"email2\"] = forms.EmailField(\n label=_(\"Email (again)\"),\n widget=forms.TextInput(\n attrs={\n \"type\": \"email\",\n \"placeholder\": _(\"Email address confirmation\"),\n }\n ),\n )\n\n # check for two password fields\n if not InvenTreeSetting.get_setting('LOGIN_SIGNUP_PWD_TWICE'):\n self.fields.pop(\"password2\")\n\n # reorder fields\n set_form_field_order(self, [\"username\", \"email\", \"email2\", \"password1\", \"password2\", ])\n\n def clean(self):\n \"\"\"Make sure the supllied emails match if enabled in settings.\"\"\"\n cleaned_data = super().clean()\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n email = cleaned_data.get(\"email\")\n email2 = cleaned_data.get(\"email2\")\n if (email and email2) and email != email2:\n self.add_error(\"email2\", _(\"You must type the same email each time.\"))\n\n return cleaned_data\n\n\nclass RegistratonMixin:\n \"\"\"Mixin to check if registration should be enabled.\"\"\"\n\n def is_open_for_signup(self, request, *args, **kwargs):\n \"\"\"Check if signup is enabled in settings.\"\"\"\n if settings.EMAIL_HOST and InvenTreeSetting.get_setting('LOGIN_ENABLE_REG', True):\n return super().is_open_for_signup(request, *args, **kwargs)\n return False\n\n def save_user(self, request, user, form, commit=True):\n \"\"\"Check if a default group is set in settings.\"\"\"\n user = super().save_user(request, user, form)\n start_group = InvenTreeSetting.get_setting('SIGNUP_GROUP')\n if start_group:\n try:\n group = Group.objects.get(id=start_group)\n user.groups.add(group)\n except 
Group.DoesNotExist:\n logger.error('The setting `SIGNUP_GROUP` contains an non existant group', start_group)\n user.save()\n return user\n\n\nclass CustomUrlMixin:\n \"\"\"Mixin to set urls.\"\"\"\n\n def get_email_confirmation_url(self, request, emailconfirmation):\n \"\"\"Custom email confirmation (activation) url.\"\"\"\n url = reverse(\"account_confirm_email\", args=[emailconfirmation.key])\n return Site.objects.get_current().domain + url\n\n\nclass CustomAccountAdapter(CustomUrlMixin, RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n def send_mail(self, template_prefix, email, context):\n \"\"\"Only send mail if backend configured.\"\"\"\n if settings.EMAIL_HOST:\n return super().send_mail(template_prefix, email, context)\n return False\n\n\nclass CustomSocialAccountAdapter(CustomUrlMixin, RegistratonMixin, DefaultSocialAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n\n def is_auto_signup_allowed(self, request, sociallogin):\n \"\"\"Check if auto signup is enabled in settings.\"\"\"\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_SSO_AUTO', True):\n return super().is_auto_signup_allowed(request, sociallogin)\n return False\n\n # from OTPAdapter\n def has_2fa_enabled(self, user):\n \"\"\"Returns True if the user has 2FA configured.\"\"\"\n return user_has_valid_totp_device(user)\n\n def login(self, request, user):\n \"\"\"Ensure user is send to 2FA before login if enabled.\"\"\"\n # Require two-factor authentication if it has been configured.\n if self.has_2fa_enabled(user):\n # Cast to string for the case when this is not a JSON serializable\n # object, e.g. a UUID.\n request.session['allauth_2fa_user_id'] = str(user.id)\n\n redirect_url = reverse('two-factor-authenticate')\n # Add GET parameters to the URL if they exist.\n if request.GET:\n redirect_url += '?' + urlencode(request.GET)\n\n raise ImmediateHttpResponse(\n response=HttpResponseRedirect(redirect_url)\n )\n\n # Otherwise defer to the original allauth adapter.\n return super().login(request, user)\n", "path": "InvenTree/InvenTree/forms.py"}]}
| 3,103 | 351 |
gh_patches_debug_11862
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-18894
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docs: `adhoc_tool` example field values swapped
When looking at the rendered docs it was much easier to spot that the example field values here are swapped (files vs directories). 👀
_Originally posted by @kaos in https://github.com/pantsbuild/pants/pull/18237#discussion_r1184219518_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/adhoc/target_types.py`
Content:
```
1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from typing import ClassVar
7
8 from pants.core.util_rules.environments import EnvironmentField
9 from pants.engine.target import (
10 COMMON_TARGET_FIELDS,
11 BoolField,
12 Dependencies,
13 IntField,
14 MultipleSourcesField,
15 SpecialCasedDependencies,
16 StringField,
17 StringSequenceField,
18 Target,
19 ValidNumbers,
20 )
21 from pants.util.strutil import help_text
22
23
24 class AdhocToolDependenciesField(Dependencies):
25 pass
26
27
28 class AdhocToolRunnableField(StringField):
29 alias: ClassVar[str] = "runnable"
30 required = True
31 help = help_text(
32 lambda: f"""
33 Address to a target that can be invoked by the `run` goal (and does not set
34 `run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments
35 specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive
36 dependencies, along with the transitive dependencies specified by
37 `{AdhocToolExecutionDependenciesField.alias}`.
38 """
39 )
40
41
42 class AdhocToolOutputFilesField(StringSequenceField):
43 alias: ClassVar[str] = "output_files"
44 required = False
45 default = ()
46 help = help_text(
47 lambda: f"""
48 Specify the output files to capture, relative to the value of
49 `{AdhocToolWorkdirField.alias}`.
50
51 For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of
52 `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be
53 specified.
54
55 Relative paths (including `..`) may be used, as long as the path does not ascend further
56 than the build root.
57 """
58 )
59
60
61 class AdhocToolOutputDirectoriesField(StringSequenceField):
62 alias: ClassVar[str] = "output_directories"
63 required = False
64 default = ()
65 help = help_text(
66 lambda: f"""
67 Specify full directories (including recursive descendants) of output to capture, relative
68 to the value of `{AdhocToolWorkdirField.alias}`.
69
70 For individual files, use `{AdhocToolOutputFilesField.alias}`. At least one of
71 `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be
72 specified.
73
74 Relative paths (including `..`) may be used, as long as the path does not ascend further
75 than the build root.
76 """
77 )
78
79
80 class AdhocToolOutputDependenciesField(AdhocToolDependenciesField):
81 supports_transitive_excludes = True
82 alias: ClassVar[str] = "output_dependencies"
83
84 help = help_text(
85 lambda: """
86 Any dependencies that need to be present (as transitive dependencies) whenever the outputs
87 of this target are consumed (including as dependencies).
88 """
89 )
90
91
92 class AdhocToolExecutionDependenciesField(SpecialCasedDependencies):
93 alias: ClassVar[str] = "execution_dependencies"
94 required = False
95 default = None
96
97 help = help_text(
98 lambda: f"""
99 The execution dependencies for this command.
100
101 Dependencies specified here are those required to make the command complete successfully
102 (e.g. file inputs, packages compiled from other targets, etc), but NOT required to make
103 the outputs of the command useful. Dependencies that are required to use the outputs
104 produced by this command should be specified using the
105 `{AdhocToolOutputDependenciesField.alias}` field.
106
107 If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`
108 will not be added to the execution sandbox.
109 """
110 )
111
112
113 class AdhocToolRunnableDependenciesField(SpecialCasedDependencies):
114 alias: ClassVar[str] = "runnable_dependencies"
115 required = False
116 default = None
117
118 help = help_text(
119 lambda: f"""
120 The execution dependencies for this command.
121
122 Dependencies specified here are those required to exist on the `PATH` to make the command
123 complete successfully (interpreters specified in a `#!` command, etc). Note that these
124 dependencies will be made available on the `PATH` with the name of the target.
125
126 See also `{AdhocToolOutputDependenciesField.alias}` and
127 `{AdhocToolExecutionDependenciesField.alias}.
128 """
129 )
130
131
132 class AdhocToolSourcesField(MultipleSourcesField):
133 # We solely register this field for codegen to work.
134 alias: ClassVar[str] = "_sources"
135 uses_source_roots = False
136 expected_num_files = 0
137
138
139 class AdhocToolArgumentsField(StringSequenceField):
140 alias: ClassVar[str] = "args"
141 default = ()
142 help = help_text(
143 lambda: f"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field."
144 )
145
146
147 class AdhocToolStdoutFilenameField(StringField):
148 alias: ClassVar[str] = "stdout"
149 default = None
150 help = help_text(
151 lambda: f"""
152 A filename to capture the contents of `stdout` to. Relative paths are
153 relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
154 start at the build root.
155 """
156 )
157
158
159 class AdhocToolStderrFilenameField(StringField):
160 alias: ClassVar[str] = "stderr"
161 default = None
162 help = help_text(
163 lambda: f"""
164 A filename to capture the contents of `stderr` to. Relative paths are
165 relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
166 start at the build root.
167 """
168 )
169
170
171 class AdhocToolTimeoutField(IntField):
172 alias: ClassVar[str] = "timeout"
173 default = 30
174 help = "Command execution timeout (in seconds)."
175 valid_numbers = ValidNumbers.positive_only
176
177
178 class AdhocToolExtraEnvVarsField(StringSequenceField):
179 alias: ClassVar[str] = "extra_env_vars"
180 help = help_text(
181 """
182 Additional environment variables to provide to the process.
183
184 Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
185 `ENV_VAR` to copy the value of a variable in Pants's own environment.
186 """
187 )
188
189
190 class AdhocToolLogOutputField(BoolField):
191 alias: ClassVar[str] = "log_output"
192 default = False
193 help = "Set to true if you want the output logged to the console."
194
195
196 class AdhocToolWorkdirField(StringField):
197 alias: ClassVar[str] = "workdir"
198 default = "."
199 help = help_text(
200 """
201 Sets the working directory for the process.
202
203 Values are relative to the build root, except in the following cases:
204
205 * `.` specifies the location of the `BUILD` file.
206 * Values beginning with `./` are relative to the location of the `BUILD` file.
207 * `/` or the empty string specifies the build root.
208 * Values beginning with `/` are also relative to the build root.
209 """
210 )
211
212
213 class AdhocToolOutputRootDirField(StringField):
214 alias: ClassVar[str] = "root_output_directory"
215 default = "/"
216 help = help_text(
217 """Adjusts the location of files output by this target, when consumed as a dependency.
218
219 Values are relative to the build root, except in the following cases:
220
221 * `.` specifies the location of the `BUILD` file.
222 * Values beginning with `./` are relative to the location of the `BUILD` file.
223 * `/` or the empty string specifies the build root.
224 * Values beginning with `/` are also relative to the build root.
225 """
226 )
227
228
229 class AdhocToolTarget(Target):
230 alias: ClassVar[str] = "adhoc_tool"
231 core_fields = (
232 *COMMON_TARGET_FIELDS,
233 AdhocToolRunnableField,
234 AdhocToolArgumentsField,
235 AdhocToolExecutionDependenciesField,
236 AdhocToolOutputDependenciesField,
237 AdhocToolRunnableDependenciesField,
238 AdhocToolLogOutputField,
239 AdhocToolOutputFilesField,
240 AdhocToolOutputDirectoriesField,
241 AdhocToolSourcesField,
242 AdhocToolTimeoutField,
243 AdhocToolExtraEnvVarsField,
244 AdhocToolWorkdirField,
245 AdhocToolOutputRootDirField,
246 AdhocToolStdoutFilenameField,
247 AdhocToolStderrFilenameField,
248 EnvironmentField,
249 )
250 help = help_text(
251 lambda: f"""
252 Execute any runnable target for its side effects.
253
254 Example BUILD file:
255
256 {AdhocToolTarget.alias}(
257 {AdhocToolRunnableField.alias}=":python_source",
258 {AdhocToolArgumentsField.alias}=[""],
259 {AdhocToolExecutionDependenciesField.alias}=[":scripts"],
260 {AdhocToolOutputDirectoriesField.alias}=["logs/my-script.log"],
261 {AdhocToolOutputFilesField.alias}=["results/"],
262 )
263
264 shell_sources(name="scripts")
265 """
266 )
267
268
269 # ---
270 # `system_binary` target
271 # ---
272
273
274 class SystemBinaryNameField(StringField):
275 alias: ClassVar[str] = "binary_name"
276 required = True
277 help = "The name of the binary to find."
278
279
280 class SystemBinaryExtraSearchPathsField(StringSequenceField):
281 alias: ClassVar[str] = "extra_search_paths"
282 default = ()
283 help = help_text(
284 """
285 Extra search paths to look for the binary. These take priority over Pants' default
286 search paths.
287 """
288 )
289
290
291 class SystemBinaryFingerprintPattern(StringField):
292 alias: ClassVar[str] = "fingerprint"
293 required = False
294 default = None
295 help = help_text(
296 """
297 A regular expression which will be used to match the fingerprint outputs from
298 candidate binaries found during the search process.
299 """
300 )
301
302
303 class SystemBinaryFingerprintArgsField(StringSequenceField):
304 alias: ClassVar[str] = "fingerprint_args"
305 default = ()
306 help = help_text(
307 "Specifies arguments that will be used to run the binary during the search process."
308 )
309
310
311 class SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):
312 alias: ClassVar[str] = "fingerprint_dependencies"
313 help = help_text(
314 """
315 Specifies any runnable dependencies that need to be available on the `PATH` when the binary
316 is run, so that the search process may complete successfully. The name of the target must
317 be the name of the runnable dependency that is called by this binary.
318 """
319 )
320
321
322 class SystemBinaryTarget(Target):
323 alias: ClassVar[str] = "system_binary"
324 core_fields = (
325 *COMMON_TARGET_FIELDS,
326 SystemBinaryNameField,
327 SystemBinaryExtraSearchPathsField,
328 SystemBinaryFingerprintPattern,
329 SystemBinaryFingerprintArgsField,
330 SystemBinaryFingerprintDependenciesField,
331 )
332 help = help_text(
333 lambda: f"""
334 A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.
335
336 Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search
337 paths provided, as well as default search paths. If
338 `{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be
339 executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries
340 whose output does not match the pattern will be excluded.
341
342 The first non-excluded binary will be the one that is resolved.
343 """
344 )
345
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/backend/adhoc/target_types.py b/src/python/pants/backend/adhoc/target_types.py
--- a/src/python/pants/backend/adhoc/target_types.py
+++ b/src/python/pants/backend/adhoc/target_types.py
@@ -257,8 +257,8 @@
{AdhocToolRunnableField.alias}=":python_source",
{AdhocToolArgumentsField.alias}=[""],
{AdhocToolExecutionDependenciesField.alias}=[":scripts"],
- {AdhocToolOutputDirectoriesField.alias}=["logs/my-script.log"],
- {AdhocToolOutputFilesField.alias}=["results/"],
+ {AdhocToolOutputDirectoriesField.alias}=["results/"],
+ {AdhocToolOutputFilesField.alias}=["logs/my-script.log"],
        )
 
        shell_sources(name="scripts")
|
{"golden_diff": "diff --git a/src/python/pants/backend/adhoc/target_types.py b/src/python/pants/backend/adhoc/target_types.py\n--- a/src/python/pants/backend/adhoc/target_types.py\n+++ b/src/python/pants/backend/adhoc/target_types.py\n@@ -257,8 +257,8 @@\n {AdhocToolRunnableField.alias}=\":python_source\",\n {AdhocToolArgumentsField.alias}=[\"\"],\n {AdhocToolExecutionDependenciesField.alias}=[\":scripts\"],\n- {AdhocToolOutputDirectoriesField.alias}=[\"logs/my-script.log\"],\n- {AdhocToolOutputFilesField.alias}=[\"results/\"],\n+ {AdhocToolOutputDirectoriesField.alias}=[\"results/\"],\n+ {AdhocToolOutputFilesField.alias}=[\"logs/my-script.log\"],\n )\n \n shell_sources(name=\"scripts\")\n", "issue": "docs: `adhoc_tool` example field values swapped\n When looking at the rendered docs it was much easier to spot that the example field values here are swapped (files vs directories). \ud83d\udc40\r\n\r\n_Originally posted by @kaos in https://github.com/pantsbuild/pants/pull/18237#discussion_r1184219518_\r\n \n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar\n\nfrom pants.core.util_rules.environments import EnvironmentField\nfrom pants.engine.target import (\n COMMON_TARGET_FIELDS,\n BoolField,\n Dependencies,\n IntField,\n MultipleSourcesField,\n SpecialCasedDependencies,\n StringField,\n StringSequenceField,\n Target,\n ValidNumbers,\n)\nfrom pants.util.strutil import help_text\n\n\nclass AdhocToolDependenciesField(Dependencies):\n pass\n\n\nclass AdhocToolRunnableField(StringField):\n alias: ClassVar[str] = \"runnable\"\n required = True\n help = help_text(\n lambda: f\"\"\"\n Address to a target that can be invoked by the `run` goal (and does not set\n `run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments\n specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive\n dependencies, along with the transitive dependencies specified by\n `{AdhocToolExecutionDependenciesField.alias}`.\n \"\"\"\n )\n\n\nclass AdhocToolOutputFilesField(StringSequenceField):\n alias: ClassVar[str] = \"output_files\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify the output files to capture, relative to the value of\n `{AdhocToolWorkdirField.alias}`.\n\n For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDirectoriesField(StringSequenceField):\n alias: ClassVar[str] = \"output_directories\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify full directories (including recursive descendants) of output to capture, relative\n to the value of `{AdhocToolWorkdirField.alias}`.\n\n For individual files, use `{AdhocToolOutputFilesField.alias}`. 
At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDependenciesField(AdhocToolDependenciesField):\n supports_transitive_excludes = True\n alias: ClassVar[str] = \"output_dependencies\"\n\n help = help_text(\n lambda: \"\"\"\n Any dependencies that need to be present (as transitive dependencies) whenever the outputs\n of this target are consumed (including as dependencies).\n \"\"\"\n )\n\n\nclass AdhocToolExecutionDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"execution_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to make the command complete successfully\n (e.g. file inputs, packages compiled from other targets, etc), but NOT required to make\n the outputs of the command useful. Dependencies that are required to use the outputs\n produced by this command should be specified using the\n `{AdhocToolOutputDependenciesField.alias}` field.\n\n If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`\n will not be added to the execution sandbox.\n \"\"\"\n )\n\n\nclass AdhocToolRunnableDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"runnable_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to exist on the `PATH` to make the command\n complete successfully (interpreters specified in a `#!` command, etc). Note that these\n dependencies will be made available on the `PATH` with the name of the target.\n\n See also `{AdhocToolOutputDependenciesField.alias}` and\n `{AdhocToolExecutionDependenciesField.alias}.\n \"\"\"\n )\n\n\nclass AdhocToolSourcesField(MultipleSourcesField):\n # We solely register this field for codegen to work.\n alias: ClassVar[str] = \"_sources\"\n uses_source_roots = False\n expected_num_files = 0\n\n\nclass AdhocToolArgumentsField(StringSequenceField):\n alias: ClassVar[str] = \"args\"\n default = ()\n help = help_text(\n lambda: f\"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field.\"\n )\n\n\nclass AdhocToolStdoutFilenameField(StringField):\n alias: ClassVar[str] = \"stdout\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stdout` to. Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolStderrFilenameField(StringField):\n alias: ClassVar[str] = \"stderr\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stderr` to. 
Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTimeoutField(IntField):\n alias: ClassVar[str] = \"timeout\"\n default = 30\n help = \"Command execution timeout (in seconds).\"\n valid_numbers = ValidNumbers.positive_only\n\n\nclass AdhocToolExtraEnvVarsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_env_vars\"\n help = help_text(\n \"\"\"\n Additional environment variables to provide to the process.\n\n Entries are strings in the form `ENV_VAR=value` to use explicitly; or just\n `ENV_VAR` to copy the value of a variable in Pants's own environment.\n \"\"\"\n )\n\n\nclass AdhocToolLogOutputField(BoolField):\n alias: ClassVar[str] = \"log_output\"\n default = False\n help = \"Set to true if you want the output logged to the console.\"\n\n\nclass AdhocToolWorkdirField(StringField):\n alias: ClassVar[str] = \"workdir\"\n default = \".\"\n help = help_text(\n \"\"\"\n Sets the working directory for the process.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputRootDirField(StringField):\n alias: ClassVar[str] = \"root_output_directory\"\n default = \"/\"\n help = help_text(\n \"\"\"Adjusts the location of files output by this target, when consumed as a dependency.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTarget(Target):\n alias: ClassVar[str] = \"adhoc_tool\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n AdhocToolRunnableField,\n AdhocToolArgumentsField,\n AdhocToolExecutionDependenciesField,\n AdhocToolOutputDependenciesField,\n AdhocToolRunnableDependenciesField,\n AdhocToolLogOutputField,\n AdhocToolOutputFilesField,\n AdhocToolOutputDirectoriesField,\n AdhocToolSourcesField,\n AdhocToolTimeoutField,\n AdhocToolExtraEnvVarsField,\n AdhocToolWorkdirField,\n AdhocToolOutputRootDirField,\n AdhocToolStdoutFilenameField,\n AdhocToolStderrFilenameField,\n EnvironmentField,\n )\n help = help_text(\n lambda: f\"\"\"\n Execute any runnable target for its side effects.\n\n Example BUILD file:\n\n {AdhocToolTarget.alias}(\n {AdhocToolRunnableField.alias}=\":python_source\",\n {AdhocToolArgumentsField.alias}=[\"\"],\n {AdhocToolExecutionDependenciesField.alias}=[\":scripts\"],\n {AdhocToolOutputDirectoriesField.alias}=[\"logs/my-script.log\"],\n {AdhocToolOutputFilesField.alias}=[\"results/\"],\n )\n\n shell_sources(name=\"scripts\")\n \"\"\"\n )\n\n\n# ---\n# `system_binary` target\n# ---\n\n\nclass SystemBinaryNameField(StringField):\n alias: ClassVar[str] = \"binary_name\"\n required = True\n help = \"The name of the binary to find.\"\n\n\nclass SystemBinaryExtraSearchPathsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_search_paths\"\n default = ()\n help = help_text(\n \"\"\"\n Extra search paths to look for the binary. 
These take priority over Pants' default\n search paths.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintPattern(StringField):\n alias: ClassVar[str] = \"fingerprint\"\n required = False\n default = None\n help = help_text(\n \"\"\"\n A regular expression which will be used to match the fingerprint outputs from\n candidate binaries found during the search process.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintArgsField(StringSequenceField):\n alias: ClassVar[str] = \"fingerprint_args\"\n default = ()\n help = help_text(\n \"Specifies arguments that will be used to run the binary during the search process.\"\n )\n\n\nclass SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):\n alias: ClassVar[str] = \"fingerprint_dependencies\"\n help = help_text(\n \"\"\"\n Specifies any runnable dependencies that need to be available on the `PATH` when the binary\n is run, so that the search process may complete successfully. The name of the target must\n be the name of the runnable dependency that is called by this binary.\n \"\"\"\n )\n\n\nclass SystemBinaryTarget(Target):\n alias: ClassVar[str] = \"system_binary\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n SystemBinaryNameField,\n SystemBinaryExtraSearchPathsField,\n SystemBinaryFingerprintPattern,\n SystemBinaryFingerprintArgsField,\n SystemBinaryFingerprintDependenciesField,\n )\n help = help_text(\n lambda: f\"\"\"\n A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.\n\n Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search\n paths provided, as well as default search paths. If\n `{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be\n executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries\n whose output does not match the pattern will be excluded.\n\n The first non-excluded binary will be the one that is resolved.\n \"\"\"\n )\n", "path": "src/python/pants/backend/adhoc/target_types.py"}], "after_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar\n\nfrom pants.core.util_rules.environments import EnvironmentField\nfrom pants.engine.target import (\n COMMON_TARGET_FIELDS,\n BoolField,\n Dependencies,\n IntField,\n MultipleSourcesField,\n SpecialCasedDependencies,\n StringField,\n StringSequenceField,\n Target,\n ValidNumbers,\n)\nfrom pants.util.strutil import help_text\n\n\nclass AdhocToolDependenciesField(Dependencies):\n pass\n\n\nclass AdhocToolRunnableField(StringField):\n alias: ClassVar[str] = \"runnable\"\n required = True\n help = help_text(\n lambda: f\"\"\"\n Address to a target that can be invoked by the `run` goal (and does not set\n `run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments\n specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive\n dependencies, along with the transitive dependencies specified by\n `{AdhocToolExecutionDependenciesField.alias}`.\n \"\"\"\n )\n\n\nclass AdhocToolOutputFilesField(StringSequenceField):\n alias: ClassVar[str] = \"output_files\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify the output files to capture, relative to the value of\n `{AdhocToolWorkdirField.alias}`.\n\n For directories, use `{AdhocToolOutputDirectoriesField.alias}`. 
At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDirectoriesField(StringSequenceField):\n alias: ClassVar[str] = \"output_directories\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify full directories (including recursive descendants) of output to capture, relative\n to the value of `{AdhocToolWorkdirField.alias}`.\n\n For individual files, use `{AdhocToolOutputFilesField.alias}`. At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDependenciesField(AdhocToolDependenciesField):\n supports_transitive_excludes = True\n alias: ClassVar[str] = \"output_dependencies\"\n\n help = help_text(\n lambda: \"\"\"\n Any dependencies that need to be present (as transitive dependencies) whenever the outputs\n of this target are consumed (including as dependencies).\n \"\"\"\n )\n\n\nclass AdhocToolExecutionDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"execution_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to make the command complete successfully\n (e.g. file inputs, packages compiled from other targets, etc), but NOT required to make\n the outputs of the command useful. Dependencies that are required to use the outputs\n produced by this command should be specified using the\n `{AdhocToolOutputDependenciesField.alias}` field.\n\n If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`\n will not be added to the execution sandbox.\n \"\"\"\n )\n\n\nclass AdhocToolRunnableDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"runnable_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to exist on the `PATH` to make the command\n complete successfully (interpreters specified in a `#!` command, etc). Note that these\n dependencies will be made available on the `PATH` with the name of the target.\n\n See also `{AdhocToolOutputDependenciesField.alias}` and\n `{AdhocToolExecutionDependenciesField.alias}.\n \"\"\"\n )\n\n\nclass AdhocToolSourcesField(MultipleSourcesField):\n # We solely register this field for codegen to work.\n alias: ClassVar[str] = \"_sources\"\n uses_source_roots = False\n expected_num_files = 0\n\n\nclass AdhocToolArgumentsField(StringSequenceField):\n alias: ClassVar[str] = \"args\"\n default = ()\n help = help_text(\n lambda: f\"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field.\"\n )\n\n\nclass AdhocToolStdoutFilenameField(StringField):\n alias: ClassVar[str] = \"stdout\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stdout` to. 
Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolStderrFilenameField(StringField):\n alias: ClassVar[str] = \"stderr\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stderr` to. Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTimeoutField(IntField):\n alias: ClassVar[str] = \"timeout\"\n default = 30\n help = \"Command execution timeout (in seconds).\"\n valid_numbers = ValidNumbers.positive_only\n\n\nclass AdhocToolExtraEnvVarsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_env_vars\"\n help = help_text(\n \"\"\"\n Additional environment variables to provide to the process.\n\n Entries are strings in the form `ENV_VAR=value` to use explicitly; or just\n `ENV_VAR` to copy the value of a variable in Pants's own environment.\n \"\"\"\n )\n\n\nclass AdhocToolLogOutputField(BoolField):\n alias: ClassVar[str] = \"log_output\"\n default = False\n help = \"Set to true if you want the output logged to the console.\"\n\n\nclass AdhocToolWorkdirField(StringField):\n alias: ClassVar[str] = \"workdir\"\n default = \".\"\n help = help_text(\n \"\"\"\n Sets the working directory for the process.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputRootDirField(StringField):\n alias: ClassVar[str] = \"root_output_directory\"\n default = \"/\"\n help = help_text(\n \"\"\"Adjusts the location of files output by this target, when consumed as a dependency.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTarget(Target):\n alias: ClassVar[str] = \"adhoc_tool\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n AdhocToolRunnableField,\n AdhocToolArgumentsField,\n AdhocToolExecutionDependenciesField,\n AdhocToolOutputDependenciesField,\n AdhocToolRunnableDependenciesField,\n AdhocToolLogOutputField,\n AdhocToolOutputFilesField,\n AdhocToolOutputDirectoriesField,\n AdhocToolSourcesField,\n AdhocToolTimeoutField,\n AdhocToolExtraEnvVarsField,\n AdhocToolWorkdirField,\n AdhocToolOutputRootDirField,\n AdhocToolStdoutFilenameField,\n AdhocToolStderrFilenameField,\n EnvironmentField,\n )\n help = help_text(\n lambda: f\"\"\"\n Execute any runnable target for its side effects.\n\n Example BUILD file:\n\n {AdhocToolTarget.alias}(\n {AdhocToolRunnableField.alias}=\":python_source\",\n {AdhocToolArgumentsField.alias}=[\"\"],\n {AdhocToolExecutionDependenciesField.alias}=[\":scripts\"],\n {AdhocToolOutputDirectoriesField.alias}=[\"results/\"],\n {AdhocToolOutputFilesField.alias}=[\"logs/my-script.log\"],\n )\n\n shell_sources(name=\"scripts\")\n \"\"\"\n )\n\n\n# ---\n# `system_binary` target\n# ---\n\n\nclass SystemBinaryNameField(StringField):\n alias: ClassVar[str] = \"binary_name\"\n required = True\n help = \"The name of the binary to 
find.\"\n\n\nclass SystemBinaryExtraSearchPathsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_search_paths\"\n default = ()\n help = help_text(\n \"\"\"\n Extra search paths to look for the binary. These take priority over Pants' default\n search paths.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintPattern(StringField):\n alias: ClassVar[str] = \"fingerprint\"\n required = False\n default = None\n help = help_text(\n \"\"\"\n A regular expression which will be used to match the fingerprint outputs from\n candidate binaries found during the search process.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintArgsField(StringSequenceField):\n alias: ClassVar[str] = \"fingerprint_args\"\n default = ()\n help = help_text(\n \"Specifies arguments that will be used to run the binary during the search process.\"\n )\n\n\nclass SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):\n alias: ClassVar[str] = \"fingerprint_dependencies\"\n help = help_text(\n \"\"\"\n Specifies any runnable dependencies that need to be available on the `PATH` when the binary\n is run, so that the search process may complete successfully. The name of the target must\n be the name of the runnable dependency that is called by this binary.\n \"\"\"\n )\n\n\nclass SystemBinaryTarget(Target):\n alias: ClassVar[str] = \"system_binary\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n SystemBinaryNameField,\n SystemBinaryExtraSearchPathsField,\n SystemBinaryFingerprintPattern,\n SystemBinaryFingerprintArgsField,\n SystemBinaryFingerprintDependenciesField,\n )\n help = help_text(\n lambda: f\"\"\"\n A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.\n\n Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search\n paths provided, as well as default search paths. If\n `{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be\n executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries\n whose output does not match the pattern will be excluded.\n\n The first non-excluded binary will be the one that is resolved.\n \"\"\"\n )\n", "path": "src/python/pants/backend/adhoc/target_types.py"}]}
| 3,791 | 179 |
gh_patches_debug_5754
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6745
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
revert: project-name in project_id
In https://github.com/meltano/meltano/pull/6621 we added the `project name` to the project_id on `init`. Even though we hash the id prior to sending, its addition was unnecessary and creates a few problems:
* having the project name in the id confuses the purpose of the id. We don't need to know or include the directory name in the project id and its inclusion sends the wrong signal that we do care about it somehow
* while it's true that `project_id` can be any string, we generally don't want to encourage folks to change it unless they have a strong reason to
* when trying to correlate local testing to events we're seeing through our production snowplow pipeline, having to hash the id is unnecessarily burdensome when we'd rather just look for the GUID itself
I would support a documentation update to make it clear that `project_id` can be any string but that it's generally not recommended to update if you don't need to.
Also, I did notice that as part of this update it seems like `project_id` gets put at the end of the file - is there any way to bring that back up above the environments?
@WillDaSilva @aaronsteers
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/project_init_service.py`
Content:
```
1 """New Project Initialization Service."""
2 from __future__ import annotations
3
4 import os
5 import uuid
6
7 import click
8
9 from .cli_messages import GREETING
10 from .db import project_engine
11 from .plugin.meltano_file import MeltanoFilePlugin
12 from .project import Project
13 from .project_settings_service import ProjectSettingsService, SettingValueStore
14
15
16 class ProjectInitServiceError(Exception):
17 """Project Initialization Service Exception."""
18
19
20 class ProjectInitService:
21 """New Project Initialization Service."""
22
23 def __init__(self, project_name):
24 """Create a new ProjectInitService instance.
25
26 Args:
27 project_name: The name of the project to create
28 """
29 self.project_name = project_name.lower()
30
31 def init(self, activate: bool = True, add_discovery: bool = False) -> Project:
32 """Initialise Meltano Project.
33
34 Args:
35 activate: Activate newly created project
36 add_discovery: Add discovery.yml file to created project
37
38 Returns:
39 A new Project instance
40
41 Raises:
42 ProjectInitServiceError: Directory already exists
43 """
44 try:
45 os.mkdir(self.project_name)
46 except FileExistsError as ex:
47 raise ProjectInitServiceError(
48 f"Directory {self.project_name!r} already exists."
49 ) from ex
50 except PermissionError as ex:
51 raise ProjectInitServiceError(
52 f"Permission denied to create {self.project_name!r}."
53 ) from ex
54 except Exception as ex:
55 raise ProjectInitServiceError(
56 f"Could not create directory {self.project_name!r}. {ex}"
57 ) from ex
58
59 click.secho("Created", fg="blue", nl=False)
60 click.echo(f" {self.project_name}")
61
62 self.project = Project(self.project_name)
63
64 self.create_files(add_discovery=add_discovery)
65
66 self.settings_service = ProjectSettingsService(self.project)
67 self.settings_service.set(
68 "project_id",
69 f"{self.project_name}-{uuid.uuid4()}",
70 store=SettingValueStore.MELTANO_YML,
71 )
72 self.set_send_anonymous_usage_stats()
73 if activate:
74 Project.activate(self.project)
75
76 self.create_system_database()
77
78 return self.project
79
80 def create_dot_meltano_dir(self):
81 """Create .meltano directory."""
82 # explicitly create the .meltano directory if it doesn't exist
83 os.makedirs(self.project.meltano_dir(), exist_ok=True)
84 click.secho(" |--", fg="blue", nl=False)
85 click.echo(f" {self.project.meltano_dir().name}")
86
87 def create_files(self, add_discovery=False):
88 """Create project files.
89
90 Args:
91 add_discovery: Add discovery.yml file to created project
92 """
93 click.secho("Creating project files...", fg="blue")
94 click.echo(f" {self.project_name}/")
95
96 self.create_dot_meltano_dir()
97
98 plugin = MeltanoFilePlugin(discovery=add_discovery)
99 for path in plugin.create_files(self.project):
100 click.secho(" |--", fg="blue", nl=False)
101 click.echo(f" {path}")
102
103 def set_send_anonymous_usage_stats(self):
104 """Set Anonymous Usage Stats flag."""
105 # If set to false store explicitly in `meltano.yml`
106 if not self.settings_service.get("send_anonymous_usage_stats"):
107 self.settings_service.set(
108 "send_anonymous_usage_stats",
109 self.settings_service.get("send_anonymous_usage_stats"),
110 store=SettingValueStore.MELTANO_YML,
111 )
112
113 def create_system_database(self):
114 """Create Meltano System DB.
115
116 Raises:
117 ProjectInitServiceError: Database initialization failed
118 """
119 click.secho("Creating system database...", fg="blue", nl=False)
120
121 # register the system database connection
122 engine, _ = project_engine(self.project, default=True)
123
124 from meltano.core.migration_service import MigrationError, MigrationService
125
126 try:
127 migration_service = MigrationService(engine)
128 migration_service.upgrade(silent=True)
129 migration_service.seed(self.project)
130 click.secho(" Done!", fg="blue")
131 except MigrationError as err:
132 raise ProjectInitServiceError(str(err)) from err
133
134 def echo_instructions(self):
135 """Echo Next Steps to Click CLI."""
136 click.secho(GREETING, nl=False)
137 click.secho("\nProject ", nl=False)
138 click.secho(self.project_name, fg="magenta", nl=False)
139 click.echo(" has been created!\n")
140
141 click.echo("Meltano Environments initialized with ", nl=False)
142 click.secho("dev", fg="bright_green", nl=False)
143 click.echo(", ", nl=False)
144 click.secho("staging", fg="bright_yellow", nl=False)
145 click.echo(", and ", nl=False)
146 click.secho("prod", fg="bright_red", nl=False)
147 click.echo(".")
148 click.echo("To learn more about Environments visit: ", nl=False)
149 click.secho(
150 "https://docs.meltano.com/concepts/environments",
151 fg="cyan",
152 )
153
154 click.echo("\nNext steps:")
155 click.secho(" cd ", nl=False)
156 click.secho(self.project_name, fg="magenta")
157 click.echo(" Visit ", nl=False)
158 click.secho(
159 "https://docs.meltano.com/getting-started#create-your-meltano-project",
160 fg="cyan",
161 nl=False,
162 )
163 click.echo(" to learn where to go from here")
164
165 def join_with_project_base(self, filename):
166 """Join Path to Project base.
167
168 Args:
169 filename: File name to join with project base
170
171 Returns:
172 Joined base path and passed filename
173 """
174 return os.path.join(".", self.project_name, filename)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/core/project_init_service.py b/src/meltano/core/project_init_service.py
--- a/src/meltano/core/project_init_service.py
+++ b/src/meltano/core/project_init_service.py
@@ -66,7 +66,7 @@
self.settings_service = ProjectSettingsService(self.project)
self.settings_service.set(
"project_id",
- f"{self.project_name}-{uuid.uuid4()}",
+ str(uuid.uuid4()),
store=SettingValueStore.MELTANO_YML,
)
self.set_send_anonymous_usage_stats()
|
{"golden_diff": "diff --git a/src/meltano/core/project_init_service.py b/src/meltano/core/project_init_service.py\n--- a/src/meltano/core/project_init_service.py\n+++ b/src/meltano/core/project_init_service.py\n@@ -66,7 +66,7 @@\n self.settings_service = ProjectSettingsService(self.project)\n self.settings_service.set(\n \"project_id\",\n- f\"{self.project_name}-{uuid.uuid4()}\",\n+ str(uuid.uuid4()),\n store=SettingValueStore.MELTANO_YML,\n )\n self.set_send_anonymous_usage_stats()\n", "issue": "revert: project-name in project_id\nIn https://github.com/meltano/meltano/pull/6621 we added the `project name` to the project_id on `init`. Even though we hash the id prior to sending, its addition was unnecessary and creates a few problems:\r\n\r\n* having the project name in the id confuses the purpose of the id. We don't need to know or include the directory name in the project id and its inclusion sends the wrong signal that we do care about it somehow\r\n* while it's true that `project_id` can be any string, we generally don't want to encourage folks to change it unless they have a strong reason to\r\n* when trying to correlate local testing to events we're seeing through our production snowplow pipeline, having to hash the id is unnecessarily burdensome when we'd rather just look for the GUID itself\r\n\r\nI would support a documentation update to make it clear that `project_id` can be any string but that it's generally not recommended to update if you don't need to.\r\n\r\nAlso, I did notice that as part of this update it seems like `project_id` gets put at the end of the file - is there anyway to bring that back up above the environments?\r\n\r\n@WillDaSilva @aaronsteers \n", "before_files": [{"content": "\"\"\"New Project Initialization Service.\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport uuid\n\nimport click\n\nfrom .cli_messages import GREETING\nfrom .db import project_engine\nfrom .plugin.meltano_file import MeltanoFilePlugin\nfrom .project import Project\nfrom .project_settings_service import ProjectSettingsService, SettingValueStore\n\n\nclass ProjectInitServiceError(Exception):\n \"\"\"Project Initialization Service Exception.\"\"\"\n\n\nclass ProjectInitService:\n \"\"\"New Project Initialization Service.\"\"\"\n\n def __init__(self, project_name):\n \"\"\"Create a new ProjectInitService instance.\n\n Args:\n project_name: The name of the project to create\n \"\"\"\n self.project_name = project_name.lower()\n\n def init(self, activate: bool = True, add_discovery: bool = False) -> Project:\n \"\"\"Initialise Meltano Project.\n\n Args:\n activate: Activate newly created project\n add_discovery: Add discovery.yml file to created project\n\n Returns:\n A new Project instance\n\n Raises:\n ProjectInitServiceError: Directory already exists\n \"\"\"\n try:\n os.mkdir(self.project_name)\n except FileExistsError as ex:\n raise ProjectInitServiceError(\n f\"Directory {self.project_name!r} already exists.\"\n ) from ex\n except PermissionError as ex:\n raise ProjectInitServiceError(\n f\"Permission denied to create {self.project_name!r}.\"\n ) from ex\n except Exception as ex:\n raise ProjectInitServiceError(\n f\"Could not create directory {self.project_name!r}. 
{ex}\"\n ) from ex\n\n click.secho(\"Created\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project_name}\")\n\n self.project = Project(self.project_name)\n\n self.create_files(add_discovery=add_discovery)\n\n self.settings_service = ProjectSettingsService(self.project)\n self.settings_service.set(\n \"project_id\",\n f\"{self.project_name}-{uuid.uuid4()}\",\n store=SettingValueStore.MELTANO_YML,\n )\n self.set_send_anonymous_usage_stats()\n if activate:\n Project.activate(self.project)\n\n self.create_system_database()\n\n return self.project\n\n def create_dot_meltano_dir(self):\n \"\"\"Create .meltano directory.\"\"\"\n # explicitly create the .meltano directory if it doesn't exist\n os.makedirs(self.project.meltano_dir(), exist_ok=True)\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project.meltano_dir().name}\")\n\n def create_files(self, add_discovery=False):\n \"\"\"Create project files.\n\n Args:\n add_discovery: Add discovery.yml file to created project\n \"\"\"\n click.secho(\"Creating project files...\", fg=\"blue\")\n click.echo(f\" {self.project_name}/\")\n\n self.create_dot_meltano_dir()\n\n plugin = MeltanoFilePlugin(discovery=add_discovery)\n for path in plugin.create_files(self.project):\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {path}\")\n\n def set_send_anonymous_usage_stats(self):\n \"\"\"Set Anonymous Usage Stats flag.\"\"\"\n # If set to false store explicitly in `meltano.yml`\n if not self.settings_service.get(\"send_anonymous_usage_stats\"):\n self.settings_service.set(\n \"send_anonymous_usage_stats\",\n self.settings_service.get(\"send_anonymous_usage_stats\"),\n store=SettingValueStore.MELTANO_YML,\n )\n\n def create_system_database(self):\n \"\"\"Create Meltano System DB.\n\n Raises:\n ProjectInitServiceError: Database initialization failed\n \"\"\"\n click.secho(\"Creating system database...\", fg=\"blue\", nl=False)\n\n # register the system database connection\n engine, _ = project_engine(self.project, default=True)\n\n from meltano.core.migration_service import MigrationError, MigrationService\n\n try:\n migration_service = MigrationService(engine)\n migration_service.upgrade(silent=True)\n migration_service.seed(self.project)\n click.secho(\" Done!\", fg=\"blue\")\n except MigrationError as err:\n raise ProjectInitServiceError(str(err)) from err\n\n def echo_instructions(self):\n \"\"\"Echo Next Steps to Click CLI.\"\"\"\n click.secho(GREETING, nl=False)\n click.secho(\"\\nProject \", nl=False)\n click.secho(self.project_name, fg=\"magenta\", nl=False)\n click.echo(\" has been created!\\n\")\n\n click.echo(\"Meltano Environments initialized with \", nl=False)\n click.secho(\"dev\", fg=\"bright_green\", nl=False)\n click.echo(\", \", nl=False)\n click.secho(\"staging\", fg=\"bright_yellow\", nl=False)\n click.echo(\", and \", nl=False)\n click.secho(\"prod\", fg=\"bright_red\", nl=False)\n click.echo(\".\")\n click.echo(\"To learn more about Environments visit: \", nl=False)\n click.secho(\n \"https://docs.meltano.com/concepts/environments\",\n fg=\"cyan\",\n )\n\n click.echo(\"\\nNext steps:\")\n click.secho(\" cd \", nl=False)\n click.secho(self.project_name, fg=\"magenta\")\n click.echo(\" Visit \", nl=False)\n click.secho(\n \"https://docs.meltano.com/getting-started#create-your-meltano-project\",\n fg=\"cyan\",\n nl=False,\n )\n click.echo(\" to learn where to go from here\")\n\n def join_with_project_base(self, filename):\n \"\"\"Join Path to Project base.\n\n Args:\n filename: File name to join with 
project base\n\n Returns:\n Joined base path and passed filename\n \"\"\"\n return os.path.join(\".\", self.project_name, filename)\n", "path": "src/meltano/core/project_init_service.py"}], "after_files": [{"content": "\"\"\"New Project Initialization Service.\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport uuid\n\nimport click\n\nfrom .cli_messages import GREETING\nfrom .db import project_engine\nfrom .plugin.meltano_file import MeltanoFilePlugin\nfrom .project import Project\nfrom .project_settings_service import ProjectSettingsService, SettingValueStore\n\n\nclass ProjectInitServiceError(Exception):\n \"\"\"Project Initialization Service Exception.\"\"\"\n\n\nclass ProjectInitService:\n \"\"\"New Project Initialization Service.\"\"\"\n\n def __init__(self, project_name):\n \"\"\"Create a new ProjectInitService instance.\n\n Args:\n project_name: The name of the project to create\n \"\"\"\n self.project_name = project_name.lower()\n\n def init(self, activate: bool = True, add_discovery: bool = False) -> Project:\n \"\"\"Initialise Meltano Project.\n\n Args:\n activate: Activate newly created project\n add_discovery: Add discovery.yml file to created project\n\n Returns:\n A new Project instance\n\n Raises:\n ProjectInitServiceError: Directory already exists\n \"\"\"\n try:\n os.mkdir(self.project_name)\n except FileExistsError as ex:\n raise ProjectInitServiceError(\n f\"Directory {self.project_name!r} already exists.\"\n ) from ex\n except PermissionError as ex:\n raise ProjectInitServiceError(\n f\"Permission denied to create {self.project_name!r}.\"\n ) from ex\n except Exception as ex:\n raise ProjectInitServiceError(\n f\"Could not create directory {self.project_name!r}. {ex}\"\n ) from ex\n\n click.secho(\"Created\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project_name}\")\n\n self.project = Project(self.project_name)\n\n self.create_files(add_discovery=add_discovery)\n\n self.settings_service = ProjectSettingsService(self.project)\n self.settings_service.set(\n \"project_id\",\n str(uuid.uuid4()),\n store=SettingValueStore.MELTANO_YML,\n )\n self.set_send_anonymous_usage_stats()\n if activate:\n Project.activate(self.project)\n\n self.create_system_database()\n\n return self.project\n\n def create_dot_meltano_dir(self):\n \"\"\"Create .meltano directory.\"\"\"\n # explicitly create the .meltano directory if it doesn't exist\n os.makedirs(self.project.meltano_dir(), exist_ok=True)\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project.meltano_dir().name}\")\n\n def create_files(self, add_discovery=False):\n \"\"\"Create project files.\n\n Args:\n add_discovery: Add discovery.yml file to created project\n \"\"\"\n click.secho(\"Creating project files...\", fg=\"blue\")\n click.echo(f\" {self.project_name}/\")\n\n self.create_dot_meltano_dir()\n\n plugin = MeltanoFilePlugin(discovery=add_discovery)\n for path in plugin.create_files(self.project):\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {path}\")\n\n def set_send_anonymous_usage_stats(self):\n \"\"\"Set Anonymous Usage Stats flag.\"\"\"\n # If set to false store explicitly in `meltano.yml`\n if not self.settings_service.get(\"send_anonymous_usage_stats\"):\n self.settings_service.set(\n \"send_anonymous_usage_stats\",\n self.settings_service.get(\"send_anonymous_usage_stats\"),\n store=SettingValueStore.MELTANO_YML,\n )\n\n def create_system_database(self):\n \"\"\"Create Meltano System DB.\n\n Raises:\n ProjectInitServiceError: Database initialization failed\n 
\"\"\"\n click.secho(\"Creating system database...\", fg=\"blue\", nl=False)\n\n # register the system database connection\n engine, _ = project_engine(self.project, default=True)\n\n from meltano.core.migration_service import MigrationError, MigrationService\n\n try:\n migration_service = MigrationService(engine)\n migration_service.upgrade(silent=True)\n migration_service.seed(self.project)\n click.secho(\" Done!\", fg=\"blue\")\n except MigrationError as err:\n raise ProjectInitServiceError(str(err)) from err\n\n def echo_instructions(self):\n \"\"\"Echo Next Steps to Click CLI.\"\"\"\n click.secho(GREETING, nl=False)\n click.secho(\"\\nProject \", nl=False)\n click.secho(self.project_name, fg=\"magenta\", nl=False)\n click.echo(\" has been created!\\n\")\n\n click.echo(\"Meltano Environments initialized with \", nl=False)\n click.secho(\"dev\", fg=\"bright_green\", nl=False)\n click.echo(\", \", nl=False)\n click.secho(\"staging\", fg=\"bright_yellow\", nl=False)\n click.echo(\", and \", nl=False)\n click.secho(\"prod\", fg=\"bright_red\", nl=False)\n click.echo(\".\")\n click.echo(\"To learn more about Environments visit: \", nl=False)\n click.secho(\n \"https://docs.meltano.com/concepts/environments\",\n fg=\"cyan\",\n )\n\n click.echo(\"\\nNext steps:\")\n click.secho(\" cd \", nl=False)\n click.secho(self.project_name, fg=\"magenta\")\n click.echo(\" Visit \", nl=False)\n click.secho(\n \"https://docs.meltano.com/getting-started#create-your-meltano-project\",\n fg=\"cyan\",\n nl=False,\n )\n click.echo(\" to learn where to go from here\")\n\n def join_with_project_base(self, filename):\n \"\"\"Join Path to Project base.\n\n Args:\n filename: File name to join with project base\n\n Returns:\n Joined base path and passed filename\n \"\"\"\n return os.path.join(\".\", self.project_name, filename)\n", "path": "src/meltano/core/project_init_service.py"}]}
| 2,202 | 126 |
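The fix in the record above reduces, in effect, to swapping a name-prefixed identifier for a bare GUID. A minimal sketch of that difference, using only the standard-library `uuid` module; the `project_name` value here is a hypothetical placeholder rather than anything taken from the Meltano API:

```python
import uuid

project_name = "my-project"  # hypothetical placeholder

# Former behaviour: the directory name is baked into the identifier.
old_project_id = f"{project_name}-{uuid.uuid4()}"

# Reverted behaviour: a bare GUID, independent of the directory name.
new_project_id = str(uuid.uuid4())

print(old_project_id)  # e.g. my-project-0b5a2e3c-...
print(new_project_id)  # e.g. 0b5a2e3c-...
```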
gh_patches_debug_16663
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-2989
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raise_contract_logic_error_on_revert fails with UnicodeDecodeError
* Version: 6.4.0
* Python: 3.10
* OS: linux
* `pip freeze` output
```
aiohttp==3.8.4
aiosignal==1.3.1
async-timeout==4.0.2
attrs==23.1.0
bitarray==2.7.3
cached-property==1.5.2
certifi==2023.5.7
charset-normalizer==3.1.0
coverage==7.2.6
cytoolz==0.12.1
eth-abi==4.0.0
eth-account==0.8.0
eth-bloom==2.0.0
eth-hash==0.5.1
eth-keyfile==0.6.1
eth-keys==0.4.0
eth-rlp==0.3.0
eth-typing==3.3.0
eth-utils==2.1.0
frozenlist==1.3.3
hexbytes==0.3.0
idna==3.4
iniconfig==2.0.0
jsonschema==4.17.3
lru-dict==1.2.0
multidict==6.0.4
mypy-extensions==0.4.4
packaging==23.1
parsimonious==0.9.0
pluggy==1.0.0
protobuf==4.23.2
py-ecc==6.0.0
py-evm==0.7.0a2
pycryptodome==3.18.0
pyethash==0.1.27
pyrsistent==0.19.3
pytest==7.3.1
pytest-cov==4.1.0
regex==2023.5.5
requests==2.31.0
rlp==3.0.0
safe-eth-py==5.4.3
safe-pysha3==1.0.4
sortedcontainers==2.4.0
toolz==0.12.0
trie==2.1.0
urllib3==2.0.2
web3==6.4.0
websockets==11.0.3
yarl==1.9.2
```
### What was wrong?
Function `raise_contract_logic_error_on_revert` raises a `UnicodeDecodeError` when parsing the receipt for the following response:
`response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}`
* Node: GnosisChain (issue happens for most nodes, https://rpc.gnosischain.com/, https://rpc.ankr.com/gnosis, etc)
```
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/contract.py:461: in call
return call_contract_function(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/utils.py:96: in call_contract_function
return_data = w3.eth.call(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:255: in call
return self._durin_call(transaction, block_identifier, state_override)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:274: in _durin_call
return self._call(transaction, block_identifier, state_override)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/module.py:68: in caller
result = w3.manager.request_blocking(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:232: in request_blocking
return self.formatted_response(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:197: in formatted_response
apply_error_formatters(error_formatters, response)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:73: in apply_error_formatters
formatted_resp = pipe(response, error_formatters)
cytoolz/functoolz.pyx:666: in cytoolz.functoolz.pipe
???
cytoolz/functoolz.pyx:641: in cytoolz.functoolz.c_pipe
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}
def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:
"""
Reverts contain a `data` attribute with the following layout:
"Reverted "
Function selector for Error(string): 08c379a (4 bytes)
Data offset: 32 (32 bytes)
String length (32 bytes)
Reason string (padded, use string length from above to get meaningful part)
See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert
"""
if not isinstance(response["error"], dict):
raise ValueError("Error expected to be a dict")
data = response["error"].get("data", "")
# Ganache case:
if isinstance(data, dict) and response["error"].get("message"):
raise ContractLogicError(
f'execution reverted: {response["error"]["message"]}', data=data
)
# Parity/OpenEthereum case:
if data.startswith("Reverted "):
# "Reverted", function selector and offset are always the same for revert errors
prefix = "Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020" # noqa: 501
if not data.startswith(prefix):
if data.startswith("Reverted 0x"):
# Special case for this form: 'Reverted 0x...'
receipt = data.split(" ")[1][2:]
> revert_reason = bytes.fromhex(receipt).decode("utf-8")
E UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd2 in position 0: invalid continuation byte
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/_utils/method_formatters.py:743: UnicodeDecodeError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/_utils/contract_error_handling.py`
Content:
```
1 from eth_abi import (
2 abi,
3 )
4 from eth_utils import (
5 to_bytes,
6 )
7
8 from web3.exceptions import (
9 ContractCustomError,
10 ContractLogicError,
11 ContractPanicError,
12 OffchainLookup,
13 )
14 from web3.types import (
15 RPCResponse,
16 )
17
18 # func selector for "Error(string)"
19 SOLIDITY_ERROR_FUNC_SELECTOR = "0x08c379a0"
20
21 # --- CCIP Read - EIP-3668 --- #
22 # the first 4 bytes of keccak hash (func selector) for:
23 # "OffchainLookup(address,string[],bytes,bytes4,bytes)"
24 OFFCHAIN_LOOKUP_FUNC_SELECTOR = "0x556f1830"
25 OFFCHAIN_LOOKUP_FIELDS = {
26 "sender": "address",
27 "urls": "string[]",
28 "callData": "bytes",
29 "callbackFunction": "bytes4",
30 "extraData": "bytes",
31 }
32
33
34 # --- Solidity Panic Error, as of Solidity 0.8.0 --- #
35 PANIC_ERROR_FUNC_SELECTOR = "0x4e487b71"
36 PANIC_ERROR_CODES = {
37 "00": "Panic error 0x00: Generic compiler inserted panics.",
38 "01": "Panic error 0x01: Assert evaluates to false.",
39 "11": "Panic error 0x11: Arithmetic operation results in underflow or overflow.",
40 "12": "Panic error 0x12: Division by zero.",
41 "21": "Panic error 0x21: Cannot convert value into an enum type.",
42 "22": "Panic error 0x12: Storage byte array is incorrectly encoded.",
43 "31": "Panic error 0x31: Call to 'pop()' on an empty array.",
44 "32": "Panic error 0x32: Array index is out of bounds.",
45 "41": "Panic error 0x41: Allocation of too much memory or array too large.",
46 "51": "Panic error 0x51: Call to a zero-initialized variable of internal "
47 "function type.",
48 }
49
50
51 def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:
52 """
53 Reverts contain a `data` attribute with the following layout:
54 "Reverted "
55 Function selector for Error(string): 08c379a (4 bytes)
56 Data offset: 32 (32 bytes)
57 String length (32 bytes)
58 Reason string (padded, use string length from above to get meaningful part)
59
60 See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert
61 """
62 if not isinstance(response["error"], dict):
63 raise ValueError("Error expected to be a dict")
64
65 data = response["error"].get("data", "")
66
67 # Ganache case:
68 if isinstance(data, dict) and response["error"].get("message"):
69 raise ContractLogicError(
70 f'execution reverted: {response["error"]["message"]}', data=data
71 )
72
73 # Parity/OpenEthereum case:
74 if data.startswith("Reverted "):
75 # "Reverted", function selector and offset are always the same for revert errors
76 prefix = f"Reverted {SOLIDITY_ERROR_FUNC_SELECTOR}"
77 data_offset = ("00" * 31) + "20" # 0x0000...0020 (32 bytes)
78 revert_pattern = prefix + data_offset
79
80 if data.startswith(revert_pattern):
81 # if common revert pattern
82 string_length = int(
83 data[len(revert_pattern) : len(revert_pattern) + 64], 16
84 )
85 reason_as_hex = data[
86 len(revert_pattern) + 64 : len(revert_pattern) + 64 + string_length * 2
87 ]
88 elif data.startswith("Reverted 0x"):
89 # Special case for this form: 'Reverted 0x...'
90 reason_as_hex = data.split(" ")[1][2:]
91 else:
92 raise ContractLogicError("execution reverted", data=data)
93
94 reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
95 raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
96
97 # --- EIP-3668 | CCIP Read --- #
98 if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:
99 parsed_data_as_bytes = to_bytes(hexstr=data[10:])
100 abi_decoded_data = abi.decode(
101 list(OFFCHAIN_LOOKUP_FIELDS.values()), parsed_data_as_bytes
102 )
103 offchain_lookup_payload = dict(
104 zip(OFFCHAIN_LOOKUP_FIELDS.keys(), abi_decoded_data)
105 )
106 raise OffchainLookup(offchain_lookup_payload, data=data)
107
108 # --- Solidity Panic Error --- #
109 if data[:10] == PANIC_ERROR_FUNC_SELECTOR:
110 panic_error_code = data[-2:]
111 raise ContractPanicError(PANIC_ERROR_CODES[panic_error_code], data=data)
112
113 # Solidity 0.8.4 introduced custom error messages that allow args to
114 # be passed in (or not). See:
115 # https://blog.soliditylang.org/2021/04/21/custom-errors/
116 if len(data) >= 10 and not data[:10] == SOLIDITY_ERROR_FUNC_SELECTOR:
117 # Raise with data as both the message and the data for backwards
118 # compatibility and so that data can be accessed via 'data' attribute
119 # on the ContractCustomError exception
120 raise ContractCustomError(data, data=data)
121
122 # Geth case:
123 if "message" in response["error"] and response["error"].get("code", "") == 3:
124 message = response["error"]["message"]
125 raise ContractLogicError(message, data=data)
126
127 # Geth Revert without error message case:
128 if "execution reverted" in response["error"].get("message"):
129 raise ContractLogicError("execution reverted", data=data)
130
131 return response
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/web3/_utils/contract_error_handling.py b/web3/_utils/contract_error_handling.py
--- a/web3/_utils/contract_error_handling.py
+++ b/web3/_utils/contract_error_handling.py
@@ -1,3 +1,5 @@
+import warnings
+
from eth_abi import (
abi,
)
@@ -91,8 +93,12 @@
else:
raise ContractLogicError("execution reverted", data=data)
- reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
- raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
+ try:
+ reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
+ raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
+ except UnicodeDecodeError:
+ warnings.warn("Could not decode revert reason as UTF-8", RuntimeWarning)
+ raise ContractLogicError("execution reverted", data=data)
# --- EIP-3668 | CCIP Read --- #
if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:
|
{"golden_diff": "diff --git a/web3/_utils/contract_error_handling.py b/web3/_utils/contract_error_handling.py\n--- a/web3/_utils/contract_error_handling.py\n+++ b/web3/_utils/contract_error_handling.py\n@@ -1,3 +1,5 @@\n+import warnings\n+\n from eth_abi import (\n abi,\n )\n@@ -91,8 +93,12 @@\n else:\n raise ContractLogicError(\"execution reverted\", data=data)\n \n- reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n- raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n+ try:\n+ reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n+ raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n+ except UnicodeDecodeError:\n+ warnings.warn(\"Could not decode revert reason as UTF-8\", RuntimeWarning)\n+ raise ContractLogicError(\"execution reverted\", data=data)\n \n # --- EIP-3668 | CCIP Read --- #\n if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:\n", "issue": "raise_contract_logic_error_on_revert fails with UnicodeDecodeError\n* Version: 6.4.0\r\n* Python: 3.10\r\n* OS: linux\r\n* `pip freeze` output\r\n\r\n```\r\naiohttp==3.8.4\r\naiosignal==1.3.1\r\nasync-timeout==4.0.2\r\nattrs==23.1.0\r\nbitarray==2.7.3\r\ncached-property==1.5.2\r\ncertifi==2023.5.7\r\ncharset-normalizer==3.1.0\r\ncoverage==7.2.6\r\ncytoolz==0.12.1\r\neth-abi==4.0.0\r\neth-account==0.8.0\r\neth-bloom==2.0.0\r\neth-hash==0.5.1\r\neth-keyfile==0.6.1\r\neth-keys==0.4.0\r\neth-rlp==0.3.0\r\neth-typing==3.3.0\r\neth-utils==2.1.0\r\nfrozenlist==1.3.3\r\nhexbytes==0.3.0\r\nidna==3.4\r\niniconfig==2.0.0\r\njsonschema==4.17.3\r\nlru-dict==1.2.0\r\nmultidict==6.0.4\r\nmypy-extensions==0.4.4\r\npackaging==23.1\r\nparsimonious==0.9.0\r\npluggy==1.0.0\r\nprotobuf==4.23.2\r\npy-ecc==6.0.0\r\npy-evm==0.7.0a2\r\npycryptodome==3.18.0\r\npyethash==0.1.27\r\npyrsistent==0.19.3\r\npytest==7.3.1\r\npytest-cov==4.1.0\r\nregex==2023.5.5\r\nrequests==2.31.0\r\nrlp==3.0.0\r\nsafe-eth-py==5.4.3\r\nsafe-pysha3==1.0.4\r\nsortedcontainers==2.4.0\r\ntoolz==0.12.0\r\ntrie==2.1.0\r\nurllib3==2.0.2\r\nweb3==6.4.0\r\nwebsockets==11.0.3\r\nyarl==1.9.2\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nFunction `raise_contract_logic_error_on_revert` raises `UnicodeDecodeError` when parsing receipt for the following response\r\n`response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}`\r\n\r\n\r\n* Node: GnosisChain (issue happens for most nodes, https://rpc.gnosischain.com/, https://rpc.ankr.com/gnosis, etc)\r\n\r\n```\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/contract.py:461: in call\r\n return call_contract_function(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/utils.py:96: in call_contract_function\r\n return_data = w3.eth.call(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:255: in call\r\n return self._durin_call(transaction, block_identifier, state_override)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:274: in _durin_call\r\n return self._call(transaction, block_identifier, state_override)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/module.py:68: in caller\r\n result = w3.manager.request_blocking(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:232: in request_blocking\r\n return 
self.formatted_response(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:197: in formatted_response\r\n apply_error_formatters(error_formatters, response)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:73: in apply_error_formatters\r\n formatted_resp = pipe(response, error_formatters)\r\ncytoolz/functoolz.pyx:666: in cytoolz.functoolz.pipe\r\n ???\r\ncytoolz/functoolz.pyx:641: in cytoolz.functoolz.c_pipe\r\n ???\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nresponse = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}\r\n\r\n def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:\r\n \"\"\"\r\n Reverts contain a `data` attribute with the following layout:\r\n \"Reverted \"\r\n Function selector for Error(string): 08c379a (4 bytes)\r\n Data offset: 32 (32 bytes)\r\n String length (32 bytes)\r\n Reason string (padded, use string length from above to get meaningful part)\r\n \r\n See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert\r\n \"\"\"\r\n if not isinstance(response[\"error\"], dict):\r\n raise ValueError(\"Error expected to be a dict\")\r\n \r\n data = response[\"error\"].get(\"data\", \"\")\r\n \r\n # Ganache case:\r\n if isinstance(data, dict) and response[\"error\"].get(\"message\"):\r\n raise ContractLogicError(\r\n f'execution reverted: {response[\"error\"][\"message\"]}', data=data\r\n )\r\n \r\n # Parity/OpenEthereum case:\r\n if data.startswith(\"Reverted \"):\r\n # \"Reverted\", function selector and offset are always the same for revert errors\r\n prefix = \"Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020\" # noqa: 501\r\n if not data.startswith(prefix):\r\n if data.startswith(\"Reverted 0x\"):\r\n # Special case for this form: 'Reverted 0x...'\r\n receipt = data.split(\" \")[1][2:]\r\n> revert_reason = bytes.fromhex(receipt).decode(\"utf-8\")\r\nE UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd2 in position 0: invalid continuation byte\r\n\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/_utils/method_formatters.py:743: UnicodeDecodeError\r\n```\r\n\n", "before_files": [{"content": "from eth_abi import (\n abi,\n)\nfrom eth_utils import (\n to_bytes,\n)\n\nfrom web3.exceptions import (\n ContractCustomError,\n ContractLogicError,\n ContractPanicError,\n OffchainLookup,\n)\nfrom web3.types import (\n RPCResponse,\n)\n\n# func selector for \"Error(string)\"\nSOLIDITY_ERROR_FUNC_SELECTOR = \"0x08c379a0\"\n\n# --- CCIP Read - EIP-3668 --- #\n# the first 4 bytes of keccak hash (func selector) for:\n# \"OffchainLookup(address,string[],bytes,bytes4,bytes)\"\nOFFCHAIN_LOOKUP_FUNC_SELECTOR = \"0x556f1830\"\nOFFCHAIN_LOOKUP_FIELDS = {\n \"sender\": \"address\",\n \"urls\": \"string[]\",\n \"callData\": \"bytes\",\n \"callbackFunction\": \"bytes4\",\n \"extraData\": \"bytes\",\n}\n\n\n# --- Solidity Panic Error, as of Solidity 0.8.0 --- #\nPANIC_ERROR_FUNC_SELECTOR = \"0x4e487b71\"\nPANIC_ERROR_CODES = {\n \"00\": \"Panic error 0x00: Generic compiler inserted panics.\",\n \"01\": \"Panic error 0x01: Assert evaluates to false.\",\n \"11\": \"Panic error 0x11: Arithmetic operation results in underflow or overflow.\",\n \"12\": \"Panic error 0x12: Division by zero.\",\n \"21\": \"Panic error 0x21: Cannot convert value into an enum type.\",\n \"22\": \"Panic error 0x12: 
Storage byte array is incorrectly encoded.\",\n \"31\": \"Panic error 0x31: Call to 'pop()' on an empty array.\",\n \"32\": \"Panic error 0x32: Array index is out of bounds.\",\n \"41\": \"Panic error 0x41: Allocation of too much memory or array too large.\",\n \"51\": \"Panic error 0x51: Call to a zero-initialized variable of internal \"\n \"function type.\",\n}\n\n\ndef raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:\n \"\"\"\n Reverts contain a `data` attribute with the following layout:\n \"Reverted \"\n Function selector for Error(string): 08c379a (4 bytes)\n Data offset: 32 (32 bytes)\n String length (32 bytes)\n Reason string (padded, use string length from above to get meaningful part)\n\n See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert\n \"\"\"\n if not isinstance(response[\"error\"], dict):\n raise ValueError(\"Error expected to be a dict\")\n\n data = response[\"error\"].get(\"data\", \"\")\n\n # Ganache case:\n if isinstance(data, dict) and response[\"error\"].get(\"message\"):\n raise ContractLogicError(\n f'execution reverted: {response[\"error\"][\"message\"]}', data=data\n )\n\n # Parity/OpenEthereum case:\n if data.startswith(\"Reverted \"):\n # \"Reverted\", function selector and offset are always the same for revert errors\n prefix = f\"Reverted {SOLIDITY_ERROR_FUNC_SELECTOR}\"\n data_offset = (\"00\" * 31) + \"20\" # 0x0000...0020 (32 bytes)\n revert_pattern = prefix + data_offset\n\n if data.startswith(revert_pattern):\n # if common revert pattern\n string_length = int(\n data[len(revert_pattern) : len(revert_pattern) + 64], 16\n )\n reason_as_hex = data[\n len(revert_pattern) + 64 : len(revert_pattern) + 64 + string_length * 2\n ]\n elif data.startswith(\"Reverted 0x\"):\n # Special case for this form: 'Reverted 0x...'\n reason_as_hex = data.split(\" \")[1][2:]\n else:\n raise ContractLogicError(\"execution reverted\", data=data)\n\n reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n\n # --- EIP-3668 | CCIP Read --- #\n if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:\n parsed_data_as_bytes = to_bytes(hexstr=data[10:])\n abi_decoded_data = abi.decode(\n list(OFFCHAIN_LOOKUP_FIELDS.values()), parsed_data_as_bytes\n )\n offchain_lookup_payload = dict(\n zip(OFFCHAIN_LOOKUP_FIELDS.keys(), abi_decoded_data)\n )\n raise OffchainLookup(offchain_lookup_payload, data=data)\n\n # --- Solidity Panic Error --- #\n if data[:10] == PANIC_ERROR_FUNC_SELECTOR:\n panic_error_code = data[-2:]\n raise ContractPanicError(PANIC_ERROR_CODES[panic_error_code], data=data)\n\n # Solidity 0.8.4 introduced custom error messages that allow args to\n # be passed in (or not). 
See:\n # https://blog.soliditylang.org/2021/04/21/custom-errors/\n if len(data) >= 10 and not data[:10] == SOLIDITY_ERROR_FUNC_SELECTOR:\n # Raise with data as both the message and the data for backwards\n # compatibility and so that data can be accessed via 'data' attribute\n # on the ContractCustomError exception\n raise ContractCustomError(data, data=data)\n\n # Geth case:\n if \"message\" in response[\"error\"] and response[\"error\"].get(\"code\", \"\") == 3:\n message = response[\"error\"][\"message\"]\n raise ContractLogicError(message, data=data)\n\n # Geth Revert without error message case:\n if \"execution reverted\" in response[\"error\"].get(\"message\"):\n raise ContractLogicError(\"execution reverted\", data=data)\n\n return response\n", "path": "web3/_utils/contract_error_handling.py"}], "after_files": [{"content": "import warnings\n\nfrom eth_abi import (\n abi,\n)\nfrom eth_utils import (\n to_bytes,\n)\n\nfrom web3.exceptions import (\n ContractCustomError,\n ContractLogicError,\n ContractPanicError,\n OffchainLookup,\n)\nfrom web3.types import (\n RPCResponse,\n)\n\n# func selector for \"Error(string)\"\nSOLIDITY_ERROR_FUNC_SELECTOR = \"0x08c379a0\"\n\n# --- CCIP Read - EIP-3668 --- #\n# the first 4 bytes of keccak hash (func selector) for:\n# \"OffchainLookup(address,string[],bytes,bytes4,bytes)\"\nOFFCHAIN_LOOKUP_FUNC_SELECTOR = \"0x556f1830\"\nOFFCHAIN_LOOKUP_FIELDS = {\n \"sender\": \"address\",\n \"urls\": \"string[]\",\n \"callData\": \"bytes\",\n \"callbackFunction\": \"bytes4\",\n \"extraData\": \"bytes\",\n}\n\n\n# --- Solidity Panic Error, as of Solidity 0.8.0 --- #\nPANIC_ERROR_FUNC_SELECTOR = \"0x4e487b71\"\nPANIC_ERROR_CODES = {\n \"00\": \"Panic error 0x00: Generic compiler inserted panics.\",\n \"01\": \"Panic error 0x01: Assert evaluates to false.\",\n \"11\": \"Panic error 0x11: Arithmetic operation results in underflow or overflow.\",\n \"12\": \"Panic error 0x12: Division by zero.\",\n \"21\": \"Panic error 0x21: Cannot convert value into an enum type.\",\n \"22\": \"Panic error 0x12: Storage byte array is incorrectly encoded.\",\n \"31\": \"Panic error 0x31: Call to 'pop()' on an empty array.\",\n \"32\": \"Panic error 0x32: Array index is out of bounds.\",\n \"41\": \"Panic error 0x41: Allocation of too much memory or array too large.\",\n \"51\": \"Panic error 0x51: Call to a zero-initialized variable of internal \"\n \"function type.\",\n}\n\n\ndef raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:\n \"\"\"\n Reverts contain a `data` attribute with the following layout:\n \"Reverted \"\n Function selector for Error(string): 08c379a (4 bytes)\n Data offset: 32 (32 bytes)\n String length (32 bytes)\n Reason string (padded, use string length from above to get meaningful part)\n\n See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert\n \"\"\"\n if not isinstance(response[\"error\"], dict):\n raise ValueError(\"Error expected to be a dict\")\n\n data = response[\"error\"].get(\"data\", \"\")\n\n # Ganache case:\n if isinstance(data, dict) and response[\"error\"].get(\"message\"):\n raise ContractLogicError(\n f'execution reverted: {response[\"error\"][\"message\"]}', data=data\n )\n\n # Parity/OpenEthereum case:\n if data.startswith(\"Reverted \"):\n # \"Reverted\", function selector and offset are always the same for revert errors\n prefix = f\"Reverted {SOLIDITY_ERROR_FUNC_SELECTOR}\"\n data_offset = (\"00\" * 31) + \"20\" # 0x0000...0020 (32 bytes)\n revert_pattern = prefix + data_offset\n\n if 
data.startswith(revert_pattern):\n # if common revert pattern\n string_length = int(\n data[len(revert_pattern) : len(revert_pattern) + 64], 16\n )\n reason_as_hex = data[\n len(revert_pattern) + 64 : len(revert_pattern) + 64 + string_length * 2\n ]\n elif data.startswith(\"Reverted 0x\"):\n # Special case for this form: 'Reverted 0x...'\n reason_as_hex = data.split(\" \")[1][2:]\n else:\n raise ContractLogicError(\"execution reverted\", data=data)\n\n try:\n reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n except UnicodeDecodeError:\n warnings.warn(\"Could not decode revert reason as UTF-8\", RuntimeWarning)\n raise ContractLogicError(\"execution reverted\", data=data)\n\n # --- EIP-3668 | CCIP Read --- #\n if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:\n parsed_data_as_bytes = to_bytes(hexstr=data[10:])\n abi_decoded_data = abi.decode(\n list(OFFCHAIN_LOOKUP_FIELDS.values()), parsed_data_as_bytes\n )\n offchain_lookup_payload = dict(\n zip(OFFCHAIN_LOOKUP_FIELDS.keys(), abi_decoded_data)\n )\n raise OffchainLookup(offchain_lookup_payload, data=data)\n\n # --- Solidity Panic Error --- #\n if data[:10] == PANIC_ERROR_FUNC_SELECTOR:\n panic_error_code = data[-2:]\n raise ContractPanicError(PANIC_ERROR_CODES[panic_error_code], data=data)\n\n # Solidity 0.8.4 introduced custom error messages that allow args to\n # be passed in (or not). See:\n # https://blog.soliditylang.org/2021/04/21/custom-errors/\n if len(data) >= 10 and not data[:10] == SOLIDITY_ERROR_FUNC_SELECTOR:\n # Raise with data as both the message and the data for backwards\n # compatibility and so that data can be accessed via 'data' attribute\n # on the ContractCustomError exception\n raise ContractCustomError(data, data=data)\n\n # Geth case:\n if \"message\" in response[\"error\"] and response[\"error\"].get(\"code\", \"\") == 3:\n message = response[\"error\"][\"message\"]\n raise ContractLogicError(message, data=data)\n\n # Geth Revert without error message case:\n if \"execution reverted\" in response[\"error\"].get(\"message\"):\n raise ContractLogicError(\"execution reverted\", data=data)\n\n return response\n", "path": "web3/_utils/contract_error_handling.py"}]}
| 3,582 | 245 |
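The patch in the record above amounts to wrapping the hex-to-UTF-8 decode in a guard so that a non-text revert payload no longer crashes the formatter. A standalone sketch of the failure mode and that guard, using only the standard library and the `Reverted 0xd27b44a9` payload quoted in the issue:

```python
revert_data = "Reverted 0xd27b44a9"  # payload quoted in the issue above
reason_as_hex = revert_data.split(" ")[1][2:]  # -> "d27b44a9"

try:
    reason = bytes.fromhex(reason_as_hex).decode("utf8")
    message = f"execution reverted: {reason}"
except UnicodeDecodeError:
    # The four bytes are an error selector, not UTF-8 text, so fall back
    # to a generic message instead of propagating the decode failure.
    message = "execution reverted"

print(message)  # -> execution reverted
```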
gh_patches_debug_17784
|
rasdani/github-patches
|
git_diff
|
lmfit__lmfit-py-150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes
Due to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`
I discovered this issue while trying to fix #149
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # from distutils.core import setup
3 from setuptools import setup
4
5 import lmfit as lmfit
6 import numpy, scipy
7
8 long_desc = """A library for least-squares minimization and data fitting in
9 Python. Built on top of scipy.optimize, lmfit provides a Parameter object
10 which can be set as fixed or free, can have upper and/or lower bounds, or
11 can be written in terms of algebraic constraints of other Parameters. The
12 user writes a function to be minimized as a function of these Parameters,
13 and the scipy.optimize methods are used to find the optimal values for the
14 Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
15 algorithm, and provides estimated standard errors and correlations between
16 varied Parameters. Other minimization methods, including Nelder-Mead's
17 downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
18 others are also supported. Bounds and contraints can be placed on
19 Parameters for all of these methods.
20
21 In addition, methods for explicitly calculating confidence intervals are
22 provided for exploring minmization problems where the approximation of
23 estimating Parameter uncertainties from the covariance matrix is
24 questionable. """
25
26
27 setup(name = 'lmfit',
28 version = lmfit.__version__,
29 author = 'LMFit Development Team',
30 author_email = '[email protected]',
31 url = 'http://lmfit.github.io/lmfit-py/',
32 download_url = 'http://lmfit.github.io//lmfit-py/',
33 requires = ('numpy', 'scipy'),
34 license = 'BSD',
35 description = "Least-Squares Minimization with Bounds and Constraints",
36 long_description = long_desc,
37 platforms = ('Windows', 'Linux', 'Mac OS X'),
38 classifiers=['Intended Audience :: Science/Research',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python',
41 'Topic :: Scientific/Engineering',
42 ],
43 # test_suite='nose.collector',
44 # test_requires=['Nose'],
45 package_dir = {'lmfit': 'lmfit'},
46 packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
47 )
48
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,11 @@
author_email = '[email protected]',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- requires = ('numpy', 'scipy'),
+ requires = ['numpy', 'scipy'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
- platforms = ('Windows', 'Linux', 'Mac OS X'),
+ platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,11 @@\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n- requires = ('numpy', 'scipy'),\n+ requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n- platforms = ('Windows', 'Linux', 'Mac OS X'),\n+ platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n", "issue": "Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes\nDue to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`\n\nI discovered this issue while trying to fix #149 \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ('numpy', 'scipy'),\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ('Windows', 'Linux', 'Mac OS X'),\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. 
The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}]}
| 878 | 185 |
gh_patches_debug_22010
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-561
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postgresql 8.4 error when running paster db init
When running the paster db init command with the CKAN 2.0 beta, there is an error encountered that appears to be related to use of the left() string function in ckan/migration/versions/067_turn_extras_to_strings.py. According to the documentation and my own simple test, this function is not support in Postgresql 8.4. For a stack trace, see: https://gist.github.com/thriuin/5067819.
Is there a new minimum version of Postgresql required -- documentation still says 8.4 which unfortunately is what comes with RedHat Enterprise.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/migration/versions/067_turn_extras_to_strings.py`
Content:
```
1 import json
2
3 def upgrade(migrate_engine):
4
5 with migrate_engine.begin() as connection:
6 tables = 'package_extra group_extra'
7 revision_tables = 'package_extra_revision group_extra_revision'
8
9 for table in tables.split():
10 sql = """select id, value from {table} where left(value,1) = '"' """.format(table=table)
11 results = connection.execute(sql)
12 for result in results:
13 id, value = result
14 update_sql = 'update {table} set value = %s where id = %s'
15 connection.execute(update_sql.format(table=table),
16 json.loads(value), id)
17
18 for table in revision_tables.split():
19 sql = """select id, revision_id, value from {table} where left(value,1) = '"' """.format(table=table)
20
21 results = connection.execute(sql)
22 for result in results:
23 id, revision_id, value = result
24 update_sql = 'update {table} set value = %s where id = %s and revision_id = %s'
25 connection.execute(update_sql.format(table=table),
26 json.loads(value), id, revision_id)
27
28
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckan/migration/versions/067_turn_extras_to_strings.py b/ckan/migration/versions/067_turn_extras_to_strings.py
--- a/ckan/migration/versions/067_turn_extras_to_strings.py
+++ b/ckan/migration/versions/067_turn_extras_to_strings.py
@@ -7,7 +7,7 @@
revision_tables = 'package_extra_revision group_extra_revision'
for table in tables.split():
- sql = """select id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
id, value = result
@@ -16,7 +16,7 @@
json.loads(value), id)
for table in revision_tables.split():
- sql = """select id, revision_id, value from {table} where left(value,1) = '"' """.format(table=table)
+ sql = """select id, revision_id, value from {table} where substr(value,0,1) = '"' """.format(table=table)
results = connection.execute(sql)
for result in results:
|
{"golden_diff": "diff --git a/ckan/migration/versions/067_turn_extras_to_strings.py b/ckan/migration/versions/067_turn_extras_to_strings.py\n--- a/ckan/migration/versions/067_turn_extras_to_strings.py\n+++ b/ckan/migration/versions/067_turn_extras_to_strings.py\n@@ -7,7 +7,7 @@\n revision_tables = 'package_extra_revision group_extra_revision'\n \n for table in tables.split():\n- sql = \"\"\"select id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n+ sql = \"\"\"select id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n results = connection.execute(sql)\n for result in results:\n id, value = result\n@@ -16,7 +16,7 @@\n json.loads(value), id)\n \n for table in revision_tables.split():\n- sql = \"\"\"select id, revision_id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n+ sql = \"\"\"select id, revision_id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n \n results = connection.execute(sql)\n for result in results:\n", "issue": "Postgresql 8.4 error when running paster db init\nWhen running the paster db init command with the CKAN 2.0 beta, there is an error encountered that appears to be related to use of the left() string function in ckan/migration/versions/067_turn_extras_to_strings.py. According to the documentation and my own simple test, this function is not support in Postgresql 8.4. For a stack trace, see: https://gist.github.com/thriuin/5067819.\n\nIs there a new minimum version of Postgresql required -- documentation still says 8.4 which unfortunately is what comes with RedHat Enterprise.\n\n", "before_files": [{"content": "import json\n\ndef upgrade(migrate_engine):\n\n with migrate_engine.begin() as connection:\n tables = 'package_extra group_extra'\n revision_tables = 'package_extra_revision group_extra_revision'\n\n for table in tables.split():\n sql = \"\"\"select id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n results = connection.execute(sql)\n for result in results:\n id, value = result\n update_sql = 'update {table} set value = %s where id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id)\n\n for table in revision_tables.split():\n sql = \"\"\"select id, revision_id, value from {table} where left(value,1) = '\"' \"\"\".format(table=table)\n\n results = connection.execute(sql)\n for result in results:\n id, revision_id, value = result\n update_sql = 'update {table} set value = %s where id = %s and revision_id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id, revision_id)\n\n\n", "path": "ckan/migration/versions/067_turn_extras_to_strings.py"}], "after_files": [{"content": "import json\n\ndef upgrade(migrate_engine):\n\n with migrate_engine.begin() as connection:\n tables = 'package_extra group_extra'\n revision_tables = 'package_extra_revision group_extra_revision'\n\n for table in tables.split():\n sql = \"\"\"select id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n results = connection.execute(sql)\n for result in results:\n id, value = result\n update_sql = 'update {table} set value = %s where id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id)\n\n for table in revision_tables.split():\n sql = \"\"\"select id, revision_id, value from {table} where substr(value,0,1) = '\"' \"\"\".format(table=table)\n\n results = connection.execute(sql)\n for result in results:\n id, revision_id, value = 
result\n update_sql = 'update {table} set value = %s where id = %s and revision_id = %s'\n connection.execute(update_sql.format(table=table),\n json.loads(value), id, revision_id)\n\n\n", "path": "ckan/migration/versions/067_turn_extras_to_strings.py"}]}
| 704 | 290 |
gh_patches_debug_41504
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-1080
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Tile of rechunk() outputs inconsistent dtypes and columns_value
**Describe the bug**
DataFrame.rechunk() produces chunks with different number of dtypes and columns_value, which may impair descendant chunks.
Code to reproduce this:
```python
In [2]: import pandas as pd
In [4]: from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
In [6]: cols = [chr(ord('A') + i) for i in range(10)]
In [8]: df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
In [9]: df = from_pandas_df(df_raw, chunk_size=5)
In [13]: rechunked = df.rechunk((20, 1)).tiles()
In [14]: rechunked.chunks[0].columns_value.to_pandas()
Out[14]: Index(['A'], dtype='object')
In [15]: rechunked.chunks[0].dtypes
Out[15]:
A int64
B int64
C int64
D int64
E int64
dtype: object
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/dataframe/base/rechunk.py`
Content:
```
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import itertools
16
17 from ... import opcodes as OperandDef
18 from ...serialize import KeyField, AnyField, Int32Field, Int64Field
19 from ...tensor.rechunk.core import get_nsplits, plan_rechunks, compute_rechunk_slices
20 from ...tensor.utils import calc_sliced_size
21 from ...utils import check_chunks_unknown_shape
22 from ...tiles import TilesError
23 from ..operands import DataFrameOperand, DataFrameOperandMixin, DATAFRAME_TYPE, ObjectType
24 from ..utils import indexing_index_value, merge_index_value
25
26
27 class DataFrameRechunk(DataFrameOperand, DataFrameOperandMixin):
28 _op_type_ = OperandDef.RECHUNK
29
30 _input = KeyField('input')
31 _chunk_size = AnyField('chunk_size')
32 _threshold = Int32Field('threshold')
33 _chunk_size_limit = Int64Field('chunk_size_limit')
34
35 def __init__(self, chunk_size=None, threshold=None, chunk_size_limit=None, object_type=None, **kw):
36 super().__init__(_chunk_size=chunk_size, _threshold=threshold,
37 _chunk_size_limit=chunk_size_limit, _object_type=object_type, **kw)
38
39 @property
40 def chunk_size(self):
41 return self._chunk_size
42
43 @property
44 def threshold(self):
45 return self._threshold
46
47 @property
48 def chunk_size_limit(self):
49 return self._chunk_size_limit
50
51 def _set_inputs(self, inputs):
52 super()._set_inputs(inputs)
53 self._input = self._inputs[0]
54
55 def __call__(self, x):
56 if isinstance(x, DATAFRAME_TYPE):
57 self._object_type = ObjectType.dataframe
58 return self.new_dataframe([x], shape=x.shape, dtypes=x.dtypes,
59 columns_value=x.columns_value, index_value=x.index_value)
60 else:
61 self._object_type = ObjectType.series
62 return self.new_series([x], shape=x.shape, dtype=x.dtype, index_value=x.index_value, name=x.name)
63
64 @classmethod
65 def tile(cls, op):
66 check_chunks_unknown_shape(op.inputs, TilesError)
67 out = op.outputs[0]
68 new_chunk_size = op.chunk_size
69 if isinstance(out, DATAFRAME_TYPE):
70 itemsize = max(dt.itemsize for dt in out.dtypes)
71 else:
72 itemsize = out.dtype.itemsize
73 steps = plan_rechunks(op.inputs[0], new_chunk_size, itemsize,
74 threshold=op.threshold,
75 chunk_size_limit=op.chunk_size_limit)
76 for c in steps:
77 out = compute_rechunk(out.inputs[0], c)
78
79 return [out]
80
81
82 def rechunk(a, chunk_size, threshold=None, chunk_size_limit=None):
83 if isinstance(a, DATAFRAME_TYPE):
84 itemsize = max(dt.itemsize for dt in a.dtypes)
85 else:
86 itemsize = a.dtype.itemsize
87 chunk_size = get_nsplits(a, chunk_size, itemsize)
88 if chunk_size == a.nsplits:
89 return a
90
91 op = DataFrameRechunk(chunk_size, threshold, chunk_size_limit)
92 return op(a)
93
94
95 def _concat_dataframe_index_and_columns(to_concat_chunks):
96 if to_concat_chunks[0].index_value.to_pandas().empty:
97 index_value = to_concat_chunks[0].index_value
98 else:
99 idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks if c.index[1] == 0)
100 index_value = merge_index_value(idx_to_index_value)
101
102 idx_to_columns_value = dict((c.index[1], c.columns_value) for c in to_concat_chunks if c.index[0] == 0)
103 columns_value = merge_index_value(idx_to_columns_value, store_data=True)
104 return index_value, columns_value
105
106
107 def _concat_series_index(to_concat_chunks):
108 if to_concat_chunks[0].index_value.to_pandas().empty:
109 index_value = to_concat_chunks[0].index_value
110 else:
111 idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks)
112 index_value = merge_index_value(idx_to_index_value)
113 return index_value
114
115
116 def compute_rechunk(a, chunk_size):
117 from ..indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem
118 from ..merge.concat import DataFrameConcat
119
120 result_slices = compute_rechunk_slices(a, chunk_size)
121 result_chunks = []
122 idxes = itertools.product(*[range(len(c)) for c in chunk_size])
123 chunk_slices = itertools.product(*result_slices)
124 chunk_shapes = itertools.product(*chunk_size)
125 is_dataframe = isinstance(a, DATAFRAME_TYPE)
126 for idx, chunk_slice, chunk_shape in zip(idxes, chunk_slices, chunk_shapes):
127 to_merge = []
128 merge_idxes = itertools.product(*[range(len(i)) for i in chunk_slice])
129 for merge_idx, index_slices in zip(merge_idxes, itertools.product(*chunk_slice)):
130 chunk_index, chunk_slice = zip(*index_slices)
131 old_chunk = a.cix[chunk_index]
132 merge_chunk_shape = tuple(calc_sliced_size(s, chunk_slice[0]) for s in old_chunk.shape)
133 new_index_value = indexing_index_value(old_chunk.index_value, chunk_slice[0])
134 if is_dataframe:
135 new_columns_value = indexing_index_value(old_chunk.columns_value, chunk_slice[1], store_data=True)
136 merge_chunk_op = DataFrameIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,
137 object_type=ObjectType.dataframe)
138 merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,
139 index=merge_idx, index_value=new_index_value,
140 columns_value=new_columns_value, dtypes=old_chunk.dtypes)
141 else:
142 merge_chunk_op = SeriesIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,
143 object_type=ObjectType.series)
144 merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,
145 index=merge_idx, index_value=new_index_value,
146 dtype=old_chunk.dtype)
147 to_merge.append(merge_chunk)
148 if len(to_merge) == 1:
149 chunk_op = to_merge[0].op.copy()
150 if is_dataframe:
151 out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,
152 index=idx, index_value=to_merge[0].index_value,
153 columns_value=to_merge[0].columns_value,
154 dtypes=to_merge[0].dtypes)
155 else:
156 out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,
157 index=idx, index_value=to_merge[0].index_value,
158 name=to_merge[0].name, dtype=to_merge[0].dtype)
159 result_chunks.append(out_chunk)
160 else:
161 if is_dataframe:
162 chunk_op = DataFrameConcat(object_type=ObjectType.dataframe)
163 index_value, columns_value = _concat_dataframe_index_and_columns(to_merge)
164 out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,
165 index=idx, index_value=index_value,
166 columns_value=columns_value,
167 dtypes=to_merge[0].dtypes)
168 else:
169 chunk_op = DataFrameConcat(object_type=ObjectType.series)
170 index_value = _concat_series_index(to_merge)
171 out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,
172 index=idx, index_value=index_value,
173 dtype=to_merge[0].dtype)
174 result_chunks.append(out_chunk)
175
176 if is_dataframe:
177 op = DataFrameRechunk(chunk_size, object_type=ObjectType.dataframe)
178 return op.new_dataframe([a], a.shape, dtypes=a.dtypes, columns_value=a.columns_value,
179 index_value=a.index_value, nsplits=chunk_size, chunks=result_chunks)
180 else:
181 op = DataFrameRechunk(chunk_size, object_type=ObjectType.series)
182 return op.new_series([a], a.shape, dtype=a.dtype, index_value=a.index_value,
183 nsplits=chunk_size, chunks=result_chunks)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mars/dataframe/base/rechunk.py b/mars/dataframe/base/rechunk.py
--- a/mars/dataframe/base/rechunk.py
+++ b/mars/dataframe/base/rechunk.py
@@ -14,6 +14,8 @@
import itertools
+import pandas as pd
+
from ... import opcodes as OperandDef
from ...serialize import KeyField, AnyField, Int32Field, Int64Field
from ...tensor.rechunk.core import get_nsplits, plan_rechunks, compute_rechunk_slices
@@ -92,7 +94,7 @@
return op(a)
-def _concat_dataframe_index_and_columns(to_concat_chunks):
+def _concat_dataframe_meta(to_concat_chunks):
if to_concat_chunks[0].index_value.to_pandas().empty:
index_value = to_concat_chunks[0].index_value
else:
@@ -101,7 +103,10 @@
idx_to_columns_value = dict((c.index[1], c.columns_value) for c in to_concat_chunks if c.index[0] == 0)
columns_value = merge_index_value(idx_to_columns_value, store_data=True)
- return index_value, columns_value
+
+ idx_to_dtypes = dict((c.index[1], c.dtypes) for c in to_concat_chunks if c.index[0] == 0)
+ dtypes = pd.concat([v[1] for v in list(sorted(idx_to_dtypes.items()))])
+ return index_value, columns_value, dtypes
def _concat_series_index(to_concat_chunks):
@@ -137,7 +142,8 @@
object_type=ObjectType.dataframe)
merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,
index=merge_idx, index_value=new_index_value,
- columns_value=new_columns_value, dtypes=old_chunk.dtypes)
+ columns_value=new_columns_value,
+ dtypes=old_chunk.dtypes.iloc[chunk_slice[1]])
else:
merge_chunk_op = SeriesIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,
object_type=ObjectType.series)
@@ -160,11 +166,11 @@
else:
if is_dataframe:
chunk_op = DataFrameConcat(object_type=ObjectType.dataframe)
- index_value, columns_value = _concat_dataframe_index_and_columns(to_merge)
+ index_value, columns_value, dtypes = _concat_dataframe_meta(to_merge)
out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,
index=idx, index_value=index_value,
columns_value=columns_value,
- dtypes=to_merge[0].dtypes)
+ dtypes=dtypes)
else:
chunk_op = DataFrameConcat(object_type=ObjectType.series)
index_value = _concat_series_index(to_merge)
|
{"golden_diff": "diff --git a/mars/dataframe/base/rechunk.py b/mars/dataframe/base/rechunk.py\n--- a/mars/dataframe/base/rechunk.py\n+++ b/mars/dataframe/base/rechunk.py\n@@ -14,6 +14,8 @@\n \n import itertools\n \n+import pandas as pd\n+\n from ... import opcodes as OperandDef\n from ...serialize import KeyField, AnyField, Int32Field, Int64Field\n from ...tensor.rechunk.core import get_nsplits, plan_rechunks, compute_rechunk_slices\n@@ -92,7 +94,7 @@\n return op(a)\n \n \n-def _concat_dataframe_index_and_columns(to_concat_chunks):\n+def _concat_dataframe_meta(to_concat_chunks):\n if to_concat_chunks[0].index_value.to_pandas().empty:\n index_value = to_concat_chunks[0].index_value\n else:\n@@ -101,7 +103,10 @@\n \n idx_to_columns_value = dict((c.index[1], c.columns_value) for c in to_concat_chunks if c.index[0] == 0)\n columns_value = merge_index_value(idx_to_columns_value, store_data=True)\n- return index_value, columns_value\n+\n+ idx_to_dtypes = dict((c.index[1], c.dtypes) for c in to_concat_chunks if c.index[0] == 0)\n+ dtypes = pd.concat([v[1] for v in list(sorted(idx_to_dtypes.items()))])\n+ return index_value, columns_value, dtypes\n \n \n def _concat_series_index(to_concat_chunks):\n@@ -137,7 +142,8 @@\n object_type=ObjectType.dataframe)\n merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,\n index=merge_idx, index_value=new_index_value,\n- columns_value=new_columns_value, dtypes=old_chunk.dtypes)\n+ columns_value=new_columns_value,\n+ dtypes=old_chunk.dtypes.iloc[chunk_slice[1]])\n else:\n merge_chunk_op = SeriesIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,\n object_type=ObjectType.series)\n@@ -160,11 +166,11 @@\n else:\n if is_dataframe:\n chunk_op = DataFrameConcat(object_type=ObjectType.dataframe)\n- index_value, columns_value = _concat_dataframe_index_and_columns(to_merge)\n+ index_value, columns_value, dtypes = _concat_dataframe_meta(to_merge)\n out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,\n index=idx, index_value=index_value,\n columns_value=columns_value,\n- dtypes=to_merge[0].dtypes)\n+ dtypes=dtypes)\n else:\n chunk_op = DataFrameConcat(object_type=ObjectType.series)\n index_value = _concat_series_index(to_merge)\n", "issue": "[BUG] Tile of rechunk() outputs inconsistent dtypes and columns_value\n**Describe the bug**\r\nDataFrame.rechunk() produces chunks with different number of dtypes and columns_value, which may impair descendant chunks.\r\n\r\nCode to reproduce this:\r\n\r\n```python \r\nIn [2]: import pandas as pd \r\n\r\nIn [4]: from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df \r\n\r\nIn [6]: cols = [chr(ord('A') + i) for i in range(10)] \r\n\r\nIn [8]: df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols)) \r\n\r\nIn [9]: df = from_pandas_df(df_raw, chunk_size=5) \r\n\r\nIn [13]: rechunked = df.rechunk((20, 1)).tiles() \r\n\r\nIn [14]: rechunked.chunks[0].columns_value.to_pandas() \r\nOut[14]: Index(['A'], dtype='object')\r\n\r\nIn [15]: rechunked.chunks[0].dtypes \r\nOut[15]: \r\nA int64\r\nB int64\r\nC int64\r\nD int64\r\nE int64\r\ndtype: object\r\n```\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License 
is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import KeyField, AnyField, Int32Field, Int64Field\nfrom ...tensor.rechunk.core import get_nsplits, plan_rechunks, compute_rechunk_slices\nfrom ...tensor.utils import calc_sliced_size\nfrom ...utils import check_chunks_unknown_shape\nfrom ...tiles import TilesError\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin, DATAFRAME_TYPE, ObjectType\nfrom ..utils import indexing_index_value, merge_index_value\n\n\nclass DataFrameRechunk(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = OperandDef.RECHUNK\n\n _input = KeyField('input')\n _chunk_size = AnyField('chunk_size')\n _threshold = Int32Field('threshold')\n _chunk_size_limit = Int64Field('chunk_size_limit')\n\n def __init__(self, chunk_size=None, threshold=None, chunk_size_limit=None, object_type=None, **kw):\n super().__init__(_chunk_size=chunk_size, _threshold=threshold,\n _chunk_size_limit=chunk_size_limit, _object_type=object_type, **kw)\n\n @property\n def chunk_size(self):\n return self._chunk_size\n\n @property\n def threshold(self):\n return self._threshold\n\n @property\n def chunk_size_limit(self):\n return self._chunk_size_limit\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, x):\n if isinstance(x, DATAFRAME_TYPE):\n self._object_type = ObjectType.dataframe\n return self.new_dataframe([x], shape=x.shape, dtypes=x.dtypes,\n columns_value=x.columns_value, index_value=x.index_value)\n else:\n self._object_type = ObjectType.series\n return self.new_series([x], shape=x.shape, dtype=x.dtype, index_value=x.index_value, name=x.name)\n\n @classmethod\n def tile(cls, op):\n check_chunks_unknown_shape(op.inputs, TilesError)\n out = op.outputs[0]\n new_chunk_size = op.chunk_size\n if isinstance(out, DATAFRAME_TYPE):\n itemsize = max(dt.itemsize for dt in out.dtypes)\n else:\n itemsize = out.dtype.itemsize\n steps = plan_rechunks(op.inputs[0], new_chunk_size, itemsize,\n threshold=op.threshold,\n chunk_size_limit=op.chunk_size_limit)\n for c in steps:\n out = compute_rechunk(out.inputs[0], c)\n\n return [out]\n\n\ndef rechunk(a, chunk_size, threshold=None, chunk_size_limit=None):\n if isinstance(a, DATAFRAME_TYPE):\n itemsize = max(dt.itemsize for dt in a.dtypes)\n else:\n itemsize = a.dtype.itemsize\n chunk_size = get_nsplits(a, chunk_size, itemsize)\n if chunk_size == a.nsplits:\n return a\n\n op = DataFrameRechunk(chunk_size, threshold, chunk_size_limit)\n return op(a)\n\n\ndef _concat_dataframe_index_and_columns(to_concat_chunks):\n if to_concat_chunks[0].index_value.to_pandas().empty:\n index_value = to_concat_chunks[0].index_value\n else:\n idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks if c.index[1] == 0)\n index_value = merge_index_value(idx_to_index_value)\n\n idx_to_columns_value = dict((c.index[1], c.columns_value) for c in to_concat_chunks if c.index[0] == 0)\n columns_value = merge_index_value(idx_to_columns_value, store_data=True)\n return index_value, columns_value\n\n\ndef _concat_series_index(to_concat_chunks):\n if to_concat_chunks[0].index_value.to_pandas().empty:\n index_value = to_concat_chunks[0].index_value\n else:\n idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks)\n 
index_value = merge_index_value(idx_to_index_value)\n return index_value\n\n\ndef compute_rechunk(a, chunk_size):\n from ..indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem\n from ..merge.concat import DataFrameConcat\n\n result_slices = compute_rechunk_slices(a, chunk_size)\n result_chunks = []\n idxes = itertools.product(*[range(len(c)) for c in chunk_size])\n chunk_slices = itertools.product(*result_slices)\n chunk_shapes = itertools.product(*chunk_size)\n is_dataframe = isinstance(a, DATAFRAME_TYPE)\n for idx, chunk_slice, chunk_shape in zip(idxes, chunk_slices, chunk_shapes):\n to_merge = []\n merge_idxes = itertools.product(*[range(len(i)) for i in chunk_slice])\n for merge_idx, index_slices in zip(merge_idxes, itertools.product(*chunk_slice)):\n chunk_index, chunk_slice = zip(*index_slices)\n old_chunk = a.cix[chunk_index]\n merge_chunk_shape = tuple(calc_sliced_size(s, chunk_slice[0]) for s in old_chunk.shape)\n new_index_value = indexing_index_value(old_chunk.index_value, chunk_slice[0])\n if is_dataframe:\n new_columns_value = indexing_index_value(old_chunk.columns_value, chunk_slice[1], store_data=True)\n merge_chunk_op = DataFrameIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,\n object_type=ObjectType.dataframe)\n merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,\n index=merge_idx, index_value=new_index_value,\n columns_value=new_columns_value, dtypes=old_chunk.dtypes)\n else:\n merge_chunk_op = SeriesIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,\n object_type=ObjectType.series)\n merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,\n index=merge_idx, index_value=new_index_value,\n dtype=old_chunk.dtype)\n to_merge.append(merge_chunk)\n if len(to_merge) == 1:\n chunk_op = to_merge[0].op.copy()\n if is_dataframe:\n out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,\n index=idx, index_value=to_merge[0].index_value,\n columns_value=to_merge[0].columns_value,\n dtypes=to_merge[0].dtypes)\n else:\n out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,\n index=idx, index_value=to_merge[0].index_value,\n name=to_merge[0].name, dtype=to_merge[0].dtype)\n result_chunks.append(out_chunk)\n else:\n if is_dataframe:\n chunk_op = DataFrameConcat(object_type=ObjectType.dataframe)\n index_value, columns_value = _concat_dataframe_index_and_columns(to_merge)\n out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,\n index=idx, index_value=index_value,\n columns_value=columns_value,\n dtypes=to_merge[0].dtypes)\n else:\n chunk_op = DataFrameConcat(object_type=ObjectType.series)\n index_value = _concat_series_index(to_merge)\n out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,\n index=idx, index_value=index_value,\n dtype=to_merge[0].dtype)\n result_chunks.append(out_chunk)\n\n if is_dataframe:\n op = DataFrameRechunk(chunk_size, object_type=ObjectType.dataframe)\n return op.new_dataframe([a], a.shape, dtypes=a.dtypes, columns_value=a.columns_value,\n index_value=a.index_value, nsplits=chunk_size, chunks=result_chunks)\n else:\n op = DataFrameRechunk(chunk_size, object_type=ObjectType.series)\n return op.new_series([a], a.shape, dtype=a.dtype, index_value=a.index_value,\n nsplits=chunk_size, chunks=result_chunks)\n", "path": "mars/dataframe/base/rechunk.py"}], "after_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nimport pandas as pd\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import KeyField, AnyField, Int32Field, Int64Field\nfrom ...tensor.rechunk.core import get_nsplits, plan_rechunks, compute_rechunk_slices\nfrom ...tensor.utils import calc_sliced_size\nfrom ...utils import check_chunks_unknown_shape\nfrom ...tiles import TilesError\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin, DATAFRAME_TYPE, ObjectType\nfrom ..utils import indexing_index_value, merge_index_value\n\n\nclass DataFrameRechunk(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = OperandDef.RECHUNK\n\n _input = KeyField('input')\n _chunk_size = AnyField('chunk_size')\n _threshold = Int32Field('threshold')\n _chunk_size_limit = Int64Field('chunk_size_limit')\n\n def __init__(self, chunk_size=None, threshold=None, chunk_size_limit=None, object_type=None, **kw):\n super().__init__(_chunk_size=chunk_size, _threshold=threshold,\n _chunk_size_limit=chunk_size_limit, _object_type=object_type, **kw)\n\n @property\n def chunk_size(self):\n return self._chunk_size\n\n @property\n def threshold(self):\n return self._threshold\n\n @property\n def chunk_size_limit(self):\n return self._chunk_size_limit\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, x):\n if isinstance(x, DATAFRAME_TYPE):\n self._object_type = ObjectType.dataframe\n return self.new_dataframe([x], shape=x.shape, dtypes=x.dtypes,\n columns_value=x.columns_value, index_value=x.index_value)\n else:\n self._object_type = ObjectType.series\n return self.new_series([x], shape=x.shape, dtype=x.dtype, index_value=x.index_value, name=x.name)\n\n @classmethod\n def tile(cls, op):\n check_chunks_unknown_shape(op.inputs, TilesError)\n out = op.outputs[0]\n new_chunk_size = op.chunk_size\n if isinstance(out, DATAFRAME_TYPE):\n itemsize = max(dt.itemsize for dt in out.dtypes)\n else:\n itemsize = out.dtype.itemsize\n steps = plan_rechunks(op.inputs[0], new_chunk_size, itemsize,\n threshold=op.threshold,\n chunk_size_limit=op.chunk_size_limit)\n for c in steps:\n out = compute_rechunk(out.inputs[0], c)\n\n return [out]\n\n\ndef rechunk(a, chunk_size, threshold=None, chunk_size_limit=None):\n if isinstance(a, DATAFRAME_TYPE):\n itemsize = max(dt.itemsize for dt in a.dtypes)\n else:\n itemsize = a.dtype.itemsize\n chunk_size = get_nsplits(a, chunk_size, itemsize)\n if chunk_size == a.nsplits:\n return a\n\n op = DataFrameRechunk(chunk_size, threshold, chunk_size_limit)\n return op(a)\n\n\ndef _concat_dataframe_meta(to_concat_chunks):\n if to_concat_chunks[0].index_value.to_pandas().empty:\n index_value = to_concat_chunks[0].index_value\n else:\n idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks if c.index[1] == 0)\n index_value = merge_index_value(idx_to_index_value)\n\n idx_to_columns_value = dict((c.index[1], c.columns_value) for c in to_concat_chunks if c.index[0] == 0)\n columns_value = merge_index_value(idx_to_columns_value, store_data=True)\n\n idx_to_dtypes = dict((c.index[1], c.dtypes) for c in 
to_concat_chunks if c.index[0] == 0)\n dtypes = pd.concat([v[1] for v in list(sorted(idx_to_dtypes.items()))])\n return index_value, columns_value, dtypes\n\n\ndef _concat_series_index(to_concat_chunks):\n if to_concat_chunks[0].index_value.to_pandas().empty:\n index_value = to_concat_chunks[0].index_value\n else:\n idx_to_index_value = dict((c.index[0], c.index_value) for c in to_concat_chunks)\n index_value = merge_index_value(idx_to_index_value)\n return index_value\n\n\ndef compute_rechunk(a, chunk_size):\n from ..indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem\n from ..merge.concat import DataFrameConcat\n\n result_slices = compute_rechunk_slices(a, chunk_size)\n result_chunks = []\n idxes = itertools.product(*[range(len(c)) for c in chunk_size])\n chunk_slices = itertools.product(*result_slices)\n chunk_shapes = itertools.product(*chunk_size)\n is_dataframe = isinstance(a, DATAFRAME_TYPE)\n for idx, chunk_slice, chunk_shape in zip(idxes, chunk_slices, chunk_shapes):\n to_merge = []\n merge_idxes = itertools.product(*[range(len(i)) for i in chunk_slice])\n for merge_idx, index_slices in zip(merge_idxes, itertools.product(*chunk_slice)):\n chunk_index, chunk_slice = zip(*index_slices)\n old_chunk = a.cix[chunk_index]\n merge_chunk_shape = tuple(calc_sliced_size(s, chunk_slice[0]) for s in old_chunk.shape)\n new_index_value = indexing_index_value(old_chunk.index_value, chunk_slice[0])\n if is_dataframe:\n new_columns_value = indexing_index_value(old_chunk.columns_value, chunk_slice[1], store_data=True)\n merge_chunk_op = DataFrameIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,\n object_type=ObjectType.dataframe)\n merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,\n index=merge_idx, index_value=new_index_value,\n columns_value=new_columns_value,\n dtypes=old_chunk.dtypes.iloc[chunk_slice[1]])\n else:\n merge_chunk_op = SeriesIlocGetItem(chunk_slice, sparse=old_chunk.op.sparse,\n object_type=ObjectType.series)\n merge_chunk = merge_chunk_op.new_chunk([old_chunk], shape=merge_chunk_shape,\n index=merge_idx, index_value=new_index_value,\n dtype=old_chunk.dtype)\n to_merge.append(merge_chunk)\n if len(to_merge) == 1:\n chunk_op = to_merge[0].op.copy()\n if is_dataframe:\n out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,\n index=idx, index_value=to_merge[0].index_value,\n columns_value=to_merge[0].columns_value,\n dtypes=to_merge[0].dtypes)\n else:\n out_chunk = chunk_op.new_chunk(to_merge[0].op.inputs, shape=chunk_shape,\n index=idx, index_value=to_merge[0].index_value,\n name=to_merge[0].name, dtype=to_merge[0].dtype)\n result_chunks.append(out_chunk)\n else:\n if is_dataframe:\n chunk_op = DataFrameConcat(object_type=ObjectType.dataframe)\n index_value, columns_value, dtypes = _concat_dataframe_meta(to_merge)\n out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,\n index=idx, index_value=index_value,\n columns_value=columns_value,\n dtypes=dtypes)\n else:\n chunk_op = DataFrameConcat(object_type=ObjectType.series)\n index_value = _concat_series_index(to_merge)\n out_chunk = chunk_op.new_chunk(to_merge, shape=chunk_shape,\n index=idx, index_value=index_value,\n dtype=to_merge[0].dtype)\n result_chunks.append(out_chunk)\n\n if is_dataframe:\n op = DataFrameRechunk(chunk_size, object_type=ObjectType.dataframe)\n return op.new_dataframe([a], a.shape, dtypes=a.dtypes, columns_value=a.columns_value,\n index_value=a.index_value, nsplits=chunk_size, chunks=result_chunks)\n else:\n op = DataFrameRechunk(chunk_size, 
object_type=ObjectType.series)\n return op.new_series([a], a.shape, dtype=a.dtype, index_value=a.index_value,\n nsplits=chunk_size, chunks=result_chunks)\n", "path": "mars/dataframe/base/rechunk.py"}]}
| 2,849 | 617 |
gh_patches_debug_5351
|
rasdani/github-patches
|
git_diff
|
coala__coala-2795
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make exception tracebacks default
Instead of asking the user to run coala with `-L DEBUG`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/output/printers/LogPrinter.py`
Content:
```
1 import traceback
2
3 from pyprint.ColorPrinter import ColorPrinter
4
5 from coalib.output.printers.LOG_LEVEL import LOG_LEVEL, LOG_LEVEL_COLORS
6 from coalib.processes.communication.LogMessage import LogMessage
7
8
9 class LogPrinter:
10 """
11 The LogPrinter class allows to print log messages to an underlying Printer.
12
13 This class is an adapter, means you can create a LogPrinter from every
14 existing Printer instance.
15 """
16
17 def __init__(self,
18 printer,
19 log_level=LOG_LEVEL.INFO,
20 timestamp_format="%X"):
21 """
22 Creates a new log printer from an existing Printer.
23
24 :param printer: The underlying Printer where log messages
25 shall be written to. If you inherit from
26 LogPrinter, set it to self.
27 :param log_level: The minimum log level, everything below will
28 not be logged.
29 :param timestamp_format: The format string for the
30 datetime.today().strftime(format) method.
31 """
32 self._printer = printer
33 self.log_level = log_level
34 self.timestamp_format = timestamp_format
35
36 @property
37 def printer(self):
38 """
39 Returns the underlying printer where logs are printed to.
40 """
41 return self._printer
42
43 def _get_log_prefix(self, log_level, timestamp):
44 datetime_string = timestamp.strftime(self.timestamp_format)
45
46 if datetime_string != "":
47 datetime_string = "[" + datetime_string + "]"
48
49 return '[{}]{}'.format(LOG_LEVEL.reverse.get(log_level, "ERROR"),
50 datetime_string)
51
52 def debug(self, *messages, delimiter=" ", timestamp=None, **kwargs):
53 self.log_message(LogMessage(LOG_LEVEL.DEBUG,
54 *messages,
55 delimiter=delimiter,
56 timestamp=timestamp),
57 **kwargs)
58
59 def info(self, *messages, delimiter=" ", timestamp=None, **kwargs):
60 self.log_message(LogMessage(LOG_LEVEL.INFO,
61 *messages,
62 delimiter=delimiter,
63 timestamp=timestamp),
64 **kwargs)
65
66 def warn(self, *messages, delimiter=" ", timestamp=None, **kwargs):
67 self.log_message(LogMessage(LOG_LEVEL.WARNING,
68 *messages,
69 delimiter=delimiter,
70 timestamp=timestamp),
71 **kwargs)
72
73 def err(self, *messages, delimiter=" ", timestamp=None, **kwargs):
74 self.log_message(LogMessage(LOG_LEVEL.ERROR,
75 *messages,
76 delimiter=delimiter,
77 timestamp=timestamp),
78 **kwargs)
79
80 def log(self, log_level, message, timestamp=None, **kwargs):
81 self.log_message(LogMessage(log_level,
82 message,
83 timestamp=timestamp),
84 **kwargs)
85
86 def log_exception(self,
87 message,
88 exception,
89 log_level=LOG_LEVEL.ERROR,
90 timestamp=None,
91 **kwargs):
92 """
93 If the log_level of the printer is greater than DEBUG, it prints
94 only the message. If it is DEBUG or lower, it shows the message
95 along with the traceback of the exception.
96
97 :param message: The message to print.
98 :param exception: The exception to print.
99 :param log_level: The log_level of this message (not used when
100 logging the traceback. Tracebacks always have
101 a level of DEBUG).
102 :param timestamp: The time at which this log occurred. Defaults to
103 the current time.
104 :param kwargs: Keyword arguments to be passed when logging the
105 message (not used when logging the traceback).
106 """
107 if not isinstance(exception, BaseException):
108 raise TypeError("log_exception can only log derivatives of "
109 "BaseException.")
110
111 traceback_str = "\n".join(
112 traceback.format_exception(type(exception),
113 exception,
114 exception.__traceback__))
115
116 self.log(log_level, message, timestamp=timestamp, **kwargs)
117 self.log_message(
118 LogMessage(LOG_LEVEL.DEBUG,
119 "Exception was:" + "\n" + traceback_str,
120 timestamp=timestamp),
121 **kwargs)
122
123 def log_message(self, log_message, **kwargs):
124 if not isinstance(log_message, LogMessage):
125 raise TypeError("log_message should be of type LogMessage.")
126
127 if log_message.log_level < self.log_level:
128 return
129
130 self._print_log_message(
131 self._get_log_prefix(log_message.log_level, log_message.timestamp),
132 log_message,
133 **kwargs)
134
135 def _print_log_message(self, prefix, log_message, **kwargs):
136 """
137 Override this if you want to influence how the log message is printed.
138
139 If the underlying printer is a ColorPrinter, then colored logging is
140 used. You can turn it off in the underlying ColorPrinter if you want to
141 print uncolored.
142
143 :param prefix: The prefix to print (as string).
144 :param log_message: The LogMessage object to print.
145 :param kwargs: Any other keyword arguments.
146 """
147 if isinstance(self._printer, ColorPrinter):
148 self.printer.print(prefix,
149 end=" ",
150 color=LOG_LEVEL_COLORS[log_message.log_level],
151 **kwargs)
152 self.printer.print(log_message.message, **kwargs)
153 else:
154 self.printer.print(prefix, log_message.message, **kwargs)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py
--- a/coalib/output/printers/LogPrinter.py
+++ b/coalib/output/printers/LogPrinter.py
@@ -115,7 +115,7 @@
self.log(log_level, message, timestamp=timestamp, **kwargs)
self.log_message(
- LogMessage(LOG_LEVEL.DEBUG,
+ LogMessage(LOG_LEVEL.INFO,
"Exception was:" + "\n" + traceback_str,
timestamp=timestamp),
**kwargs)
|
{"golden_diff": "diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py\n--- a/coalib/output/printers/LogPrinter.py\n+++ b/coalib/output/printers/LogPrinter.py\n@@ -115,7 +115,7 @@\n \n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n- LogMessage(LOG_LEVEL.DEBUG,\n+ LogMessage(LOG_LEVEL.INFO,\n \"Exception was:\" + \"\\n\" + traceback_str,\n timestamp=timestamp),\n **kwargs)\n", "issue": "Make exception tracebacks default\nInstead of asking the user to run coala with `-L DEBUG`\n\n", "before_files": [{"content": "import traceback\n\nfrom pyprint.ColorPrinter import ColorPrinter\n\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL, LOG_LEVEL_COLORS\nfrom coalib.processes.communication.LogMessage import LogMessage\n\n\nclass LogPrinter:\n \"\"\"\n The LogPrinter class allows to print log messages to an underlying Printer.\n\n This class is an adapter, means you can create a LogPrinter from every\n existing Printer instance.\n \"\"\"\n\n def __init__(self,\n printer,\n log_level=LOG_LEVEL.INFO,\n timestamp_format=\"%X\"):\n \"\"\"\n Creates a new log printer from an existing Printer.\n\n :param printer: The underlying Printer where log messages\n shall be written to. If you inherit from\n LogPrinter, set it to self.\n :param log_level: The minimum log level, everything below will\n not be logged.\n :param timestamp_format: The format string for the\n datetime.today().strftime(format) method.\n \"\"\"\n self._printer = printer\n self.log_level = log_level\n self.timestamp_format = timestamp_format\n\n @property\n def printer(self):\n \"\"\"\n Returns the underlying printer where logs are printed to.\n \"\"\"\n return self._printer\n\n def _get_log_prefix(self, log_level, timestamp):\n datetime_string = timestamp.strftime(self.timestamp_format)\n\n if datetime_string != \"\":\n datetime_string = \"[\" + datetime_string + \"]\"\n\n return '[{}]{}'.format(LOG_LEVEL.reverse.get(log_level, \"ERROR\"),\n datetime_string)\n\n def debug(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.DEBUG,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def info(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.INFO,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def warn(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.WARNING,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def err(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.ERROR,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def log(self, log_level, message, timestamp=None, **kwargs):\n self.log_message(LogMessage(log_level,\n message,\n timestamp=timestamp),\n **kwargs)\n\n def log_exception(self,\n message,\n exception,\n log_level=LOG_LEVEL.ERROR,\n timestamp=None,\n **kwargs):\n \"\"\"\n If the log_level of the printer is greater than DEBUG, it prints\n only the message. If it is DEBUG or lower, it shows the message\n along with the traceback of the exception.\n\n :param message: The message to print.\n :param exception: The exception to print.\n :param log_level: The log_level of this message (not used when\n logging the traceback. Tracebacks always have\n a level of DEBUG).\n :param timestamp: The time at which this log occurred. 
Defaults to\n the current time.\n :param kwargs: Keyword arguments to be passed when logging the\n message (not used when logging the traceback).\n \"\"\"\n if not isinstance(exception, BaseException):\n raise TypeError(\"log_exception can only log derivatives of \"\n \"BaseException.\")\n\n traceback_str = \"\\n\".join(\n traceback.format_exception(type(exception),\n exception,\n exception.__traceback__))\n\n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n LogMessage(LOG_LEVEL.DEBUG,\n \"Exception was:\" + \"\\n\" + traceback_str,\n timestamp=timestamp),\n **kwargs)\n\n def log_message(self, log_message, **kwargs):\n if not isinstance(log_message, LogMessage):\n raise TypeError(\"log_message should be of type LogMessage.\")\n\n if log_message.log_level < self.log_level:\n return\n\n self._print_log_message(\n self._get_log_prefix(log_message.log_level, log_message.timestamp),\n log_message,\n **kwargs)\n\n def _print_log_message(self, prefix, log_message, **kwargs):\n \"\"\"\n Override this if you want to influence how the log message is printed.\n\n If the underlying printer is a ColorPrinter, then colored logging is\n used. You can turn it off in the underlying ColorPrinter if you want to\n print uncolored.\n\n :param prefix: The prefix to print (as string).\n :param log_message: The LogMessage object to print.\n :param kwargs: Any other keyword arguments.\n \"\"\"\n if isinstance(self._printer, ColorPrinter):\n self.printer.print(prefix,\n end=\" \",\n color=LOG_LEVEL_COLORS[log_message.log_level],\n **kwargs)\n self.printer.print(log_message.message, **kwargs)\n else:\n self.printer.print(prefix, log_message.message, **kwargs)\n", "path": "coalib/output/printers/LogPrinter.py"}], "after_files": [{"content": "import traceback\n\nfrom pyprint.ColorPrinter import ColorPrinter\n\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL, LOG_LEVEL_COLORS\nfrom coalib.processes.communication.LogMessage import LogMessage\n\n\nclass LogPrinter:\n \"\"\"\n The LogPrinter class allows to print log messages to an underlying Printer.\n\n This class is an adapter, means you can create a LogPrinter from every\n existing Printer instance.\n \"\"\"\n\n def __init__(self,\n printer,\n log_level=LOG_LEVEL.INFO,\n timestamp_format=\"%X\"):\n \"\"\"\n Creates a new log printer from an existing Printer.\n\n :param printer: The underlying Printer where log messages\n shall be written to. 
If you inherit from\n LogPrinter, set it to self.\n :param log_level: The minimum log level, everything below will\n not be logged.\n :param timestamp_format: The format string for the\n datetime.today().strftime(format) method.\n \"\"\"\n self._printer = printer\n self.log_level = log_level\n self.timestamp_format = timestamp_format\n\n @property\n def printer(self):\n \"\"\"\n Returns the underlying printer where logs are printed to.\n \"\"\"\n return self._printer\n\n def _get_log_prefix(self, log_level, timestamp):\n datetime_string = timestamp.strftime(self.timestamp_format)\n\n if datetime_string != \"\":\n datetime_string = \"[\" + datetime_string + \"]\"\n\n return '[{}]{}'.format(LOG_LEVEL.reverse.get(log_level, \"ERROR\"),\n datetime_string)\n\n def debug(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.DEBUG,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def info(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.INFO,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def warn(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.WARNING,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def err(self, *messages, delimiter=\" \", timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.ERROR,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def log(self, log_level, message, timestamp=None, **kwargs):\n self.log_message(LogMessage(log_level,\n message,\n timestamp=timestamp),\n **kwargs)\n\n def log_exception(self,\n message,\n exception,\n log_level=LOG_LEVEL.ERROR,\n timestamp=None,\n **kwargs):\n \"\"\"\n If the log_level of the printer is greater than DEBUG, it prints\n only the message. If it is DEBUG or lower, it shows the message\n along with the traceback of the exception.\n\n :param message: The message to print.\n :param exception: The exception to print.\n :param log_level: The log_level of this message (not used when\n logging the traceback. Tracebacks always have\n a level of DEBUG).\n :param timestamp: The time at which this log occurred. Defaults to\n the current time.\n :param kwargs: Keyword arguments to be passed when logging the\n message (not used when logging the traceback).\n \"\"\"\n if not isinstance(exception, BaseException):\n raise TypeError(\"log_exception can only log derivatives of \"\n \"BaseException.\")\n\n traceback_str = \"\\n\".join(\n traceback.format_exception(type(exception),\n exception,\n exception.__traceback__))\n\n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n LogMessage(LOG_LEVEL.INFO,\n \"Exception was:\" + \"\\n\" + traceback_str,\n timestamp=timestamp),\n **kwargs)\n\n def log_message(self, log_message, **kwargs):\n if not isinstance(log_message, LogMessage):\n raise TypeError(\"log_message should be of type LogMessage.\")\n\n if log_message.log_level < self.log_level:\n return\n\n self._print_log_message(\n self._get_log_prefix(log_message.log_level, log_message.timestamp),\n log_message,\n **kwargs)\n\n def _print_log_message(self, prefix, log_message, **kwargs):\n \"\"\"\n Override this if you want to influence how the log message is printed.\n\n If the underlying printer is a ColorPrinter, then colored logging is\n used. 
You can turn it off in the underlying ColorPrinter if you want to\n print uncolored.\n\n :param prefix: The prefix to print (as string).\n :param log_message: The LogMessage object to print.\n :param kwargs: Any other keyword arguments.\n \"\"\"\n if isinstance(self._printer, ColorPrinter):\n self.printer.print(prefix,\n end=\" \",\n color=LOG_LEVEL_COLORS[log_message.log_level],\n **kwargs)\n self.printer.print(log_message.message, **kwargs)\n else:\n self.printer.print(prefix, log_message.message, **kwargs)\n", "path": "coalib/output/printers/LogPrinter.py"}]}
| 1,727 | 125 |
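A note on the coala LogPrinter patch above: it only raises the level at which the exception traceback is logged from DEBUG to INFO. The sketch below is not coalib code, just a toy version of the threshold check that `log_message` performs (records below the printer's `log_level` are dropped), showing why logging the traceback at INFO makes it visible by default without `-L DEBUG`.

```python
# Toy log levels mirroring the LOG_LEVEL ordering: DEBUG < INFO < WARNING < ERROR.
DEBUG, INFO, WARNING, ERROR = range(4)

def is_emitted(message_level, printer_level=INFO):
    """A record is printed only if its level reaches the printer's threshold."""
    return message_level >= printer_level

# Before the patch: a traceback logged at DEBUG is dropped by the default INFO printer.
print(is_emitted(DEBUG))  # False
# After the patch: a traceback logged at INFO is shown without passing -L DEBUG.
print(is_emitted(INFO))   # True
```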
gh_patches_debug_21305
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-335
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Latest virtualenv breaks pre-commit
See also #299
Failure looks like:
```
17:00:19 hookid: sort-simple-yaml
17:00:19
17:00:19 bash: /nail/home/push/.pre-commit/reposkzFrD//tmp/tmp.cEk6TCoZOS/srv-configs/py_env-default/bin/activate: No such file or directory
```
```
$ pip install virtualenv --upgrade
Downloading/unpacking virtualenv
Downloading virtualenv-14.0.0-py2.py3-none-any.whl (1.8MB): 1.8MB downloaded
Installing collected packages: virtualenv
Successfully installed virtualenv
Cleaning up...
$ python
Python 2.6.7 (r267:88850, Dec 2 2011, 20:27:26)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import virtualenv
>>> virtualenv.path_locations('foo')
('/nail/home/asottile/foo', '/nail/home/asottile/foo/lib/python2.6', '/nail/home/asottile/foo/include/python2.6', '/nail/home/asottile/foo/bin')
>>>
$ pip install virtualenv==1.11.5
Downloading/unpacking virtualenv==1.11.5
Downloading virtualenv-1.11.5.tar.gz (1.8MB): 1.8MB downloaded
Running setup.py (path:/nail/home/asottile/venv/build/virtualenv/setup.py) egg_info for package virtualenv
warning: no previously-included files matching '*' found under directory 'docs/_templates'
warning: no previously-included files matching '*' found under directory 'docs/_build'
Installing collected packages: virtualenv
Found existing installation: virtualenv 14.0.0
Uninstalling virtualenv:
Successfully uninstalled virtualenv
Running setup.py install for virtualenv
warning: no previously-included files matching '*' found under directory 'docs/_templates'
warning: no previously-included files matching '*' found under directory 'docs/_build'
Installing virtualenv script to /nail/home/asottile/venv/bin
Installing virtualenv-2.6 script to /nail/home/asottile/venv/bin
Successfully installed virtualenv
Cleaning up...
$ python
Python 2.6.7 (r267:88850, Dec 2 2011, 20:27:26)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import virtualenv
>>> virtualenv.path_locations('foo')
('foo', 'foo/lib/python2.6', 'foo/include/python2.6', 'foo/bin')
>>>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/python.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import distutils.spawn
5 import os
6 import sys
7
8 import virtualenv
9
10 from pre_commit.languages import helpers
11 from pre_commit.util import clean_path_on_failure
12 from pre_commit.util import shell_escape
13
14
15 ENVIRONMENT_DIR = 'py_env'
16
17
18 class PythonEnv(helpers.Environment):
19 @property
20 def env_prefix(self):
21 return ". '{{prefix}}{0}activate' &&".format(
22 virtualenv.path_locations(
23 helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
24 )[-1].rstrip(os.sep) + os.sep,
25 )
26
27
28 @contextlib.contextmanager
29 def in_env(repo_cmd_runner, language_version):
30 yield PythonEnv(repo_cmd_runner, language_version)
31
32
33 def norm_version(version):
34 if os.name == 'nt': # pragma: no cover (windows)
35 # Try looking up by name
36 if distutils.spawn.find_executable(version):
37 return version
38
39 # If it is in the form pythonx.x search in the default
40 # place on windows
41 if version.startswith('python'):
42 return r'C:\{0}\python.exe'.format(version.replace('.', ''))
43
44 # Otherwise assume it is a path
45 return os.path.expanduser(version)
46
47
48 def install_environment(
49 repo_cmd_runner,
50 version='default',
51 additional_dependencies=None,
52 ):
53 assert repo_cmd_runner.exists('setup.py')
54 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
55
56 # Install a virtualenv
57 with clean_path_on_failure(repo_cmd_runner.path(directory)):
58 venv_cmd = [
59 sys.executable, '-m', 'virtualenv',
60 '{{prefix}}{0}'.format(directory)
61 ]
62 if version != 'default':
63 venv_cmd.extend(['-p', norm_version(version)])
64 repo_cmd_runner.run(venv_cmd)
65 with in_env(repo_cmd_runner, version) as env:
66 env.run("cd '{prefix}' && pip install .", encoding=None)
67 if additional_dependencies:
68 env.run(
69 "cd '{prefix}' && pip install " +
70 ' '.join(
71 shell_escape(dep) for dep in additional_dependencies
72 ),
73 encoding=None,
74 )
75
76
77 def run_hook(repo_cmd_runner, hook, file_args):
78 with in_env(repo_cmd_runner, hook['language_version']) as env:
79 return helpers.run_hook(env, hook, file_args)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -5,8 +5,6 @@
import os
import sys
-import virtualenv
-
from pre_commit.languages import helpers
from pre_commit.util import clean_path_on_failure
from pre_commit.util import shell_escape
@@ -15,13 +13,22 @@
ENVIRONMENT_DIR = 'py_env'
+def bin_dir(venv):
+ """On windows there's a different directory for the virtualenv"""
+ if os.name == 'nt': # pragma: no cover (windows)
+ return os.path.join(venv, 'Scripts')
+ else:
+ return os.path.join(venv, 'bin')
+
+
class PythonEnv(helpers.Environment):
@property
def env_prefix(self):
- return ". '{{prefix}}{0}activate' &&".format(
- virtualenv.path_locations(
+ return ". '{{prefix}}{0}{1}activate' &&".format(
+ bin_dir(
helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
- )[-1].rstrip(os.sep) + os.sep,
+ ),
+ os.sep,
)
|
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -5,8 +5,6 @@\n import os\n import sys\n \n-import virtualenv\n-\n from pre_commit.languages import helpers\n from pre_commit.util import clean_path_on_failure\n from pre_commit.util import shell_escape\n@@ -15,13 +13,22 @@\n ENVIRONMENT_DIR = 'py_env'\n \n \n+def bin_dir(venv):\n+ \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n+ if os.name == 'nt': # pragma: no cover (windows)\n+ return os.path.join(venv, 'Scripts')\n+ else:\n+ return os.path.join(venv, 'bin')\n+\n+\n class PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n- return \". '{{prefix}}{0}activate' &&\".format(\n- virtualenv.path_locations(\n+ return \". '{{prefix}}{0}{1}activate' &&\".format(\n+ bin_dir(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n- )[-1].rstrip(os.sep) + os.sep,\n+ ),\n+ os.sep,\n )\n", "issue": "Latest virtualenv breaks pre-commit\nSee also #299 \n\nFailure looks like:\n\n```\n17:00:19 hookid: sort-simple-yaml\n17:00:19 \n17:00:19 bash: /nail/home/push/.pre-commit/reposkzFrD//tmp/tmp.cEk6TCoZOS/srv-configs/py_env-default/bin/activate: No such file or directory\n```\n\n```\n$ pip install virtualenv --upgrade\nDownloading/unpacking virtualenv\n Downloading virtualenv-14.0.0-py2.py3-none-any.whl (1.8MB): 1.8MB downloaded\nInstalling collected packages: virtualenv\nSuccessfully installed virtualenv\nCleaning up...\n$ python\nPython 2.6.7 (r267:88850, Dec 2 2011, 20:27:26) \n[GCC 4.4.3] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import virtualenv\n>>> virtualenv.path_locations('foo')\n('/nail/home/asottile/foo', '/nail/home/asottile/foo/lib/python2.6', '/nail/home/asottile/foo/include/python2.6', '/nail/home/asottile/foo/bin')\n>>> \n$ pip install virtualenv==1.11.5\nDownloading/unpacking virtualenv==1.11.5\n Downloading virtualenv-1.11.5.tar.gz (1.8MB): 1.8MB downloaded\n Running setup.py (path:/nail/home/asottile/venv/build/virtualenv/setup.py) egg_info for package virtualenv\n warning: no previously-included files matching '*' found under directory 'docs/_templates'\n warning: no previously-included files matching '*' found under directory 'docs/_build'\nInstalling collected packages: virtualenv\n Found existing installation: virtualenv 14.0.0\n Uninstalling virtualenv:\n Successfully uninstalled virtualenv\n Running setup.py install for virtualenv\n warning: no previously-included files matching '*' found under directory 'docs/_templates'\n warning: no previously-included files matching '*' found under directory 'docs/_build'\n Installing virtualenv script to /nail/home/asottile/venv/bin\n Installing virtualenv-2.6 script to /nail/home/asottile/venv/bin\nSuccessfully installed virtualenv\nCleaning up...\n$ python\nPython 2.6.7 (r267:88850, Dec 2 2011, 20:27:26) \n[GCC 4.4.3] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import virtualenv\n>>> virtualenv.path_locations('foo')\n('foo', 'foo/lib/python2.6', 'foo/include/python2.6', 'foo/bin')\n>>>\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import shell_escape\n\n\nENVIRONMENT_DIR = 
'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n )[-1].rstrip(os.sep) + os.sep,\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n yield PythonEnv(repo_cmd_runner, language_version)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=None,\n):\n assert repo_cmd_runner.exists('setup.py')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version) as env:\n env.run(\"cd '{prefix}' && pip install .\", encoding=None)\n if additional_dependencies:\n env.run(\n \"cd '{prefix}' && pip install \" +\n ' '.join(\n shell_escape(dep) for dep in additional_dependencies\n ),\n encoding=None,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import shell_escape\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv):\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n if os.name == 'nt': # pragma: no cover (windows)\n return os.path.join(venv, 'Scripts')\n else:\n return os.path.join(venv, 'bin')\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". 
'{{prefix}}{0}{1}activate' &&\".format(\n bin_dir(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n ),\n os.sep,\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n yield PythonEnv(repo_cmd_runner, language_version)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=None,\n):\n assert repo_cmd_runner.exists('setup.py')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version) as env:\n env.run(\"cd '{prefix}' && pip install .\", encoding=None)\n if additional_dependencies:\n env.run(\n \"cd '{prefix}' && pip install \" +\n ' '.join(\n shell_escape(dep) for dep in additional_dependencies\n ),\n encoding=None,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}]}
| 1,585 | 278 |
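The pre-commit patch above stops calling `virtualenv.path_locations()` (whose return values switched from relative to absolute paths in virtualenv 14.0.0, as the transcript in the issue shows) and instead derives the scripts directory itself. A standalone sketch of that helper, written here outside the pre-commit codebase:

```python
import os

def bin_dir(venv):
    """Virtualenv scripts live in 'Scripts' on Windows and in 'bin' elsewhere."""
    return os.path.join(venv, 'Scripts' if os.name == 'nt' else 'bin')

# The result stays relative when the input is relative, so '{prefix}'-style path
# templating keeps working no matter which virtualenv version is installed.
print(bin_dir('py_env-default'))                            # py_env-default/bin on POSIX
print(os.path.join(bin_dir('py_env-default'), 'activate'))  # py_env-default/bin/activate
```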
gh_patches_debug_17631
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-391
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a specific `eligibility/start.html` template, update the view to use it
Like for some other pages, we need a specific template file for the Eligibility Start page, which has some unique elements to render and behaviors to handle.
The scope of this task is to simply create the new template, `{% extends "core/page.html" %}`, and get the view to use it -- keeping everything else about the page the same.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/eligibility/views.py`
Content:
```
1 """
2 The eligibility application: view definitions for the eligibility verification flow.
3 """
4 from django.contrib import messages
5 from django.shortcuts import redirect
6 from django.urls import reverse
7 from django.utils.decorators import decorator_from_middleware
8 from django.utils.translation import pgettext, gettext as _
9
10 from benefits.core import middleware, recaptcha, session, viewmodels
11 from benefits.core.models import EligibilityVerifier
12 from benefits.core.views import PageTemplateResponse
13 from . import analytics, api, forms
14
15
16 @decorator_from_middleware(middleware.AgencySessionRequired)
17 def index(request):
18 """View handler for the eligibility verifier selection form."""
19
20 session.update(request, eligibility_types=[], origin=reverse("eligibility:index"))
21 agency = session.agency(request)
22
23 eligibility_start = reverse("eligibility:start")
24
25 page = viewmodels.Page(
26 title=_("eligibility.pages.index.title"),
27 content_title=_("eligibility.pages.index.content_title"),
28 forms=forms.EligibilityVerifierSelectionForm(agency=agency),
29 )
30
31 if request.method == "POST":
32 form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)
33
34 if form.is_valid():
35 verifier_id = form.cleaned_data.get("verifier")
36 verifier = EligibilityVerifier.objects.get(id=verifier_id)
37 session.update(request, verifier=verifier)
38
39 response = redirect(eligibility_start)
40 else:
41 # form was not valid, allow for correction/resubmission
42 page.forms = [form]
43 response = PageTemplateResponse(request, page)
44 else:
45 if agency.eligibility_verifiers.count() == 1:
46 verifier = agency.eligibility_verifiers.first()
47 session.update(request, verifier=verifier)
48 response = redirect(eligibility_start)
49 else:
50 response = PageTemplateResponse(request, page)
51
52 return response
53
54
55 @decorator_from_middleware(middleware.AgencySessionRequired)
56 @decorator_from_middleware(middleware.VerifierSessionRequired)
57 def start(request):
58 """View handler for the eligibility verification getting started screen."""
59
60 session.update(request, eligibility_types=[])
61 verifier = session.verifier(request)
62
63 page = viewmodels.Page(
64 title=_("eligibility.pages.start.title"),
65 content_title=_(verifier.start_content_title),
66 media=[
67 viewmodels.MediaItem(
68 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
69 heading=_(verifier.start_item_name),
70 details=_(verifier.start_item_description),
71 ),
72 viewmodels.MediaItem(
73 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
74 heading=_("eligibility.pages.start.items[1].title"),
75 details=_("eligibility.pages.start.items[1].text"),
76 ),
77 ],
78 paragraphs=[_(verifier.start_blurb)],
79 button=viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm")),
80 )
81
82 return PageTemplateResponse(request, page)
83
84
85 @decorator_from_middleware(middleware.AgencySessionRequired)
86 @decorator_from_middleware(middleware.RateLimit)
87 @decorator_from_middleware(middleware.VerifierSessionRequired)
88 def confirm(request):
89 """View handler for the eligibility verification form."""
90
91 verifier = session.verifier(request)
92
93 page = viewmodels.Page(
94 title=_(verifier.form_title),
95 content_title=_(verifier.form_content_title),
96 paragraphs=[_(verifier.form_blurb)],
97 form=forms.EligibilityVerificationForm(auto_id=True, label_suffix="", verifier=verifier),
98 classes="text-lg-center",
99 )
100
101 if request.method == "POST":
102 analytics.started_eligibility(request)
103
104 form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)
105 response = _verify(request, form)
106
107 if response is None:
108 # form was not valid, allow for correction/resubmission
109 analytics.returned_error(request, form.errors)
110 page.forms = [form]
111 response = PageTemplateResponse(request, page)
112 elif session.eligible(request):
113 eligibility = session.eligibility(request)
114 response = verified(request, [eligibility.name])
115 else:
116 response = PageTemplateResponse(request, page)
117
118 return response
119
120
121 def _verify(request, form):
122 """Helper calls the eligibility verification API with user input."""
123
124 if not form.is_valid():
125 if recaptcha.has_error(form):
126 messages.error(request, "Recaptcha failed. Please try again.")
127 return None
128
129 sub, name = form.cleaned_data.get("sub"), form.cleaned_data.get("name")
130
131 agency = session.agency(request)
132 verifier = session.verifier(request)
133 client = api.Client(agency, verifier)
134
135 response = client.verify(sub, name)
136
137 if response.error and any(response.error):
138 form.add_api_errors(response.error)
139 return None
140 elif any(response.eligibility):
141 return verified(request, response.eligibility)
142 else:
143 return unverified(request)
144
145
146 @decorator_from_middleware(middleware.AgencySessionRequired)
147 def verified(request, verified_types):
148 """View handler for the verified eligibility page."""
149
150 analytics.returned_success(request)
151
152 enrollment_index = reverse("enrollment:index")
153 session.update(request, eligibility_types=verified_types, origin=enrollment_index)
154
155 return redirect(enrollment_index)
156
157
158 @decorator_from_middleware(middleware.AgencySessionRequired)
159 @decorator_from_middleware(middleware.VerifierSessionRequired)
160 def unverified(request):
161 """View handler for the unverified eligibility page."""
162
163 analytics.returned_fail(request)
164
165 # tel: link to agency phone number
166 agency = session.agency(request)
167 buttons = viewmodels.Button.agency_contact_links(agency)
168
169 verifier = session.verifier(request)
170
171 page = viewmodels.Page(
172 title=_(verifier.unverified_title),
173 content_title=_(verifier.unverified_content_title),
174 icon=viewmodels.Icon("idcardquestion", pgettext("image alt text", "core.icons.idcardquestion")),
175 paragraphs=[_(verifier.unverified_blurb), _("eligibility.pages.unverified.p[1]")],
176 buttons=buttons,
177 classes="text-lg-center",
178 )
179
180 return PageTemplateResponse(request, page)
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py
--- a/benefits/eligibility/views.py
+++ b/benefits/eligibility/views.py
@@ -3,6 +3,7 @@
"""
from django.contrib import messages
from django.shortcuts import redirect
+from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.decorators import decorator_from_middleware
from django.utils.translation import pgettext, gettext as _
@@ -79,7 +80,7 @@
button=viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm")),
)
- return PageTemplateResponse(request, page)
+ return TemplateResponse(request, "eligibility/start.html", page.context_dict())
@decorator_from_middleware(middleware.AgencySessionRequired)
|
{"golden_diff": "diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py\n--- a/benefits/eligibility/views.py\n+++ b/benefits/eligibility/views.py\n@@ -3,6 +3,7 @@\n \"\"\"\n from django.contrib import messages\n from django.shortcuts import redirect\n+from django.template.response import TemplateResponse\n from django.urls import reverse\n from django.utils.decorators import decorator_from_middleware\n from django.utils.translation import pgettext, gettext as _\n@@ -79,7 +80,7 @@\n button=viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\")),\n )\n \n- return PageTemplateResponse(request, page)\n+ return TemplateResponse(request, \"eligibility/start.html\", page.context_dict())\n \n \n @decorator_from_middleware(middleware.AgencySessionRequired)\n", "issue": "Create a specific `eligibility/start.html` template, update the view to use it\nLike for some other pages, we need a specific template file for the Eligibility Start page, which has some unique elements to render and behaviors to handle.\r\n\r\nThe scope of this task it to simply create the new template, `{% extends \"core/page.html\" %}`, and get the view to use it -- keeping everything else about the page the same.\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: view definitions for the eligibility verification flow.\n\"\"\"\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import middleware, recaptcha, session, viewmodels\nfrom benefits.core.models import EligibilityVerifier\nfrom benefits.core.views import PageTemplateResponse\nfrom . 
import analytics, api, forms\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the eligibility verifier selection form.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:index\"))\n agency = session.agency(request)\n\n eligibility_start = reverse(\"eligibility:start\")\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.index.title\"),\n content_title=_(\"eligibility.pages.index.content_title\"),\n forms=forms.EligibilityVerifierSelectionForm(agency=agency),\n )\n\n if request.method == \"POST\":\n form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)\n\n if form.is_valid():\n verifier_id = form.cleaned_data.get(\"verifier\")\n verifier = EligibilityVerifier.objects.get(id=verifier_id)\n session.update(request, verifier=verifier)\n\n response = redirect(eligibility_start)\n else:\n # form was not valid, allow for correction/resubmission\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n else:\n if agency.eligibility_verifiers.count() == 1:\n verifier = agency.eligibility_verifiers.first()\n session.update(request, verifier=verifier)\n response = redirect(eligibility_start)\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef start(request):\n \"\"\"View handler for the eligibility verification getting started screen.\"\"\"\n\n session.update(request, eligibility_types=[])\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.start.title\"),\n content_title=_(verifier.start_content_title),\n media=[\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(verifier.start_item_name),\n details=_(verifier.start_item_description),\n ),\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"eligibility.pages.start.items[1].title\"),\n details=_(\"eligibility.pages.start.items[1].text\"),\n ),\n ],\n paragraphs=[_(verifier.start_blurb)],\n button=viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\")),\n )\n\n return PageTemplateResponse(request, page)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.RateLimit)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.form_title),\n content_title=_(verifier.form_content_title),\n paragraphs=[_(verifier.form_blurb)],\n form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\", verifier=verifier),\n classes=\"text-lg-center\",\n )\n\n if request.method == \"POST\":\n analytics.started_eligibility(request)\n\n form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)\n response = _verify(request, form)\n\n if response is None:\n # form was not valid, allow for correction/resubmission\n analytics.returned_error(request, form.errors)\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n elif session.eligible(request):\n eligibility = session.eligibility(request)\n response = verified(request, 
[eligibility.name])\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\ndef _verify(request, form):\n \"\"\"Helper calls the eligibility verification API with user input.\"\"\"\n\n if not form.is_valid():\n if recaptcha.has_error(form):\n messages.error(request, \"Recaptcha failed. Please try again.\")\n return None\n\n sub, name = form.cleaned_data.get(\"sub\"), form.cleaned_data.get(\"name\")\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n client = api.Client(agency, verifier)\n\n response = client.verify(sub, name)\n\n if response.error and any(response.error):\n form.add_api_errors(response.error)\n return None\n elif any(response.eligibility):\n return verified(request, response.eligibility)\n else:\n return unverified(request)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef verified(request, verified_types):\n \"\"\"View handler for the verified eligibility page.\"\"\"\n\n analytics.returned_success(request)\n\n enrollment_index = reverse(\"enrollment:index\")\n session.update(request, eligibility_types=verified_types, origin=enrollment_index)\n\n return redirect(enrollment_index)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef unverified(request):\n \"\"\"View handler for the unverified eligibility page.\"\"\"\n\n analytics.returned_fail(request)\n\n # tel: link to agency phone number\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.unverified_title),\n content_title=_(verifier.unverified_content_title),\n icon=viewmodels.Icon(\"idcardquestion\", pgettext(\"image alt text\", \"core.icons.idcardquestion\")),\n paragraphs=[_(verifier.unverified_blurb), _(\"eligibility.pages.unverified.p[1]\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return PageTemplateResponse(request, page)\n", "path": "benefits/eligibility/views.py"}], "after_files": [{"content": "\"\"\"\nThe eligibility application: view definitions for the eligibility verification flow.\n\"\"\"\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import middleware, recaptcha, session, viewmodels\nfrom benefits.core.models import EligibilityVerifier\nfrom benefits.core.views import PageTemplateResponse\nfrom . 
import analytics, api, forms\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the eligibility verifier selection form.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:index\"))\n agency = session.agency(request)\n\n eligibility_start = reverse(\"eligibility:start\")\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.index.title\"),\n content_title=_(\"eligibility.pages.index.content_title\"),\n forms=forms.EligibilityVerifierSelectionForm(agency=agency),\n )\n\n if request.method == \"POST\":\n form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)\n\n if form.is_valid():\n verifier_id = form.cleaned_data.get(\"verifier\")\n verifier = EligibilityVerifier.objects.get(id=verifier_id)\n session.update(request, verifier=verifier)\n\n response = redirect(eligibility_start)\n else:\n # form was not valid, allow for correction/resubmission\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n else:\n if agency.eligibility_verifiers.count() == 1:\n verifier = agency.eligibility_verifiers.first()\n session.update(request, verifier=verifier)\n response = redirect(eligibility_start)\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef start(request):\n \"\"\"View handler for the eligibility verification getting started screen.\"\"\"\n\n session.update(request, eligibility_types=[])\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.start.title\"),\n content_title=_(verifier.start_content_title),\n media=[\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(verifier.start_item_name),\n details=_(verifier.start_item_description),\n ),\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"eligibility.pages.start.items[1].title\"),\n details=_(\"eligibility.pages.start.items[1].text\"),\n ),\n ],\n paragraphs=[_(verifier.start_blurb)],\n button=viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\")),\n )\n\n return TemplateResponse(request, \"eligibility/start.html\", page.context_dict())\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.RateLimit)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.form_title),\n content_title=_(verifier.form_content_title),\n paragraphs=[_(verifier.form_blurb)],\n form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\", verifier=verifier),\n classes=\"text-lg-center\",\n )\n\n if request.method == \"POST\":\n analytics.started_eligibility(request)\n\n form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)\n response = _verify(request, form)\n\n if response is None:\n # form was not valid, allow for correction/resubmission\n analytics.returned_error(request, form.errors)\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n elif session.eligible(request):\n eligibility = session.eligibility(request)\n 
response = verified(request, [eligibility.name])\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\ndef _verify(request, form):\n \"\"\"Helper calls the eligibility verification API with user input.\"\"\"\n\n if not form.is_valid():\n if recaptcha.has_error(form):\n messages.error(request, \"Recaptcha failed. Please try again.\")\n return None\n\n sub, name = form.cleaned_data.get(\"sub\"), form.cleaned_data.get(\"name\")\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n client = api.Client(agency, verifier)\n\n response = client.verify(sub, name)\n\n if response.error and any(response.error):\n form.add_api_errors(response.error)\n return None\n elif any(response.eligibility):\n return verified(request, response.eligibility)\n else:\n return unverified(request)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef verified(request, verified_types):\n \"\"\"View handler for the verified eligibility page.\"\"\"\n\n analytics.returned_success(request)\n\n enrollment_index = reverse(\"enrollment:index\")\n session.update(request, eligibility_types=verified_types, origin=enrollment_index)\n\n return redirect(enrollment_index)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef unverified(request):\n \"\"\"View handler for the unverified eligibility page.\"\"\"\n\n analytics.returned_fail(request)\n\n # tel: link to agency phone number\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.unverified_title),\n content_title=_(verifier.unverified_content_title),\n icon=viewmodels.Icon(\"idcardquestion\", pgettext(\"image alt text\", \"core.icons.idcardquestion\")),\n paragraphs=[_(verifier.unverified_blurb), _(\"eligibility.pages.unverified.p[1]\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return PageTemplateResponse(request, page)\n", "path": "benefits/eligibility/views.py"}]}
| 2,117 | 182 |
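The benefits patch above replaces `PageTemplateResponse` with Django's `TemplateResponse` pointed at the new `eligibility/start.html` template (which extends `core/page.html`). Below is a hypothetical, trimmed-down rendering helper in the same spirit; it assumes a configured Django project and a `page` viewmodel exposing `context_dict()`, as in the repository, and is not the project's exact view.

```python
# Hypothetical sketch, assuming a configured Django project: render the dedicated
# eligibility/start.html template with the viewmodel's context instead of the
# generic PageTemplateResponse.
from django.template.response import TemplateResponse

def render_start_page(request, page):
    # `page` stands in for the viewmodels.Page built earlier in the start() view.
    return TemplateResponse(request, "eligibility/start.html", page.context_dict())
```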
gh_patches_debug_4069
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-7454
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Patreon login doesn't work/setup is not intuitive
**Describe the bug**
While trying to set up the Patreon social integration, I realised that the required fields of Consumer Key and Consumer Secret don't seem to apply to the data that Patreon provides with its API - or at least the terminology is confusing. But outside of that, the default scopes that it seems to be presenting Patreon with upon setup are not part of Patreon's API, and will always error out with an "Invalid Scope" unless manually replaced with the correct ones. If this social integration is working and I'm mistaken, it is missing documentation that would definitely make it easier on new users.
To Reproduce
Steps to reproduce the behavior:
1. Go to the social integration settings.
2. Click on the Patreon integration.
3. Enter the Client ID and Secret into the Key and Secret fields (assuming that's what you're supposed to use)
4. Get an invalid_scope error when trying to sign in
Expected behavior
Should allow users to log in via Patreon.
Screenshots
N/A
Logs
N/A
Version and Deployment (please complete the following information):
authentik version: 2023.6.1
Deployment: TrueNAS
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/patreon.py`
Content:
```
1 """Patreon OAuth Views"""
2 from typing import Any
3
4 from authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient
5 from authentik.sources.oauth.models import OAuthSource
6 from authentik.sources.oauth.types.registry import SourceType, registry
7 from authentik.sources.oauth.views.callback import OAuthCallback
8 from authentik.sources.oauth.views.redirect import OAuthRedirect
9
10
11 class PatreonOAuthRedirect(OAuthRedirect):
12 """Patreon OAuth2 Redirect"""
13
14 def get_additional_parameters(self, source: OAuthSource): # pragma: no cover
15 return {
16 "scope": ["openid", "email", "profile"],
17 }
18
19
20 class PatreonOAuthCallback(OAuthCallback):
21 """Patreon OAuth2 Callback"""
22
23 client_class: UserprofileHeaderAuthClient
24
25 def get_user_id(self, info: dict[str, str]) -> str:
26 return info.get("data", {}).get("id")
27
28 def get_user_enroll_context(
29 self,
30 info: dict[str, Any],
31 ) -> dict[str, Any]:
32 return {
33 "username": info.get("data", {}).get("attributes", {}).get("vanity"),
34 "email": info.get("data", {}).get("attributes", {}).get("email"),
35 "name": info.get("data", {}).get("attributes", {}).get("full_name"),
36 }
37
38
39 @registry.register()
40 class PatreonType(SourceType):
41 """OpenIDConnect Type definition"""
42
43 callback_view = PatreonOAuthCallback
44 redirect_view = PatreonOAuthRedirect
45 name = "Patreon"
46 slug = "patreon"
47
48 authorization_url = "https://www.patreon.com/oauth2/authorize"
49 access_token_url = "https://www.patreon.com/api/oauth2/token" # nosec
50 profile_url = "https://www.patreon.com/api/oauth2/api/current_user"
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/sources/oauth/types/patreon.py b/authentik/sources/oauth/types/patreon.py
--- a/authentik/sources/oauth/types/patreon.py
+++ b/authentik/sources/oauth/types/patreon.py
@@ -12,8 +12,9 @@
"""Patreon OAuth2 Redirect"""
def get_additional_parameters(self, source: OAuthSource): # pragma: no cover
+ # https://docs.patreon.com/#scopes
return {
- "scope": ["openid", "email", "profile"],
+ "scope": ["identity", "identity[email]"],
}
|
{"golden_diff": "diff --git a/authentik/sources/oauth/types/patreon.py b/authentik/sources/oauth/types/patreon.py\n--- a/authentik/sources/oauth/types/patreon.py\n+++ b/authentik/sources/oauth/types/patreon.py\n@@ -12,8 +12,9 @@\n \"\"\"Patreon OAuth2 Redirect\"\"\"\r\n \r\n def get_additional_parameters(self, source: OAuthSource): # pragma: no cover\r\n+ # https://docs.patreon.com/#scopes\r\n return {\r\n- \"scope\": [\"openid\", \"email\", \"profile\"],\r\n+ \"scope\": [\"identity\", \"identity[email]\"],\r\n }\n", "issue": "Patreon login doesn't work/setup is not intuitive\n**Describe the bug**\r\nWhile trying to set up the Patreon social integration, I realised that the required fields of Consumer Key and Consumer Secret don't seem to apply to the data that Patreon provides with its API - or at least the terminology is confusing. But outside of that, the default scopes that it seems to be presenting Patreon with upon setup are not part of Patreon's API, and will always error out with an \"Invalid Scope\" unless manually replaced with the correct ones. If this social integration is working and I'm mistaken, it is missing documentation that would definitely make it easier on new users.\r\n\r\nTo Reproduce\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to the social integration settings.\r\n2. Click on the Patreon integration.\r\n3. Enter the Client ID and Secret into the Key and Secret fields (assuming that's what you're supposed to use)\r\n4. Get an invalid_scope error when trying to sign in\r\n\r\nExpected behavior\r\nShould allow users to log in via Patreon.\r\n\r\nScreenshots\r\nN/A\r\n\r\nLogs\r\nN/A\r\n\r\nVersion and Deployment (please complete the following information):\r\n\r\nauthentik version: 2023.6.1\r\nDeployment: TrueNAS\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Patreon OAuth Views\"\"\"\r\nfrom typing import Any\r\n\r\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\r\nfrom authentik.sources.oauth.models import OAuthSource\r\nfrom authentik.sources.oauth.types.registry import SourceType, registry\r\nfrom authentik.sources.oauth.views.callback import OAuthCallback\r\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\r\n\r\n\r\nclass PatreonOAuthRedirect(OAuthRedirect):\r\n \"\"\"Patreon OAuth2 Redirect\"\"\"\r\n\r\n def get_additional_parameters(self, source: OAuthSource): # pragma: no cover\r\n return {\r\n \"scope\": [\"openid\", \"email\", \"profile\"],\r\n }\r\n\r\n\r\nclass PatreonOAuthCallback(OAuthCallback):\r\n \"\"\"Patreon OAuth2 Callback\"\"\"\r\n\r\n client_class: UserprofileHeaderAuthClient\r\n\r\n def get_user_id(self, info: dict[str, str]) -> str:\r\n return info.get(\"data\", {}).get(\"id\")\r\n\r\n def get_user_enroll_context(\r\n self,\r\n info: dict[str, Any],\r\n ) -> dict[str, Any]:\r\n return {\r\n \"username\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"vanity\"),\r\n \"email\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"email\"),\r\n \"name\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"full_name\"),\r\n }\r\n\r\n\r\[email protected]()\r\nclass PatreonType(SourceType):\r\n \"\"\"OpenIDConnect Type definition\"\"\"\r\n\r\n callback_view = PatreonOAuthCallback\r\n redirect_view = PatreonOAuthRedirect\r\n name = \"Patreon\"\r\n slug = \"patreon\"\r\n\r\n authorization_url = \"https://www.patreon.com/oauth2/authorize\"\r\n access_token_url = \"https://www.patreon.com/api/oauth2/token\" # nosec\r\n profile_url = 
\"https://www.patreon.com/api/oauth2/api/current_user\"\r\n", "path": "authentik/sources/oauth/types/patreon.py"}], "after_files": [{"content": "\"\"\"Patreon OAuth Views\"\"\"\r\nfrom typing import Any\r\n\r\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\r\nfrom authentik.sources.oauth.models import OAuthSource\r\nfrom authentik.sources.oauth.types.registry import SourceType, registry\r\nfrom authentik.sources.oauth.views.callback import OAuthCallback\r\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\r\n\r\n\r\nclass PatreonOAuthRedirect(OAuthRedirect):\r\n \"\"\"Patreon OAuth2 Redirect\"\"\"\r\n\r\n def get_additional_parameters(self, source: OAuthSource): # pragma: no cover\r\n # https://docs.patreon.com/#scopes\r\n return {\r\n \"scope\": [\"identity\", \"identity[email]\"],\r\n }\r\n\r\n\r\nclass PatreonOAuthCallback(OAuthCallback):\r\n \"\"\"Patreon OAuth2 Callback\"\"\"\r\n\r\n client_class: UserprofileHeaderAuthClient\r\n\r\n def get_user_id(self, info: dict[str, str]) -> str:\r\n return info.get(\"data\", {}).get(\"id\")\r\n\r\n def get_user_enroll_context(\r\n self,\r\n info: dict[str, Any],\r\n ) -> dict[str, Any]:\r\n return {\r\n \"username\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"vanity\"),\r\n \"email\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"email\"),\r\n \"name\": info.get(\"data\", {}).get(\"attributes\", {}).get(\"full_name\"),\r\n }\r\n\r\n\r\[email protected]()\r\nclass PatreonType(SourceType):\r\n \"\"\"OpenIDConnect Type definition\"\"\"\r\n\r\n callback_view = PatreonOAuthCallback\r\n redirect_view = PatreonOAuthRedirect\r\n name = \"Patreon\"\r\n slug = \"patreon\"\r\n\r\n authorization_url = \"https://www.patreon.com/oauth2/authorize\"\r\n access_token_url = \"https://www.patreon.com/api/oauth2/token\" # nosec\r\n profile_url = \"https://www.patreon.com/api/oauth2/api/current_user\"\r\n", "path": "authentik/sources/oauth/types/patreon.py"}]}
| 1,009 | 138 |
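The authentik patch above swaps the OIDC-style scopes for Patreon's own identity scopes (see https://docs.patreon.com/#scopes), which is what resolves the invalid_scope error from the issue. The snippet below is a standalone illustration of the resulting authorize request; the client_id and redirect_uri values are placeholders, not values taken from the source.

```python
from urllib.parse import urlencode

AUTHORIZATION_URL = "https://www.patreon.com/oauth2/authorize"
# Patreon-specific scopes; the previous "openid email profile" trio is what
# triggered the invalid_scope error described in the issue.
SCOPES = ["identity", "identity[email]"]

params = {
    "response_type": "code",
    "client_id": "<client-id>",        # placeholder
    "redirect_uri": "<redirect-uri>",  # placeholder
    "scope": " ".join(SCOPES),
}
print(f"{AUTHORIZATION_URL}?{urlencode(params)}")
```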
gh_patches_debug_35647
|
rasdani/github-patches
|
git_diff
|
castorini__pyserini-1434
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issues with latest MIRACL 2CR
On my iMac Pro (Intel), I'm getting the following failures:
```
condition bm25-mdpr-tied-pft-msmarco-hybrid.bn:
- split: dev
nDCG@10: 0.6540 [OK]
R@100 : 0.9321 [FAIL] expected 0.9100
condition bm25-mdpr-tied-pft-msmarco-hybrid.zh:
- split: dev
nDCG@10: 0.5255 [FAIL] expected 0.5254
R@100 : 0.9587 [OK]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/repro_matrix/run_all_miracl.py`
Content:
```
1 #
2 # Pyserini: Reproducible IR research with sparse and dense representations
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 import argparse
18 import math
19 import os
20 import subprocess
21 import time
22 from collections import defaultdict
23 from string import Template
24
25 import yaml
26
27 from scripts.repro_matrix.defs_miracl import models, languages, trec_eval_metric_definitions
28 from scripts.repro_matrix.utils import run_eval_and_return_metric, ok_str, okish_str, fail_str
29
30
31 def print_results(metric, split):
32 print(f'Metric = {metric}, Split = {split}')
33 print(' ' * 32, end='')
34 for lang in languages:
35 print(f'{lang[0]:3} ', end='')
36 print('')
37 for model in models:
38 print(f'{model:30}', end='')
39 for lang in languages:
40 key = f'{model}.{lang[0]}'
41 print(f'{table[key][split][metric]:7.3f}', end='')
42 print('')
43 print('')
44
45
46 def extract_topic_fn_from_cmd(cmd):
47 cmd = cmd.split()
48 topic_idx = cmd.index('--topics')
49 return cmd[topic_idx + 1]
50
51
52 if __name__ == '__main__':
53 parser = argparse.ArgumentParser(description='Generate regression matrix for MIRACL.')
54 parser.add_argument('--skip-eval', action='store_true', default=False, help='Skip running trec_eval.')
55 args = parser.parse_args()
56
57 start = time.time()
58
59 table = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
60
61 with open('pyserini/resources/miracl.yaml') as f:
62 yaml_data = yaml.safe_load(f)
63 for condition in yaml_data['conditions']:
64 name = condition['name']
65 eval_key = condition['eval_key']
66 cmd_template = condition['command']
67 cmd_lst = cmd_template.split()
68
69 print(f'condition {name}:')
70 lang = name.split('.')[-1]
71 is_hybrid_run = 'hybrid' in name
72
73 for splits in condition['splits']:
74 split = splits['split']
75 if is_hybrid_run:
76 hits = int(cmd_lst[cmd_lst.index('--k') + 1])
77 else:
78 hits = int(cmd_lst[cmd_lst.index('--hits') + 1])
79
80 print(f' - split: {split}')
81
82 runfile = f'runs/run.miracl.{name}.{split}.top{hits}.txt'
83 if is_hybrid_run:
84 bm25_output = f'runs/run.miracl.bm25.{lang}.{split}.top{hits}.txt'
85 mdpr_output = f'runs/run.miracl.mdpr-tied-pft-msmarco.{lang}.{split}.top{hits}.txt'
86 if not os.path.exists(bm25_output):
87 print(f'Missing BM25 file: {bm25_output}')
88 continue
89 if not os.path.exists(mdpr_output):
90 print(f'Missing mDPR file: {mdpr_output}')
91 continue
92 cmd = Template(cmd_template).substitute(split=split, output=runfile, bm25_output=bm25_output, mdpr_output=mdpr_output)
93 else:
94 cmd = Template(cmd_template).substitute(split=split, output=runfile)
95
96 # In the yaml file, the topics are written as something like '--topics miracl-v1.0-ar-${split}'
97 # This works for the dev split because the topics are directly included in Anserini/Pyserini.
98 # For this training split, we have to map the symbol into a file in tools/topics-and-qrels/
99 # Here, we assume that the developer has cloned the miracl repo and placed the topics there.
100 if split == 'train':
101 cmd = cmd.replace(f'--topics miracl-v1.0-{lang}-{split}',
102 f'--topics tools/topics-and-qrels/topics.miracl-v1.0-{lang}-{split}.tsv')
103
104 if not os.path.exists(runfile):
105 print(f' Running: {cmd}')
106 rtn = subprocess.run(cmd.split(), capture_output=True)
107 stderr = rtn.stderr.decode()
108 if '--topics' in cmd:
109 topic_fn = extract_topic_fn_from_cmd(cmd)
110 if f'ValueError: Topic {topic_fn} Not Found' in stderr:
111 print(f'Skipping {topic_fn}: file not found.')
112 continue
113
114 for expected in splits['scores']:
115 for metric in expected:
116 if not args.skip_eval:
117 # We have the translate the training qrels into a file located in tools/topics-and-qrels/
118 # because they are not included with Anserini/Pyserini by default.
119 # Here, we assume that the developer has cloned the miracl repo and placed the qrels there.
120 if split == 'train':
121 qrels = f'tools/topics-and-qrels/qrels.{eval_key}-train.tsv'
122 else:
123 qrels = f'{eval_key}-{split}'
124 score = float(run_eval_and_return_metric(metric, qrels,
125 trec_eval_metric_definitions[metric], runfile))
126 if math.isclose(score, float(expected[metric])):
127 result_str = ok_str
128 # Flaky test: small difference on Mac Studio (M1 chip)
129 elif name == 'mdpr-tied-pft-msmarco.hi' and split == 'train' \
130 and math.isclose(score, float(expected[metric]), abs_tol=2e-4):
131 result_str = okish_str
132 else:
133 result_str = fail_str + f' expected {expected[metric]:.4f}'
134 print(f' {metric:7}: {score:.4f} {result_str}')
135 table[name][split][metric] = score
136 else:
137 table[name][split][metric] = expected[metric]
138
139 print('')
140
141 for metric in ['nDCG@10', 'R@100']:
142 for split in ['dev', 'train']:
143 print_results(metric, split)
144
145 end = time.time()
146 print(f'Total elapsed time: {end - start:.0f}s')
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/repro_matrix/run_all_miracl.py b/scripts/repro_matrix/run_all_miracl.py
--- a/scripts/repro_matrix/run_all_miracl.py
+++ b/scripts/repro_matrix/run_all_miracl.py
@@ -30,12 +30,12 @@
def print_results(metric, split):
print(f'Metric = {metric}, Split = {split}')
- print(' ' * 32, end='')
+ print(' ' * 35, end='')
for lang in languages:
print(f'{lang[0]:3} ', end='')
print('')
for model in models:
- print(f'{model:30}', end='')
+ print(f'{model:33}', end='')
for lang in languages:
key = f'{model}.{lang[0]}'
print(f'{table[key][split][metric]:7.3f}', end='')
@@ -125,9 +125,18 @@
trec_eval_metric_definitions[metric], runfile))
if math.isclose(score, float(expected[metric])):
result_str = ok_str
- # Flaky test: small difference on Mac Studio (M1 chip)
- elif name == 'mdpr-tied-pft-msmarco.hi' and split == 'train' \
- and math.isclose(score, float(expected[metric]), abs_tol=2e-4):
+ # Flaky tests
+ elif (name == 'mdpr-tied-pft-msmarco.hi' and split == 'train'
+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \
+ (name == 'mdpr-tied-pft-msmarco-ft-all.ru'
+ and split == 'dev' and metric == 'nDCG@10'
+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \
+ (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.te'
+ and split == 'train' and metric == 'nDCG@10'
+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \
+ (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.zh'
+ and split == 'dev' and metric == 'nDCG@10'
+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)):
result_str = okish_str
else:
result_str = fail_str + f' expected {expected[metric]:.4f}'
|
{"golden_diff": "diff --git a/scripts/repro_matrix/run_all_miracl.py b/scripts/repro_matrix/run_all_miracl.py\n--- a/scripts/repro_matrix/run_all_miracl.py\n+++ b/scripts/repro_matrix/run_all_miracl.py\n@@ -30,12 +30,12 @@\n \n def print_results(metric, split):\n print(f'Metric = {metric}, Split = {split}')\n- print(' ' * 32, end='')\n+ print(' ' * 35, end='')\n for lang in languages:\n print(f'{lang[0]:3} ', end='')\n print('')\n for model in models:\n- print(f'{model:30}', end='')\n+ print(f'{model:33}', end='')\n for lang in languages:\n key = f'{model}.{lang[0]}'\n print(f'{table[key][split][metric]:7.3f}', end='')\n@@ -125,9 +125,18 @@\n trec_eval_metric_definitions[metric], runfile))\n if math.isclose(score, float(expected[metric])):\n result_str = ok_str\n- # Flaky test: small difference on Mac Studio (M1 chip)\n- elif name == 'mdpr-tied-pft-msmarco.hi' and split == 'train' \\\n- and math.isclose(score, float(expected[metric]), abs_tol=2e-4):\n+ # Flaky tests\n+ elif (name == 'mdpr-tied-pft-msmarco.hi' and split == 'train'\n+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n+ (name == 'mdpr-tied-pft-msmarco-ft-all.ru'\n+ and split == 'dev' and metric == 'nDCG@10'\n+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n+ (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.te'\n+ and split == 'train' and metric == 'nDCG@10'\n+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n+ (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.zh'\n+ and split == 'dev' and metric == 'nDCG@10'\n+ and math.isclose(score, float(expected[metric]), abs_tol=2e-4)):\n result_str = okish_str\n else:\n result_str = fail_str + f' expected {expected[metric]:.4f}'\n", "issue": "Issues with latest MIRACL 2CR\nOn my iMac Pro (Intel), I'm getting the following failures:\r\n\r\n```\r\ncondition bm25-mdpr-tied-pft-msmarco-hybrid.bn:\r\n - split: dev\r\n nDCG@10: 0.6540 [OK]\r\n R@100 : 0.9321 [FAIL] expected 0.9100\r\n\r\ncondition bm25-mdpr-tied-pft-msmarco-hybrid.zh:\r\n - split: dev\r\n nDCG@10: 0.5255 [FAIL] expected 0.5254\r\n R@100 : 0.9587 [OK]\r\n```\r\n\n", "before_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport math\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict\nfrom string import Template\n\nimport yaml\n\nfrom scripts.repro_matrix.defs_miracl import models, languages, trec_eval_metric_definitions\nfrom scripts.repro_matrix.utils import run_eval_and_return_metric, ok_str, okish_str, fail_str\n\n\ndef print_results(metric, split):\n print(f'Metric = {metric}, Split = {split}')\n print(' ' * 32, end='')\n for lang in languages:\n print(f'{lang[0]:3} ', end='')\n print('')\n for model in models:\n print(f'{model:30}', end='')\n for lang in languages:\n key = f'{model}.{lang[0]}'\n print(f'{table[key][split][metric]:7.3f}', end='')\n print('')\n print('')\n\n\ndef extract_topic_fn_from_cmd(cmd):\n cmd = 
cmd.split()\n topic_idx = cmd.index('--topics')\n return cmd[topic_idx + 1]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate regression matrix for MIRACL.')\n parser.add_argument('--skip-eval', action='store_true', default=False, help='Skip running trec_eval.')\n args = parser.parse_args()\n\n start = time.time()\n\n table = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))\n\n with open('pyserini/resources/miracl.yaml') as f:\n yaml_data = yaml.safe_load(f)\n for condition in yaml_data['conditions']:\n name = condition['name']\n eval_key = condition['eval_key']\n cmd_template = condition['command']\n cmd_lst = cmd_template.split()\n\n print(f'condition {name}:')\n lang = name.split('.')[-1]\n is_hybrid_run = 'hybrid' in name\n\n for splits in condition['splits']:\n split = splits['split']\n if is_hybrid_run:\n hits = int(cmd_lst[cmd_lst.index('--k') + 1])\n else:\n hits = int(cmd_lst[cmd_lst.index('--hits') + 1])\n\n print(f' - split: {split}')\n\n runfile = f'runs/run.miracl.{name}.{split}.top{hits}.txt'\n if is_hybrid_run:\n bm25_output = f'runs/run.miracl.bm25.{lang}.{split}.top{hits}.txt'\n mdpr_output = f'runs/run.miracl.mdpr-tied-pft-msmarco.{lang}.{split}.top{hits}.txt'\n if not os.path.exists(bm25_output):\n print(f'Missing BM25 file: {bm25_output}')\n continue\n if not os.path.exists(mdpr_output):\n print(f'Missing mDPR file: {mdpr_output}')\n continue\n cmd = Template(cmd_template).substitute(split=split, output=runfile, bm25_output=bm25_output, mdpr_output=mdpr_output)\n else:\n cmd = Template(cmd_template).substitute(split=split, output=runfile)\n\n # In the yaml file, the topics are written as something like '--topics miracl-v1.0-ar-${split}'\n # This works for the dev split because the topics are directly included in Anserini/Pyserini.\n # For this training split, we have to map the symbol into a file in tools/topics-and-qrels/\n # Here, we assume that the developer has cloned the miracl repo and placed the topics there.\n if split == 'train':\n cmd = cmd.replace(f'--topics miracl-v1.0-{lang}-{split}',\n f'--topics tools/topics-and-qrels/topics.miracl-v1.0-{lang}-{split}.tsv')\n\n if not os.path.exists(runfile):\n print(f' Running: {cmd}')\n rtn = subprocess.run(cmd.split(), capture_output=True)\n stderr = rtn.stderr.decode()\n if '--topics' in cmd:\n topic_fn = extract_topic_fn_from_cmd(cmd)\n if f'ValueError: Topic {topic_fn} Not Found' in stderr:\n print(f'Skipping {topic_fn}: file not found.')\n continue\n\n for expected in splits['scores']:\n for metric in expected:\n if not args.skip_eval:\n # We have the translate the training qrels into a file located in tools/topics-and-qrels/\n # because they are not included with Anserini/Pyserini by default.\n # Here, we assume that the developer has cloned the miracl repo and placed the qrels there.\n if split == 'train':\n qrels = f'tools/topics-and-qrels/qrels.{eval_key}-train.tsv'\n else:\n qrels = f'{eval_key}-{split}'\n score = float(run_eval_and_return_metric(metric, qrels,\n trec_eval_metric_definitions[metric], runfile))\n if math.isclose(score, float(expected[metric])):\n result_str = ok_str\n # Flaky test: small difference on Mac Studio (M1 chip)\n elif name == 'mdpr-tied-pft-msmarco.hi' and split == 'train' \\\n and math.isclose(score, float(expected[metric]), abs_tol=2e-4):\n result_str = okish_str\n else:\n result_str = fail_str + f' expected {expected[metric]:.4f}'\n print(f' {metric:7}: {score:.4f} {result_str}')\n table[name][split][metric] = score\n 
else:\n table[name][split][metric] = expected[metric]\n\n print('')\n\n for metric in ['nDCG@10', 'R@100']:\n for split in ['dev', 'train']:\n print_results(metric, split)\n\n end = time.time()\n print(f'Total elapsed time: {end - start:.0f}s')\n", "path": "scripts/repro_matrix/run_all_miracl.py"}], "after_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport math\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict\nfrom string import Template\n\nimport yaml\n\nfrom scripts.repro_matrix.defs_miracl import models, languages, trec_eval_metric_definitions\nfrom scripts.repro_matrix.utils import run_eval_and_return_metric, ok_str, okish_str, fail_str\n\n\ndef print_results(metric, split):\n print(f'Metric = {metric}, Split = {split}')\n print(' ' * 35, end='')\n for lang in languages:\n print(f'{lang[0]:3} ', end='')\n print('')\n for model in models:\n print(f'{model:33}', end='')\n for lang in languages:\n key = f'{model}.{lang[0]}'\n print(f'{table[key][split][metric]:7.3f}', end='')\n print('')\n print('')\n\n\ndef extract_topic_fn_from_cmd(cmd):\n cmd = cmd.split()\n topic_idx = cmd.index('--topics')\n return cmd[topic_idx + 1]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate regression matrix for MIRACL.')\n parser.add_argument('--skip-eval', action='store_true', default=False, help='Skip running trec_eval.')\n args = parser.parse_args()\n\n start = time.time()\n\n table = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))\n\n with open('pyserini/resources/miracl.yaml') as f:\n yaml_data = yaml.safe_load(f)\n for condition in yaml_data['conditions']:\n name = condition['name']\n eval_key = condition['eval_key']\n cmd_template = condition['command']\n cmd_lst = cmd_template.split()\n\n print(f'condition {name}:')\n lang = name.split('.')[-1]\n is_hybrid_run = 'hybrid' in name\n\n for splits in condition['splits']:\n split = splits['split']\n if is_hybrid_run:\n hits = int(cmd_lst[cmd_lst.index('--k') + 1])\n else:\n hits = int(cmd_lst[cmd_lst.index('--hits') + 1])\n\n print(f' - split: {split}')\n\n runfile = f'runs/run.miracl.{name}.{split}.top{hits}.txt'\n if is_hybrid_run:\n bm25_output = f'runs/run.miracl.bm25.{lang}.{split}.top{hits}.txt'\n mdpr_output = f'runs/run.miracl.mdpr-tied-pft-msmarco.{lang}.{split}.top{hits}.txt'\n if not os.path.exists(bm25_output):\n print(f'Missing BM25 file: {bm25_output}')\n continue\n if not os.path.exists(mdpr_output):\n print(f'Missing mDPR file: {mdpr_output}')\n continue\n cmd = Template(cmd_template).substitute(split=split, output=runfile, bm25_output=bm25_output, mdpr_output=mdpr_output)\n else:\n cmd = Template(cmd_template).substitute(split=split, output=runfile)\n\n # In the yaml file, the topics are written as something like '--topics miracl-v1.0-ar-${split}'\n # This works for the dev split because the topics are directly included in 
Anserini/Pyserini.\n # For this training split, we have to map the symbol into a file in tools/topics-and-qrels/\n # Here, we assume that the developer has cloned the miracl repo and placed the topics there.\n if split == 'train':\n cmd = cmd.replace(f'--topics miracl-v1.0-{lang}-{split}',\n f'--topics tools/topics-and-qrels/topics.miracl-v1.0-{lang}-{split}.tsv')\n\n if not os.path.exists(runfile):\n print(f' Running: {cmd}')\n rtn = subprocess.run(cmd.split(), capture_output=True)\n stderr = rtn.stderr.decode()\n if '--topics' in cmd:\n topic_fn = extract_topic_fn_from_cmd(cmd)\n if f'ValueError: Topic {topic_fn} Not Found' in stderr:\n print(f'Skipping {topic_fn}: file not found.')\n continue\n\n for expected in splits['scores']:\n for metric in expected:\n if not args.skip_eval:\n # We have the translate the training qrels into a file located in tools/topics-and-qrels/\n # because they are not included with Anserini/Pyserini by default.\n # Here, we assume that the developer has cloned the miracl repo and placed the qrels there.\n if split == 'train':\n qrels = f'tools/topics-and-qrels/qrels.{eval_key}-train.tsv'\n else:\n qrels = f'{eval_key}-{split}'\n score = float(run_eval_and_return_metric(metric, qrels,\n trec_eval_metric_definitions[metric], runfile))\n if math.isclose(score, float(expected[metric])):\n result_str = ok_str\n # Flaky tests\n elif (name == 'mdpr-tied-pft-msmarco.hi' and split == 'train'\n and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n (name == 'mdpr-tied-pft-msmarco-ft-all.ru'\n and split == 'dev' and metric == 'nDCG@10'\n and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.te'\n and split == 'train' and metric == 'nDCG@10'\n and math.isclose(score, float(expected[metric]), abs_tol=2e-4)) or \\\n (name == 'bm25-mdpr-tied-pft-msmarco-hybrid.zh'\n and split == 'dev' and metric == 'nDCG@10'\n and math.isclose(score, float(expected[metric]), abs_tol=2e-4)):\n result_str = okish_str\n else:\n result_str = fail_str + f' expected {expected[metric]:.4f}'\n print(f' {metric:7}: {score:.4f} {result_str}')\n table[name][split][metric] = score\n else:\n table[name][split][metric] = expected[metric]\n\n print('')\n\n for metric in ['nDCG@10', 'R@100']:\n for split in ['dev', 'train']:\n print_results(metric, split)\n\n end = time.time()\n print(f'Total elapsed time: {end - start:.0f}s')\n", "path": "scripts/repro_matrix/run_all_miracl.py"}]}
| 2,198 | 582 |
gh_patches_debug_1861
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-690
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No reverse match for rest_framework namespace
The error for a very strange reason shows when accessing these URLs:
https://github.com/swcarpentry/amy/blob/develop/api/urls.py#L57
I wasn't able to get rid of it; it's not being used at all, so maybe it should be removed…?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/urls.py`
Content:
```
1 from django.conf.urls import url, include
2 from rest_framework_nested import routers
3 from rest_framework.urlpatterns import format_suffix_patterns
4
5 from . import views
6
7 # new in Django 1.9: this defines a namespace for URLs; there's no need for
8 # `namespace='api'` in the include()
9 app_name = 'api'
10
11 # routers generate URLs for methods like `.list` or `.retrieve`
12 router = routers.SimpleRouter()
13 router.register('reports', views.ReportsViewSet, base_name='reports')
14 router.register('persons', views.PersonViewSet)
15 awards_router = routers.NestedSimpleRouter(router, 'persons', lookup='person')
16 awards_router.register('awards', views.AwardViewSet, base_name='person-awards')
17 person_task_router = routers.NestedSimpleRouter(router, 'persons',
18 lookup='person')
19 person_task_router.register('tasks', views.PersonTaskViewSet,
20 base_name='person-tasks')
21 router.register('events', views.EventViewSet)
22 tasks_router = routers.NestedSimpleRouter(router, 'events', lookup='event')
23 tasks_router.register('tasks', views.TaskViewSet, base_name='event-tasks')
24 todos_router = routers.NestedSimpleRouter(router, 'events', lookup='event')
25 todos_router.register('todos', views.TodoViewSet, base_name='event-todos')
26 router.register('hosts', views.HostViewSet)
27 router.register('airports', views.AirportViewSet)
28
29 urlpatterns = [
30 url('^$', views.ApiRoot.as_view(), name='root'),
31 # TODO: turn these export views into ViewSets and add them to the router
32 url('^export/badges/$',
33 views.ExportBadgesView.as_view(),
34 name='export-badges'),
35 url('^export/instructors/$',
36 views.ExportInstructorLocationsView.as_view(),
37 name='export-instructors'),
38 url('^export/members/$',
39 views.ExportMembersView.as_view(),
40 name='export-members'),
41 url('^events/published/$',
42 views.PublishedEvents.as_view(),
43 name='events-published'),
44 url('^todos/user/$',
45 views.UserTodoItems.as_view(),
46 name='user-todos'),
47
48 url('^', include(router.urls)),
49 url('^', include(awards_router.urls)),
50 url('^', include(person_task_router.urls)),
51 url('^', include(tasks_router.urls)),
52 url('^', include(todos_router.urls)),
53 ]
54
55 # for login-logout functionality
56 urlpatterns += [
57 url(r'^api-auth/',
58 include('rest_framework.urls', namespace='rest_framework')),
59 ]
60
61 urlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/urls.py b/api/urls.py
--- a/api/urls.py
+++ b/api/urls.py
@@ -52,10 +52,4 @@
url('^', include(todos_router.urls)),
]
-# for login-logout functionality
-urlpatterns += [
- url(r'^api-auth/',
- include('rest_framework.urls', namespace='rest_framework')),
-]
-
urlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format
|
{"golden_diff": "diff --git a/api/urls.py b/api/urls.py\n--- a/api/urls.py\n+++ b/api/urls.py\n@@ -52,10 +52,4 @@\n url('^', include(todos_router.urls)),\n ]\n \n-# for login-logout functionality\n-urlpatterns += [\n- url(r'^api-auth/',\n- include('rest_framework.urls', namespace='rest_framework')),\n-]\n-\n urlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format\n", "issue": "No reverse match for rest_framework namespace\nThe error for a very strange reason shows when accessing these URLs:\nhttps://github.com/swcarpentry/amy/blob/develop/api/urls.py#L57\n\nI wasn't able to get rid of it; it's not being used at all, so maybe it should be removed\u2026?\n\n", "before_files": [{"content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name = 'api'\n\n# routers generate URLs for methods like `.list` or `.retrieve`\nrouter = routers.SimpleRouter()\nrouter.register('reports', views.ReportsViewSet, base_name='reports')\nrouter.register('persons', views.PersonViewSet)\nawards_router = routers.NestedSimpleRouter(router, 'persons', lookup='person')\nawards_router.register('awards', views.AwardViewSet, base_name='person-awards')\nperson_task_router = routers.NestedSimpleRouter(router, 'persons',\n lookup='person')\nperson_task_router.register('tasks', views.PersonTaskViewSet,\n base_name='person-tasks')\nrouter.register('events', views.EventViewSet)\ntasks_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntasks_router.register('tasks', views.TaskViewSet, base_name='event-tasks')\ntodos_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntodos_router.register('todos', views.TodoViewSet, base_name='event-todos')\nrouter.register('hosts', views.HostViewSet)\nrouter.register('airports', views.AirportViewSet)\n\nurlpatterns = [\n url('^$', views.ApiRoot.as_view(), name='root'),\n # TODO: turn these export views into ViewSets and add them to the router\n url('^export/badges/$',\n views.ExportBadgesView.as_view(),\n name='export-badges'),\n url('^export/instructors/$',\n views.ExportInstructorLocationsView.as_view(),\n name='export-instructors'),\n url('^export/members/$',\n views.ExportMembersView.as_view(),\n name='export-members'),\n url('^events/published/$',\n views.PublishedEvents.as_view(),\n name='events-published'),\n url('^todos/user/$',\n views.UserTodoItems.as_view(),\n name='user-todos'),\n\n url('^', include(router.urls)),\n url('^', include(awards_router.urls)),\n url('^', include(person_task_router.urls)),\n url('^', include(tasks_router.urls)),\n url('^', include(todos_router.urls)),\n]\n\n# for login-logout functionality\nurlpatterns += [\n url(r'^api-auth/',\n include('rest_framework.urls', namespace='rest_framework')),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format\n", "path": "api/urls.py"}], "after_files": [{"content": "from django.conf.urls import url, include\nfrom rest_framework_nested import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . 
import views\n\n# new in Django 1.9: this defines a namespace for URLs; there's no need for\n# `namespace='api'` in the include()\napp_name = 'api'\n\n# routers generate URLs for methods like `.list` or `.retrieve`\nrouter = routers.SimpleRouter()\nrouter.register('reports', views.ReportsViewSet, base_name='reports')\nrouter.register('persons', views.PersonViewSet)\nawards_router = routers.NestedSimpleRouter(router, 'persons', lookup='person')\nawards_router.register('awards', views.AwardViewSet, base_name='person-awards')\nperson_task_router = routers.NestedSimpleRouter(router, 'persons',\n lookup='person')\nperson_task_router.register('tasks', views.PersonTaskViewSet,\n base_name='person-tasks')\nrouter.register('events', views.EventViewSet)\ntasks_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntasks_router.register('tasks', views.TaskViewSet, base_name='event-tasks')\ntodos_router = routers.NestedSimpleRouter(router, 'events', lookup='event')\ntodos_router.register('todos', views.TodoViewSet, base_name='event-todos')\nrouter.register('hosts', views.HostViewSet)\nrouter.register('airports', views.AirportViewSet)\n\nurlpatterns = [\n url('^$', views.ApiRoot.as_view(), name='root'),\n # TODO: turn these export views into ViewSets and add them to the router\n url('^export/badges/$',\n views.ExportBadgesView.as_view(),\n name='export-badges'),\n url('^export/instructors/$',\n views.ExportInstructorLocationsView.as_view(),\n name='export-instructors'),\n url('^export/members/$',\n views.ExportMembersView.as_view(),\n name='export-members'),\n url('^events/published/$',\n views.PublishedEvents.as_view(),\n name='events-published'),\n url('^todos/user/$',\n views.UserTodoItems.as_view(),\n name='user-todos'),\n\n url('^', include(router.urls)),\n url('^', include(awards_router.urls)),\n url('^', include(person_task_router.urls)),\n url('^', include(tasks_router.urls)),\n url('^', include(todos_router.urls)),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns) # allow to specify format\n", "path": "api/urls.py"}]}
| 985 | 104 |
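
For the AMY record above, the golden diff simply drops the `api-auth/` block rather than fixing its namespace. If the browsable-API login/logout views were still wanted, one hedged alternative — not what the patch does — would be a plain include from the project's root `urls.py`, outside the namespaced `api` app, relying on recent DRF releases declaring `app_name = "rest_framework"` themselves:

```python
from django.conf.urls import url, include

urlpatterns = [
    # Plain include: recent DRF versions set app_name = "rest_framework" in
    # rest_framework/urls.py, so no explicit namespace argument is needed here.
    url(r'^api-auth/', include('rest_framework.urls')),
]
```
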
gh_patches_debug_8186
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-5117
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow to search for products by SKU in admin dashboard
### What I'm trying to achieve
I'm looking to find a product by its SKU
### Describe a proposed solution
Tweak search engine settings to return products with full/partial SKU match.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/search/backends/postgresql_storefront.py`
Content:
```
1 from django.contrib.postgres.search import TrigramSimilarity
2 from django.db.models import Q
3
4 from ...product.models import Product
5
6
7 def search(phrase):
8 """Return matching products for storefront views.
9
10 Fuzzy storefront search that is resistant to small typing errors made
11 by user. Name is matched using trigram similarity, description uses
12 standard postgres full text search.
13
14 Args:
15 phrase (str): searched phrase
16
17 """
18 name_sim = TrigramSimilarity("name", phrase)
19 published = Q(is_published=True)
20 ft_in_description = Q(description__search=phrase)
21 name_similar = Q(name_sim__gt=0.2)
22 return Product.objects.annotate(name_sim=name_sim).filter(
23 (ft_in_description | name_similar) & published
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/search/backends/postgresql_storefront.py b/saleor/search/backends/postgresql_storefront.py
--- a/saleor/search/backends/postgresql_storefront.py
+++ b/saleor/search/backends/postgresql_storefront.py
@@ -16,9 +16,9 @@
"""
name_sim = TrigramSimilarity("name", phrase)
- published = Q(is_published=True)
ft_in_description = Q(description__search=phrase)
+ ft_by_sku = Q(variants__sku__search=phrase)
name_similar = Q(name_sim__gt=0.2)
return Product.objects.annotate(name_sim=name_sim).filter(
- (ft_in_description | name_similar) & published
+ (ft_in_description | name_similar | ft_by_sku)
)
|
{"golden_diff": "diff --git a/saleor/search/backends/postgresql_storefront.py b/saleor/search/backends/postgresql_storefront.py\n--- a/saleor/search/backends/postgresql_storefront.py\n+++ b/saleor/search/backends/postgresql_storefront.py\n@@ -16,9 +16,9 @@\n \n \"\"\"\n name_sim = TrigramSimilarity(\"name\", phrase)\n- published = Q(is_published=True)\n ft_in_description = Q(description__search=phrase)\n+ ft_by_sku = Q(variants__sku__search=phrase)\n name_similar = Q(name_sim__gt=0.2)\n return Product.objects.annotate(name_sim=name_sim).filter(\n- (ft_in_description | name_similar) & published\n+ (ft_in_description | name_similar | ft_by_sku)\n )\n", "issue": "Allow to search for products by SKU in admin dashboard\n### What I'm trying to achieve\r\nI'm looking to find a product by its SKU\r\n\r\n### Describe a proposed solution\r\nTweak search engine settings to return products with full/partial SKU match.\r\n\r\n\n", "before_files": [{"content": "from django.contrib.postgres.search import TrigramSimilarity\nfrom django.db.models import Q\n\nfrom ...product.models import Product\n\n\ndef search(phrase):\n \"\"\"Return matching products for storefront views.\n\n Fuzzy storefront search that is resistant to small typing errors made\n by user. Name is matched using trigram similarity, description uses\n standard postgres full text search.\n\n Args:\n phrase (str): searched phrase\n\n \"\"\"\n name_sim = TrigramSimilarity(\"name\", phrase)\n published = Q(is_published=True)\n ft_in_description = Q(description__search=phrase)\n name_similar = Q(name_sim__gt=0.2)\n return Product.objects.annotate(name_sim=name_sim).filter(\n (ft_in_description | name_similar) & published\n )\n", "path": "saleor/search/backends/postgresql_storefront.py"}], "after_files": [{"content": "from django.contrib.postgres.search import TrigramSimilarity\nfrom django.db.models import Q\n\nfrom ...product.models import Product\n\n\ndef search(phrase):\n \"\"\"Return matching products for storefront views.\n\n Fuzzy storefront search that is resistant to small typing errors made\n by user. Name is matched using trigram similarity, description uses\n standard postgres full text search.\n\n Args:\n phrase (str): searched phrase\n\n \"\"\"\n name_sim = TrigramSimilarity(\"name\", phrase)\n ft_in_description = Q(description__search=phrase)\n ft_by_sku = Q(variants__sku__search=phrase)\n name_similar = Q(name_sim__gt=0.2)\n return Product.objects.annotate(name_sim=name_sim).filter(\n (ft_in_description | name_similar | ft_by_sku)\n )\n", "path": "saleor/search/backends/postgresql_storefront.py"}]}
| 528 | 179 |
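
For the Saleor record above, the patched backend ORs a Postgres full-text match on related variant SKUs into the existing name/description query. A rough usage sketch, assuming a configured Postgres database (with the `pg_trgm` extension for the trigram part) and some existing products; how far partial SKUs match depends on how Postgres tokenises the SKU string:

```python
# Illustrative only: exercising the patched storefront search.
from saleor.search.backends.postgresql_storefront import search

exact = search("POOL-SKU-123")   # full SKU of an existing variant
partial = search("POOL")         # may match via full-text tokenisation of the SKU

print(list(exact.values_list("name", flat=True)))
print(list(partial.values_list("name", flat=True)))
```
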
gh_patches_debug_17752
|
rasdani/github-patches
|
git_diff
|
nf-core__tools-1590
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lint warning on Nextflow minimum version badge
### Description of the bug
`nf-core lint` complains that the minimum version badge for Nextflow could not be found, even though it is present in the `README.md`.
It started occurring after the `template-merge-2.4`.
It appears to be a bug.
### Command used and terminal output
```console
(nextflow2) rnavar$ nf-core lint
,--./,-.
___ __ __ __ ___ /,-._.--~\
|\ | |__ __ / ` / \ |__) |__ } {
| \| | \__, \__/ | \ |___ \`-._,-`-,
`._,._,'
nf-core/tools version 2.4.1 - https://nf-co.re
INFO Testing pipeline: . __init__.py:244
╭─ [!] 1 Pipeline Test Warning ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ │
│ readme: README did not have a Nextflow minimum version badge. │
│ │
╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
```
### System information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/lint/readme.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import re
5
6
7 def readme(self):
8 """Repository ``README.md`` tests
9
10 The ``README.md`` files for a project are very important and must meet some requirements:
11
12 * Nextflow badge
13
14 * If no Nextflow badge is found, a warning is given
15 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails
16 * Example badge code:
17
18 .. code-block:: md
19
20 [](https://www.nextflow.io/)
21
22 * Bioconda badge
23
24 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required
25 * Required badge code:
26
27 .. code-block:: md
28
29 [](https://bioconda.github.io/)
30
31 .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.
32 """
33 passed = []
34 warned = []
35 failed = []
36
37 with open(os.path.join(self.wf_path, "README.md"), "r") as fh:
38 content = fh.read()
39
40 # Check that there is a readme badge showing the minimum required version of Nextflow
41 # [](https://www.nextflow.io/)
42 # and that it has the correct version
43 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)"
44 match = re.search(nf_badge_re, content)
45 if match:
46 nf_badge_version = match.group(1).strip("'\"")
47 try:
48 assert nf_badge_version == self.minNextflowVersion
49 except (AssertionError, KeyError):
50 failed.append(
51 "README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`".format(
52 nf_badge_version, self.minNextflowVersion
53 )
54 )
55 else:
56 passed.append(
57 "README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`".format(
58 nf_badge_version, self.minNextflowVersion
59 )
60 )
61 else:
62 warned.append("README did not have a Nextflow minimum version badge.")
63
64 # Check that the minimum version mentioned in the quick start section is consistent
65 # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)"
66 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
67 match = re.search(nf_version_re, content)
68 if match:
69 nf_quickstart_version = match.group(1)
70 try:
71 assert nf_quickstart_version == self.minNextflowVersion
72 except (AssertionError, KeyError):
73 failed.append(
74 f"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`"
75 )
76 else:
77 passed.append(
78 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`"
79 )
80 else:
81 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.")
82
83 return {"passed": passed, "warned": warned, "failed": failed}
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py
--- a/nf_core/lint/readme.py
+++ b/nf_core/lint/readme.py
@@ -38,9 +38,9 @@
content = fh.read()
# Check that there is a readme badge showing the minimum required version of Nextflow
- # [](https://www.nextflow.io/)
+ # [](https://www.nextflow.io/)
# and that it has the correct version
- nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)"
+ nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\)\]\(https://www\.nextflow\.io/\)"
match = re.search(nf_badge_re, content)
if match:
nf_badge_version = match.group(1).strip("'\"")
|
{"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -38,9 +38,9 @@\n content = fh.read()\n \n # Check that there is a readme badge showing the minimum required version of Nextflow\n- # [](https://www.nextflow.io/)\n+ # [](https://www.nextflow.io/)\n # and that it has the correct version\n- nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n+ nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n", "issue": "Lint warning on Nextflow minimum version badge\n### Description of the bug\n\n`nf-core lint` complains that the minimum version badge for Nextflow could not found, however it was present in the `README.md`.\r\nIt occurred after the `template-merge-2.4`\r\nIt appears to be a bug.\r\n\r\n\n\n### Command used and terminal output\n\n```console\n(nextflow2) rnavar$ nf-core lint\r\n\r\n\r\n\r\n ,--./,-.\r\n\r\n ___ __ __ __ ___ /,-._.--~\\\r\n\r\n |\\ | |__ __ / ` / \\ |__) |__ } {\r\n\r\n | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-,\r\n\r\n `._,._,'\r\n\r\n\r\n\r\n nf-core/tools version 2.4.1 - https://nf-co.re\r\n\r\n\r\n\r\n\r\n\r\nINFO Testing pipeline: . __init__.py:244\r\n\r\n\r\n\r\n\u256d\u2500 [!] 1 Pipeline Test Warning \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\r\n\u2502 \u2502\r\n\r\n\u2502 readme: README did not have a Nextflow minimum version badge. 
\u2502\r\n\r\n\u2502 \u2502\r\n\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\n```\n\n\n### System information\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. 
Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. 
Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]}
| 1,665 | 392 |
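
For the nf-core record above, the only change is dropping `\?labelColor=000000` from the badge regex so it matches the badge emitted by the 2.4 template. A quick check of the updated pattern against the badge string from the lint module's docstring:

```python
import re

# Updated pattern from the diff above: the badge URL no longer carries ?labelColor=000000.
nf_badge_re = (
    r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2"
    r"-%E2%89%A5([\d\.]+)-23aa62\.svg\)\]\(https://www\.nextflow\.io/\)"
)

readme_line = (
    "[]"
    "(https://www.nextflow.io/)"
)

match = re.search(nf_badge_re, readme_line)
print(match.group(1))  # -> 21.10.3
```
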
gh_patches_debug_2980
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-2114
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lambda publish_version returns wrong status code
In boto3, when Lambda `publish_version` succeeds, boto3 returns HTTP status code 201.
But moto returns HTTP status code 200.
moto and boto version
```
boto3 1.9.71
botocore 1.12.71
moto 1.3.7
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/awslambda/responses.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import json
4
5 try:
6 from urllib import unquote
7 except ImportError:
8 from urllib.parse import unquote
9
10 from moto.core.utils import amz_crc32, amzn_request_id, path_url
11 from moto.core.responses import BaseResponse
12 from .models import lambda_backends
13
14
15 class LambdaResponse(BaseResponse):
16 @property
17 def json_body(self):
18 """
19 :return: JSON
20 :rtype: dict
21 """
22 return json.loads(self.body)
23
24 @property
25 def lambda_backend(self):
26 """
27 Get backend
28 :return: Lambda Backend
29 :rtype: moto.awslambda.models.LambdaBackend
30 """
31 return lambda_backends[self.region]
32
33 def root(self, request, full_url, headers):
34 self.setup_class(request, full_url, headers)
35 if request.method == 'GET':
36 return self._list_functions(request, full_url, headers)
37 elif request.method == 'POST':
38 return self._create_function(request, full_url, headers)
39 else:
40 raise ValueError("Cannot handle request")
41
42 def function(self, request, full_url, headers):
43 self.setup_class(request, full_url, headers)
44 if request.method == 'GET':
45 return self._get_function(request, full_url, headers)
46 elif request.method == 'DELETE':
47 return self._delete_function(request, full_url, headers)
48 else:
49 raise ValueError("Cannot handle request")
50
51 def versions(self, request, full_url, headers):
52 self.setup_class(request, full_url, headers)
53 if request.method == 'GET':
54 # This is ListVersionByFunction
55
56 path = request.path if hasattr(request, 'path') else path_url(request.url)
57 function_name = path.split('/')[-2]
58 return self._list_versions_by_function(function_name)
59
60 elif request.method == 'POST':
61 return self._publish_function(request, full_url, headers)
62 else:
63 raise ValueError("Cannot handle request")
64
65 @amz_crc32
66 @amzn_request_id
67 def invoke(self, request, full_url, headers):
68 self.setup_class(request, full_url, headers)
69 if request.method == 'POST':
70 return self._invoke(request, full_url)
71 else:
72 raise ValueError("Cannot handle request")
73
74 @amz_crc32
75 @amzn_request_id
76 def invoke_async(self, request, full_url, headers):
77 self.setup_class(request, full_url, headers)
78 if request.method == 'POST':
79 return self._invoke_async(request, full_url)
80 else:
81 raise ValueError("Cannot handle request")
82
83 def tag(self, request, full_url, headers):
84 self.setup_class(request, full_url, headers)
85 if request.method == 'GET':
86 return self._list_tags(request, full_url)
87 elif request.method == 'POST':
88 return self._tag_resource(request, full_url)
89 elif request.method == 'DELETE':
90 return self._untag_resource(request, full_url)
91 else:
92 raise ValueError("Cannot handle {0} request".format(request.method))
93
94 def policy(self, request, full_url, headers):
95 if request.method == 'GET':
96 return self._get_policy(request, full_url, headers)
97 if request.method == 'POST':
98 return self._add_policy(request, full_url, headers)
99
100 def _add_policy(self, request, full_url, headers):
101 path = request.path if hasattr(request, 'path') else path_url(request.url)
102 function_name = path.split('/')[-2]
103 if self.lambda_backend.get_function(function_name):
104 policy = request.body.decode('utf8')
105 self.lambda_backend.add_policy(function_name, policy)
106 return 200, {}, json.dumps(dict(Statement=policy))
107 else:
108 return 404, {}, "{}"
109
110 def _get_policy(self, request, full_url, headers):
111 path = request.path if hasattr(request, 'path') else path_url(request.url)
112 function_name = path.split('/')[-2]
113 if self.lambda_backend.get_function(function_name):
114 lambda_function = self.lambda_backend.get_function(function_name)
115 return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}"))
116 else:
117 return 404, {}, "{}"
118
119 def _invoke(self, request, full_url):
120 response_headers = {}
121
122 function_name = self.path.rsplit('/', 2)[-2]
123 qualifier = self._get_param('qualifier')
124
125 fn = self.lambda_backend.get_function(function_name, qualifier)
126 if fn:
127 payload = fn.invoke(self.body, self.headers, response_headers)
128 response_headers['Content-Length'] = str(len(payload))
129 return 202, response_headers, payload
130 else:
131 return 404, response_headers, "{}"
132
133 def _invoke_async(self, request, full_url):
134 response_headers = {}
135
136 function_name = self.path.rsplit('/', 3)[-3]
137
138 fn = self.lambda_backend.get_function(function_name, None)
139 if fn:
140 payload = fn.invoke(self.body, self.headers, response_headers)
141 response_headers['Content-Length'] = str(len(payload))
142 return 202, response_headers, payload
143 else:
144 return 404, response_headers, "{}"
145
146 def _list_functions(self, request, full_url, headers):
147 result = {
148 'Functions': []
149 }
150
151 for fn in self.lambda_backend.list_functions():
152 json_data = fn.get_configuration()
153
154 result['Functions'].append(json_data)
155
156 return 200, {}, json.dumps(result)
157
158 def _list_versions_by_function(self, function_name):
159 result = {
160 'Versions': []
161 }
162
163 functions = self.lambda_backend.list_versions_by_function(function_name)
164 if functions:
165 for fn in functions:
166 json_data = fn.get_configuration()
167 result['Versions'].append(json_data)
168
169 return 200, {}, json.dumps(result)
170
171 def _create_function(self, request, full_url, headers):
172 try:
173 fn = self.lambda_backend.create_function(self.json_body)
174 except ValueError as e:
175 return 400, {}, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}})
176 else:
177 config = fn.get_configuration()
178 return 201, {}, json.dumps(config)
179
180 def _publish_function(self, request, full_url, headers):
181 function_name = self.path.rsplit('/', 2)[-2]
182
183 fn = self.lambda_backend.publish_function(function_name)
184 if fn:
185 config = fn.get_configuration()
186 return 200, {}, json.dumps(config)
187 else:
188 return 404, {}, "{}"
189
190 def _delete_function(self, request, full_url, headers):
191 function_name = self.path.rsplit('/', 1)[-1]
192 qualifier = self._get_param('Qualifier', None)
193
194 if self.lambda_backend.delete_function(function_name, qualifier):
195 return 204, {}, ""
196 else:
197 return 404, {}, "{}"
198
199 def _get_function(self, request, full_url, headers):
200 function_name = self.path.rsplit('/', 1)[-1]
201 qualifier = self._get_param('Qualifier', None)
202
203 fn = self.lambda_backend.get_function(function_name, qualifier)
204
205 if fn:
206 code = fn.get_code()
207
208 return 200, {}, json.dumps(code)
209 else:
210 return 404, {}, "{}"
211
212 def _get_aws_region(self, full_url):
213 region = self.region_regex.search(full_url)
214 if region:
215 return region.group(1)
216 else:
217 return self.default_region
218
219 def _list_tags(self, request, full_url):
220 function_arn = unquote(self.path.rsplit('/', 1)[-1])
221
222 fn = self.lambda_backend.get_function_by_arn(function_arn)
223 if fn:
224 return 200, {}, json.dumps({'Tags': fn.tags})
225 else:
226 return 404, {}, "{}"
227
228 def _tag_resource(self, request, full_url):
229 function_arn = unquote(self.path.rsplit('/', 1)[-1])
230
231 if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):
232 return 200, {}, "{}"
233 else:
234 return 404, {}, "{}"
235
236 def _untag_resource(self, request, full_url):
237 function_arn = unquote(self.path.rsplit('/', 1)[-1])
238 tag_keys = self.querystring['tagKeys']
239
240 if self.lambda_backend.untag_resource(function_arn, tag_keys):
241 return 204, {}, "{}"
242 else:
243 return 404, {}, "{}"
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py
--- a/moto/awslambda/responses.py
+++ b/moto/awslambda/responses.py
@@ -183,7 +183,7 @@
fn = self.lambda_backend.publish_function(function_name)
if fn:
config = fn.get_configuration()
- return 200, {}, json.dumps(config)
+ return 201, {}, json.dumps(config)
else:
return 404, {}, "{}"
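
A hedged regression-test sketch for the status code fixed above, assuming moto's `mock_lambda` decorator and a boto3 Lambda client; the deployment-package and role details are illustrative and may need adjusting for the moto version in use:

```python
import io
import zipfile

import boto3
from moto import mock_lambda

def _zip_bytes() -> bytes:
    # Build a tiny, syntactically valid deployment package in memory.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("lambda_function.py",
                    "def lambda_handler(event, context):\n    return event\n")
    return buf.getvalue()

@mock_lambda
def test_publish_version_returns_201():
    client = boto3.client("lambda", region_name="us-east-1")
    client.create_function(
        FunctionName="testFunction",
        Runtime="python3.6",
        Role="arn:aws:iam::123456789012:role/test-role",
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": _zip_bytes()},
    )
    response = client.publish_version(FunctionName="testFunction")
    assert response["ResponseMetadata"]["HTTPStatusCode"] == 201
```
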
|
{"golden_diff": "diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py\n--- a/moto/awslambda/responses.py\n+++ b/moto/awslambda/responses.py\n@@ -183,7 +183,7 @@\n fn = self.lambda_backend.publish_function(function_name)\n if fn:\n config = fn.get_configuration()\n- return 200, {}, json.dumps(config)\n+ return 201, {}, json.dumps(config)\n else:\n return 404, {}, \"{}\"\n", "issue": "Lambda publish_version returns wrong status code\nIn boto3,when lambda publish_version is success,boto3 returns Http status code 201.\r\nBut, moto returns Http status code 200\r\n\r\n\r\nmoto and boto version\r\n```\r\nboto3 1.9.71\r\nbotocore 1.12.71\r\nmoto 1.3.7\r\n```\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport json\n\ntry:\n from urllib import unquote\nexcept ImportError:\n from urllib.parse import unquote\n\nfrom moto.core.utils import amz_crc32, amzn_request_id, path_url\nfrom moto.core.responses import BaseResponse\nfrom .models import lambda_backends\n\n\nclass LambdaResponse(BaseResponse):\n @property\n def json_body(self):\n \"\"\"\n :return: JSON\n :rtype: dict\n \"\"\"\n return json.loads(self.body)\n\n @property\n def lambda_backend(self):\n \"\"\"\n Get backend\n :return: Lambda Backend\n :rtype: moto.awslambda.models.LambdaBackend\n \"\"\"\n return lambda_backends[self.region]\n\n def root(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._list_functions(request, full_url, headers)\n elif request.method == 'POST':\n return self._create_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def function(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._get_function(request, full_url, headers)\n elif request.method == 'DELETE':\n return self._delete_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def versions(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n # This is ListVersionByFunction\n\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n return self._list_versions_by_function(function_name)\n\n elif request.method == 'POST':\n return self._publish_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n @amz_crc32\n @amzn_request_id\n def invoke(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'POST':\n return self._invoke(request, full_url)\n else:\n raise ValueError(\"Cannot handle request\")\n\n @amz_crc32\n @amzn_request_id\n def invoke_async(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'POST':\n return self._invoke_async(request, full_url)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def tag(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._list_tags(request, full_url)\n elif request.method == 'POST':\n return self._tag_resource(request, full_url)\n elif request.method == 'DELETE':\n return self._untag_resource(request, full_url)\n else:\n raise ValueError(\"Cannot handle {0} request\".format(request.method))\n\n def policy(self, request, full_url, headers):\n if request.method == 'GET':\n return self._get_policy(request, 
full_url, headers)\n if request.method == 'POST':\n return self._add_policy(request, full_url, headers)\n\n def _add_policy(self, request, full_url, headers):\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n if self.lambda_backend.get_function(function_name):\n policy = request.body.decode('utf8')\n self.lambda_backend.add_policy(function_name, policy)\n return 200, {}, json.dumps(dict(Statement=policy))\n else:\n return 404, {}, \"{}\"\n\n def _get_policy(self, request, full_url, headers):\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n if self.lambda_backend.get_function(function_name):\n lambda_function = self.lambda_backend.get_function(function_name)\n return 200, {}, json.dumps(dict(Policy=\"{\\\"Statement\\\":[\" + lambda_function.policy + \"]}\"))\n else:\n return 404, {}, \"{}\"\n\n def _invoke(self, request, full_url):\n response_headers = {}\n\n function_name = self.path.rsplit('/', 2)[-2]\n qualifier = self._get_param('qualifier')\n\n fn = self.lambda_backend.get_function(function_name, qualifier)\n if fn:\n payload = fn.invoke(self.body, self.headers, response_headers)\n response_headers['Content-Length'] = str(len(payload))\n return 202, response_headers, payload\n else:\n return 404, response_headers, \"{}\"\n\n def _invoke_async(self, request, full_url):\n response_headers = {}\n\n function_name = self.path.rsplit('/', 3)[-3]\n\n fn = self.lambda_backend.get_function(function_name, None)\n if fn:\n payload = fn.invoke(self.body, self.headers, response_headers)\n response_headers['Content-Length'] = str(len(payload))\n return 202, response_headers, payload\n else:\n return 404, response_headers, \"{}\"\n\n def _list_functions(self, request, full_url, headers):\n result = {\n 'Functions': []\n }\n\n for fn in self.lambda_backend.list_functions():\n json_data = fn.get_configuration()\n\n result['Functions'].append(json_data)\n\n return 200, {}, json.dumps(result)\n\n def _list_versions_by_function(self, function_name):\n result = {\n 'Versions': []\n }\n\n functions = self.lambda_backend.list_versions_by_function(function_name)\n if functions:\n for fn in functions:\n json_data = fn.get_configuration()\n result['Versions'].append(json_data)\n\n return 200, {}, json.dumps(result)\n\n def _create_function(self, request, full_url, headers):\n try:\n fn = self.lambda_backend.create_function(self.json_body)\n except ValueError as e:\n return 400, {}, json.dumps({\"Error\": {\"Code\": e.args[0], \"Message\": e.args[1]}})\n else:\n config = fn.get_configuration()\n return 201, {}, json.dumps(config)\n\n def _publish_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 2)[-2]\n\n fn = self.lambda_backend.publish_function(function_name)\n if fn:\n config = fn.get_configuration()\n return 200, {}, json.dumps(config)\n else:\n return 404, {}, \"{}\"\n\n def _delete_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 1)[-1]\n qualifier = self._get_param('Qualifier', None)\n\n if self.lambda_backend.delete_function(function_name, qualifier):\n return 204, {}, \"\"\n else:\n return 404, {}, \"{}\"\n\n def _get_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 1)[-1]\n qualifier = self._get_param('Qualifier', None)\n\n fn = self.lambda_backend.get_function(function_name, qualifier)\n\n if fn:\n code = fn.get_code()\n\n return 200, {}, json.dumps(code)\n else:\n 
return 404, {}, \"{}\"\n\n def _get_aws_region(self, full_url):\n region = self.region_regex.search(full_url)\n if region:\n return region.group(1)\n else:\n return self.default_region\n\n def _list_tags(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n\n fn = self.lambda_backend.get_function_by_arn(function_arn)\n if fn:\n return 200, {}, json.dumps({'Tags': fn.tags})\n else:\n return 404, {}, \"{}\"\n\n def _tag_resource(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n\n if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):\n return 200, {}, \"{}\"\n else:\n return 404, {}, \"{}\"\n\n def _untag_resource(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n tag_keys = self.querystring['tagKeys']\n\n if self.lambda_backend.untag_resource(function_arn, tag_keys):\n return 204, {}, \"{}\"\n else:\n return 404, {}, \"{}\"\n", "path": "moto/awslambda/responses.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport json\n\ntry:\n from urllib import unquote\nexcept ImportError:\n from urllib.parse import unquote\n\nfrom moto.core.utils import amz_crc32, amzn_request_id, path_url\nfrom moto.core.responses import BaseResponse\nfrom .models import lambda_backends\n\n\nclass LambdaResponse(BaseResponse):\n @property\n def json_body(self):\n \"\"\"\n :return: JSON\n :rtype: dict\n \"\"\"\n return json.loads(self.body)\n\n @property\n def lambda_backend(self):\n \"\"\"\n Get backend\n :return: Lambda Backend\n :rtype: moto.awslambda.models.LambdaBackend\n \"\"\"\n return lambda_backends[self.region]\n\n def root(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._list_functions(request, full_url, headers)\n elif request.method == 'POST':\n return self._create_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def function(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._get_function(request, full_url, headers)\n elif request.method == 'DELETE':\n return self._delete_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def versions(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n # This is ListVersionByFunction\n\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n return self._list_versions_by_function(function_name)\n\n elif request.method == 'POST':\n return self._publish_function(request, full_url, headers)\n else:\n raise ValueError(\"Cannot handle request\")\n\n @amz_crc32\n @amzn_request_id\n def invoke(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'POST':\n return self._invoke(request, full_url)\n else:\n raise ValueError(\"Cannot handle request\")\n\n @amz_crc32\n @amzn_request_id\n def invoke_async(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'POST':\n return self._invoke_async(request, full_url)\n else:\n raise ValueError(\"Cannot handle request\")\n\n def tag(self, request, full_url, headers):\n self.setup_class(request, full_url, headers)\n if request.method == 'GET':\n return self._list_tags(request, full_url)\n elif request.method == 'POST':\n return 
self._tag_resource(request, full_url)\n elif request.method == 'DELETE':\n return self._untag_resource(request, full_url)\n else:\n raise ValueError(\"Cannot handle {0} request\".format(request.method))\n\n def policy(self, request, full_url, headers):\n if request.method == 'GET':\n return self._get_policy(request, full_url, headers)\n if request.method == 'POST':\n return self._add_policy(request, full_url, headers)\n\n def _add_policy(self, request, full_url, headers):\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n if self.lambda_backend.get_function(function_name):\n policy = request.body.decode('utf8')\n self.lambda_backend.add_policy(function_name, policy)\n return 200, {}, json.dumps(dict(Statement=policy))\n else:\n return 404, {}, \"{}\"\n\n def _get_policy(self, request, full_url, headers):\n path = request.path if hasattr(request, 'path') else path_url(request.url)\n function_name = path.split('/')[-2]\n if self.lambda_backend.get_function(function_name):\n lambda_function = self.lambda_backend.get_function(function_name)\n return 200, {}, json.dumps(dict(Policy=\"{\\\"Statement\\\":[\" + lambda_function.policy + \"]}\"))\n else:\n return 404, {}, \"{}\"\n\n def _invoke(self, request, full_url):\n response_headers = {}\n\n function_name = self.path.rsplit('/', 2)[-2]\n qualifier = self._get_param('qualifier')\n\n fn = self.lambda_backend.get_function(function_name, qualifier)\n if fn:\n payload = fn.invoke(self.body, self.headers, response_headers)\n response_headers['Content-Length'] = str(len(payload))\n return 202, response_headers, payload\n else:\n return 404, response_headers, \"{}\"\n\n def _invoke_async(self, request, full_url):\n response_headers = {}\n\n function_name = self.path.rsplit('/', 3)[-3]\n\n fn = self.lambda_backend.get_function(function_name, None)\n if fn:\n payload = fn.invoke(self.body, self.headers, response_headers)\n response_headers['Content-Length'] = str(len(payload))\n return 202, response_headers, payload\n else:\n return 404, response_headers, \"{}\"\n\n def _list_functions(self, request, full_url, headers):\n result = {\n 'Functions': []\n }\n\n for fn in self.lambda_backend.list_functions():\n json_data = fn.get_configuration()\n\n result['Functions'].append(json_data)\n\n return 200, {}, json.dumps(result)\n\n def _list_versions_by_function(self, function_name):\n result = {\n 'Versions': []\n }\n\n functions = self.lambda_backend.list_versions_by_function(function_name)\n if functions:\n for fn in functions:\n json_data = fn.get_configuration()\n result['Versions'].append(json_data)\n\n return 200, {}, json.dumps(result)\n\n def _create_function(self, request, full_url, headers):\n try:\n fn = self.lambda_backend.create_function(self.json_body)\n except ValueError as e:\n return 400, {}, json.dumps({\"Error\": {\"Code\": e.args[0], \"Message\": e.args[1]}})\n else:\n config = fn.get_configuration()\n return 201, {}, json.dumps(config)\n\n def _publish_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 2)[-2]\n\n fn = self.lambda_backend.publish_function(function_name)\n if fn:\n config = fn.get_configuration()\n return 201, {}, json.dumps(config)\n else:\n return 404, {}, \"{}\"\n\n def _delete_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 1)[-1]\n qualifier = self._get_param('Qualifier', None)\n\n if self.lambda_backend.delete_function(function_name, qualifier):\n return 204, {}, \"\"\n else:\n return 
404, {}, \"{}\"\n\n def _get_function(self, request, full_url, headers):\n function_name = self.path.rsplit('/', 1)[-1]\n qualifier = self._get_param('Qualifier', None)\n\n fn = self.lambda_backend.get_function(function_name, qualifier)\n\n if fn:\n code = fn.get_code()\n\n return 200, {}, json.dumps(code)\n else:\n return 404, {}, \"{}\"\n\n def _get_aws_region(self, full_url):\n region = self.region_regex.search(full_url)\n if region:\n return region.group(1)\n else:\n return self.default_region\n\n def _list_tags(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n\n fn = self.lambda_backend.get_function_by_arn(function_arn)\n if fn:\n return 200, {}, json.dumps({'Tags': fn.tags})\n else:\n return 404, {}, \"{}\"\n\n def _tag_resource(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n\n if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']):\n return 200, {}, \"{}\"\n else:\n return 404, {}, \"{}\"\n\n def _untag_resource(self, request, full_url):\n function_arn = unquote(self.path.rsplit('/', 1)[-1])\n tag_keys = self.querystring['tagKeys']\n\n if self.lambda_backend.untag_resource(function_arn, tag_keys):\n return 204, {}, \"{}\"\n else:\n return 404, {}, \"{}\"\n", "path": "moto/awslambda/responses.py"}]}
| 2,871 | 123 |
gh_patches_debug_4184
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-2995
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed build of docs in CI
```
#!/bin/bash -eo pipefail
cd docs
make html
npm install && touch node_modules
npm WARN deprecated [email protected]: request has been deprecated, see https://github.com/request/request/issues/3142
npm notice created a lockfile as package-lock.json. You should commit this file.
npm WARN optional SKIPPING OPTIONAL DEPENDENCY: fsevents@~2.1.2 (node_modules/chokidar/node_modules/fsevents):
npm WARN notsup SKIPPING OPTIONAL DEPENDENCY: Unsupported platform for [email protected]: wanted {"os":"darwin","arch":"any"} (current: {"os":"linux","arch":"x64"})
npm WARN [email protected] No repository field.
added 216 packages from 514 contributors and audited 325 packages in 4.188s
14 packages are looking for funding
run `npm fund` for details
found 2 low severity vulnerabilities
run `npm audit fix` to fix them, or `npm audit` for details
npm run rest-api
> [email protected] rest-api /home/circleci/project/docs
> bootprint openapi ./rest-api.yml source/_static/rest-api
[
'source/_static/rest-api/index.html',
'source/_static/rest-api/main.css',
'source/_static/rest-api/main.css.map'
]
sphinx-build -b html -d build/doctrees "-W" source build/html
Running Sphinx v2.4.4
Adding copy buttons to code blocks...
making output directory... done
/home/circleci/.local/lib/python3.6/site-packages/sphinx/util/compat.py:32: RemovedInSphinx30Warning: The config variable "source_parsers" is deprecated. Please update your extension for the parser and remove the setting.
RemovedInSphinx30Warning)
/home/circleci/.local/lib/python3.6/site-packages/sphinx/util/compat.py:36: RemovedInSphinx30Warning: app.add_source_parser() does not support suffix argument. Use app.add_source_suffix() instead.
app.add_source_parser(suffix, parser)
Theme error:
no theme named 'pandas_sphinx_theme' found (missing theme.conf?)
Makefile:64: recipe for target 'html' failed
make: *** [html] Error 2
```
FYI @choldgraf @betatim
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 import os
4 import shlex
5 import sys
6
7 # Set paths
8 sys.path.insert(0, os.path.abspath('.'))
9
10 # -- General configuration ------------------------------------------------
11
12 # Minimal Sphinx version
13 needs_sphinx = '1.4'
14
15 # Sphinx extension modules
16 extensions = [
17 'sphinx.ext.autodoc',
18 'sphinx.ext.intersphinx',
19 'sphinx.ext.napoleon',
20 'autodoc_traits',
21 'sphinx_copybutton',
22 'sphinx-jsonschema',
23 ]
24
25 templates_path = ['_templates']
26
27 # The master toctree document.
28 master_doc = 'index'
29
30 # General information about the project.
31 project = u'JupyterHub'
32 copyright = u'2016, Project Jupyter team'
33 author = u'Project Jupyter team'
34
35 # Autopopulate version
36 from os.path import dirname
37
38 docs = dirname(dirname(__file__))
39 root = dirname(docs)
40 sys.path.insert(0, root)
41
42 import jupyterhub
43
44 # The short X.Y version.
45 version = '%i.%i' % jupyterhub.version_info[:2]
46 # The full version, including alpha/beta/rc tags.
47 release = jupyterhub.__version__
48
49 language = None
50 exclude_patterns = []
51 pygments_style = 'sphinx'
52 todo_include_todos = False
53
54 # Set the default role so we can use `foo` instead of ``foo``
55 default_role = 'literal'
56
57 # -- Source -------------------------------------------------------------
58
59 import recommonmark
60 from recommonmark.transform import AutoStructify
61
62
63 def setup(app):
64 app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)
65 app.add_stylesheet('custom.css')
66 app.add_transform(AutoStructify)
67
68
69 source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
70
71 source_suffix = ['.rst', '.md']
72 # source_encoding = 'utf-8-sig'
73
74 # -- Options for HTML output ----------------------------------------------
75
76 # The theme to use for HTML and HTML Help pages.
77 html_theme = 'pandas_sphinx_theme'
78
79 html_logo = '_static/images/logo/logo.png'
80 html_favicon = '_static/images/logo/favicon.ico'
81
82 # Paths that contain custom static files (such as style sheets)
83 html_static_path = ['_static']
84
85 htmlhelp_basename = 'JupyterHubdoc'
86
87 # -- Options for LaTeX output ---------------------------------------------
88
89 latex_elements = {
90 # 'papersize': 'letterpaper',
91 # 'pointsize': '10pt',
92 # 'preamble': '',
93 # 'figure_align': 'htbp',
94 }
95
96 # Grouping the document tree into LaTeX files. List of tuples
97 # (source start file, target name, title,
98 # author, documentclass [howto, manual, or own class]).
99 latex_documents = [
100 (
101 master_doc,
102 'JupyterHub.tex',
103 u'JupyterHub Documentation',
104 u'Project Jupyter team',
105 'manual',
106 )
107 ]
108
109 # latex_logo = None
110 # latex_use_parts = False
111 # latex_show_pagerefs = False
112 # latex_show_urls = False
113 # latex_appendices = []
114 # latex_domain_indices = True
115
116
117 # -- manual page output -------------------------------------------------
118
119 # One entry per manual page. List of tuples
120 # (source start file, name, description, authors, manual section).
121 man_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)]
122
123 # man_show_urls = False
124
125
126 # -- Texinfo output -----------------------------------------------------
127
128 # Grouping the document tree into Texinfo files. List of tuples
129 # (source start file, target name, title, author,
130 # dir menu entry, description, category)
131 texinfo_documents = [
132 (
133 master_doc,
134 'JupyterHub',
135 u'JupyterHub Documentation',
136 author,
137 'JupyterHub',
138 'One line description of project.',
139 'Miscellaneous',
140 )
141 ]
142
143 # texinfo_appendices = []
144 # texinfo_domain_indices = True
145 # texinfo_show_urls = 'footnote'
146 # texinfo_no_detailmenu = False
147
148
149 # -- Epub output --------------------------------------------------------
150
151 # Bibliographic Dublin Core info.
152 epub_title = project
153 epub_author = author
154 epub_publisher = author
155 epub_copyright = copyright
156
157 # A list of files that should not be packed into the epub file.
158 epub_exclude_files = ['search.html']
159
160 # -- Intersphinx ----------------------------------------------------------
161
162 intersphinx_mapping = {'https://docs.python.org/3/': None}
163
164 # -- Read The Docs --------------------------------------------------------
165
166 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
167 if on_rtd:
168 # readthedocs.org uses their theme by default, so no need to specify it
169 # build rest-api, since RTD doesn't run make
170 from subprocess import check_call as sh
171
172 sh(['make', 'rest-api'], cwd=docs)
173
174 # -- Spell checking -------------------------------------------------------
175
176 try:
177 import sphinxcontrib.spelling
178 except ImportError:
179 pass
180 else:
181 extensions.append("sphinxcontrib.spelling")
182
183 spelling_word_list_filename = 'spelling_wordlist.txt'
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -74,7 +74,7 @@
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
-html_theme = 'pandas_sphinx_theme'
+html_theme = 'pydata_sphinx_theme'
html_logo = '_static/images/logo/logo.png'
html_favicon = '_static/images/logo/favicon.ico'
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -74,7 +74,7 @@\n # -- Options for HTML output ----------------------------------------------\n \n # The theme to use for HTML and HTML Help pages.\n-html_theme = 'pandas_sphinx_theme'\n+html_theme = 'pydata_sphinx_theme'\n \n html_logo = '_static/images/logo/logo.png'\n html_favicon = '_static/images/logo/favicon.ico'\n", "issue": "Failed build of docs in CI\n```\r\n#!/bin/bash -eo pipefail\r\ncd docs\r\nmake html\r\nnpm install && touch node_modules\r\nnpm WARN deprecated [email protected]: request has been deprecated, see https://github.com/request/request/issues/3142\r\nnpm notice created a lockfile as package-lock.json. You should commit this file.\r\nnpm WARN optional SKIPPING OPTIONAL DEPENDENCY: fsevents@~2.1.2 (node_modules/chokidar/node_modules/fsevents):\r\nnpm WARN notsup SKIPPING OPTIONAL DEPENDENCY: Unsupported platform for [email protected]: wanted {\"os\":\"darwin\",\"arch\":\"any\"} (current: {\"os\":\"linux\",\"arch\":\"x64\"})\r\nnpm WARN [email protected] No repository field.\r\n\r\nadded 216 packages from 514 contributors and audited 325 packages in 4.188s\r\n\r\n14 packages are looking for funding\r\n run `npm fund` for details\r\n\r\nfound 2 low severity vulnerabilities\r\n run `npm audit fix` to fix them, or `npm audit` for details\r\nnpm run rest-api\r\n\r\n> [email protected] rest-api /home/circleci/project/docs\r\n> bootprint openapi ./rest-api.yml source/_static/rest-api\r\n\r\n[\r\n 'source/_static/rest-api/index.html',\r\n 'source/_static/rest-api/main.css',\r\n 'source/_static/rest-api/main.css.map'\r\n]\r\nsphinx-build -b html -d build/doctrees \"-W\" source build/html\r\nRunning Sphinx v2.4.4\r\nAdding copy buttons to code blocks...\r\nmaking output directory... done\r\n/home/circleci/.local/lib/python3.6/site-packages/sphinx/util/compat.py:32: RemovedInSphinx30Warning: The config variable \"source_parsers\" is deprecated. Please update your extension for the parser and remove the setting.\r\n RemovedInSphinx30Warning)\r\n/home/circleci/.local/lib/python3.6/site-packages/sphinx/util/compat.py:36: RemovedInSphinx30Warning: app.add_source_parser() does not support suffix argument. 
Use app.add_source_suffix() instead.\r\n app.add_source_parser(suffix, parser)\r\n\r\nTheme error:\r\nno theme named 'pandas_sphinx_theme' found (missing theme.conf?)\r\nMakefile:64: recipe for target 'html' failed\r\nmake: *** [html] Error 2\r\n```\r\n\r\nFYI @choldgraf @betatim\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport shlex\nimport sys\n\n# Set paths\nsys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# Minimal Sphinx version\nneeds_sphinx = '1.4'\n\n# Sphinx extension modules\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.napoleon',\n 'autodoc_traits',\n 'sphinx_copybutton',\n 'sphinx-jsonschema',\n]\n\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'JupyterHub'\ncopyright = u'2016, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# Autopopulate version\nfrom os.path import dirname\n\ndocs = dirname(dirname(__file__))\nroot = dirname(docs)\nsys.path.insert(0, root)\n\nimport jupyterhub\n\n# The short X.Y version.\nversion = '%i.%i' % jupyterhub.version_info[:2]\n# The full version, including alpha/beta/rc tags.\nrelease = jupyterhub.__version__\n\nlanguage = None\nexclude_patterns = []\npygments_style = 'sphinx'\ntodo_include_todos = False\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# -- Source -------------------------------------------------------------\n\nimport recommonmark\nfrom recommonmark.transform import AutoStructify\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)\n app.add_stylesheet('custom.css')\n app.add_transform(AutoStructify)\n\n\nsource_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}\n\nsource_suffix = ['.rst', '.md']\n# source_encoding = 'utf-8-sig'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.\nhtml_theme = 'pandas_sphinx_theme'\n\nhtml_logo = '_static/images/logo/logo.png'\nhtml_favicon = '_static/images/logo/favicon.ico'\n\n# Paths that contain custom static files (such as style sheets)\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'JupyterHubdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # 'papersize': 'letterpaper',\n # 'pointsize': '10pt',\n # 'preamble': '',\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'JupyterHub.tex',\n u'JupyterHub Documentation',\n u'Project Jupyter team',\n 'manual',\n )\n]\n\n# latex_logo = None\n# latex_use_parts = False\n# latex_show_pagerefs = False\n# latex_show_urls = False\n# latex_appendices = []\n# latex_domain_indices = True\n\n\n# -- manual page output -------------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)]\n\n# man_show_urls = False\n\n\n# -- Texinfo output -----------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'JupyterHub',\n u'JupyterHub Documentation',\n author,\n 'JupyterHub',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# texinfo_appendices = []\n# texinfo_domain_indices = True\n# texinfo_show_urls = 'footnote'\n# texinfo_no_detailmenu = False\n\n\n# -- Epub output --------------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Intersphinx ----------------------------------------------------------\n\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# -- Read The Docs --------------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif on_rtd:\n # readthedocs.org uses their theme by default, so no need to specify it\n # build rest-api, since RTD doesn't run make\n from subprocess import check_call as sh\n\n sh(['make', 'rest-api'], cwd=docs)\n\n# -- Spell checking -------------------------------------------------------\n\ntry:\n import sphinxcontrib.spelling\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\n\nspelling_word_list_filename = 'spelling_wordlist.txt'\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport shlex\nimport sys\n\n# Set paths\nsys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# Minimal Sphinx version\nneeds_sphinx = '1.4'\n\n# Sphinx extension modules\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.napoleon',\n 'autodoc_traits',\n 'sphinx_copybutton',\n 'sphinx-jsonschema',\n]\n\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'JupyterHub'\ncopyright = u'2016, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# Autopopulate version\nfrom os.path import dirname\n\ndocs = dirname(dirname(__file__))\nroot = dirname(docs)\nsys.path.insert(0, root)\n\nimport jupyterhub\n\n# The short X.Y version.\nversion = '%i.%i' % jupyterhub.version_info[:2]\n# The full version, including alpha/beta/rc tags.\nrelease = jupyterhub.__version__\n\nlanguage = None\nexclude_patterns = []\npygments_style = 'sphinx'\ntodo_include_todos = False\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# -- Source -------------------------------------------------------------\n\nimport recommonmark\nfrom recommonmark.transform import AutoStructify\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)\n app.add_stylesheet('custom.css')\n app.add_transform(AutoStructify)\n\n\nsource_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}\n\nsource_suffix = ['.rst', '.md']\n# source_encoding = 'utf-8-sig'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.\nhtml_theme = 'pydata_sphinx_theme'\n\nhtml_logo = '_static/images/logo/logo.png'\nhtml_favicon = '_static/images/logo/favicon.ico'\n\n# Paths that contain custom static files (such as style sheets)\nhtml_static_path = 
['_static']\n\nhtmlhelp_basename = 'JupyterHubdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # 'papersize': 'letterpaper',\n # 'pointsize': '10pt',\n # 'preamble': '',\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'JupyterHub.tex',\n u'JupyterHub Documentation',\n u'Project Jupyter team',\n 'manual',\n )\n]\n\n# latex_logo = None\n# latex_use_parts = False\n# latex_show_pagerefs = False\n# latex_show_urls = False\n# latex_appendices = []\n# latex_domain_indices = True\n\n\n# -- manual page output -------------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)]\n\n# man_show_urls = False\n\n\n# -- Texinfo output -----------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'JupyterHub',\n u'JupyterHub Documentation',\n author,\n 'JupyterHub',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# texinfo_appendices = []\n# texinfo_domain_indices = True\n# texinfo_show_urls = 'footnote'\n# texinfo_no_detailmenu = False\n\n\n# -- Epub output --------------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Intersphinx ----------------------------------------------------------\n\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# -- Read The Docs --------------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif on_rtd:\n # readthedocs.org uses their theme by default, so no need to specify it\n # build rest-api, since RTD doesn't run make\n from subprocess import check_call as sh\n\n sh(['make', 'rest-api'], cwd=docs)\n\n# -- Spell checking -------------------------------------------------------\n\ntry:\n import sphinxcontrib.spelling\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\n\nspelling_word_list_filename = 'spelling_wordlist.txt'\n", "path": "docs/source/conf.py"}]}
| 2,358 | 104 |
gh_patches_debug_27228
|
rasdani/github-patches
|
git_diff
|
microsoft__AzureTRE-176
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Standardize TRE identifiers
## Description
As a TRE developer
I want naming of identifiers to be simple and standardized across the TRE
So it will as intuitive as possible
Currently we have Core ID, TRE ID and resource_name_prefix, which all are unique IDs for a TRE instance. ([Ref to code](https://github.com/microsoft/AzureTRE/blob/3cc8e14c6a16d5bb940f259dd5cb257e735e448b/templates/core/terraform/main.tf#L17))
They are used to ensure no clashes between names, but having a single identifier is sufficient.
### A simplified solution
When creating a TRE instance, a unique identifier is needed, to make sure no clashes occur. That identifier should be named TRE_ID and can be up to 10 chars long (Alphanumeric, underscore, and hyphen). If the Cloud Administrator wants to use a specific naming convention e.g. one that includes environment, the Cloud Administrator can do so.
Examples of a TRE_ID:
- mytre
- msfttre-dev
- tre123
Hence the TRE_ID is a unique identifier for the TRE instance, replacing the Core ID, which consisted of TRE ID + resource_name_prefix.
## Acceptance criteria
- [x] TRE provisioning script uses the TRE ID as the TRE instance name, hence creates the cross-cutting services in a resource group with the name of TRE ID e.g. mytre
- [x] TRE provisioning script does not require environment parameter
- [x] Workspace bundle uses TRE_ID (not Core ID as now) as the identifier for the TRE instance
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `management_api_app/core/config.py`
Content:
```
1 from starlette.config import Config
2
3
4 config = Config(".env")
5
6 # API settings
7 API_PREFIX = "/api"
8 PROJECT_NAME: str = config("PROJECT_NAME", default="Azure TRE API")
9 DEBUG: bool = config("DEBUG", cast=bool, default=False)
10 VERSION = "0.0.0"
11
12 # Resource Info
13 RESOURCE_LOCATION: str = config("RESOURCE_LOCATION", default="")
14 CORE_ID: str = config("CORE_ID", default="")
15
16 # State store configuration
17 STATE_STORE_ENDPOINT: str = config("STATE_STORE_ENDPOINT", default="") # Cosmos DB endpoint
18 STATE_STORE_KEY: str = config("STATE_STORE_KEY", default="") # Cosmos DB access key
19 STATE_STORE_DATABASE = "AzureTRE"
20 STATE_STORE_RESOURCES_CONTAINER = "Resources"
21 STATE_STORE_BUNDLE_SPECS_CONTAINER = "ResourceSpecs"
22
23 # Service bus configuration
24 SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE: str = config("SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE", default="")
25 SERVICE_BUS_RESOURCE_REQUEST_QUEUE: str = config("SERVICE_BUS_RESOURCE_REQUEST_QUEUE", default="")
26
```
Path: `management_api_app/db/repositories/workspaces.py`
Content:
```
1 import uuid
2 from typing import List
3
4 from azure.cosmos import ContainerProxy, CosmosClient
5 from pydantic import UUID4
6
7 from core import config
8 from db.errors import EntityDoesNotExist
9 from db.query_builder import QueryBuilder
10 from db.repositories.base import BaseRepository
11 from models.domain.resource import ResourceType
12 from models.domain.workspace import Workspace
13 from models.schemas.workspace import WorkspaceInCreate
14
15
16 class WorkspaceRepository(BaseRepository):
17 def __init__(self, client: CosmosClient):
18 super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)
19
20 @property
21 def container(self) -> ContainerProxy:
22 return self._container
23
24 def get_all_active_workspaces(self) -> List[Workspace]:
25 query = QueryBuilder().select_active_resources(ResourceType.Workspace).build()
26 workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))
27 return workspaces
28
29 def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:
30 query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id(workspace_id).build()
31 workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))
32
33 if workspaces:
34 return workspaces[0]
35 else:
36 raise EntityDoesNotExist
37
38 def create_workspace(self, workspace_create: WorkspaceInCreate) -> Workspace:
39 resource_spec_parameters = {
40 "location": config.RESOURCE_LOCATION,
41 "workspace_id": "0001", # TODO: Calculate this value - Issue #166
42 "core_id": config.CORE_ID,
43 "address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52
44 }
45
46 workspace = Workspace(
47 id=str(uuid.uuid4()),
48 displayName=workspace_create.displayName,
49 description=workspace_create.description,
50 resourceSpecName=workspace_create.workspaceType,
51 resourceSpecVersion="0.1.0", # TODO: Calculate latest - Issue #167
52 resourceSpecParameters=resource_spec_parameters
53 )
54
55 self.container.create_item(body=workspace.dict())
56 return workspace
57
```
Path: `management_api_app/models/schemas/workspace.py`
Content:
```
1 from typing import List
2 from pydantic import BaseModel, Field
3
4 from models.domain.workspace import Workspace
5
6
7 def get_sample_workspace(workspace_id: str, spec_workspace_id: str = "0001") -> dict:
8 return {
9 "id": workspace_id,
10 "displayName": "my workspace",
11 "description": "some description",
12 "resourceSpecName": "tre-workspace-vanilla",
13 "resourceSpecVersion": "0.1.0",
14 "resourceSpecParameters": {
15 "location": "westeurope",
16 "workspace_id": spec_workspace_id,
17 "core_id": "mytre-dev-1234",
18 "address_space": "10.2.1.0/24"
19 },
20 "status": "not_deployed",
21 "isDeleted": False,
22 "resourceType": "workspace",
23 "workspaceURL": ""
24 }
25
26
27 class WorkspaceInResponse(BaseModel):
28 workspace: Workspace
29
30 class Config:
31 schema_extra = {
32 "example": {
33 "workspace": get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e")
34 }
35 }
36
37
38 class WorkspacesInList(BaseModel):
39 workspaces: List[Workspace]
40
41 class Config:
42 schema_extra = {
43 "example": {
44 "workspaces": [
45 get_sample_workspace("933ad738-7265-4b5f-9eae-a1a62928772e", "0001"),
46 get_sample_workspace("2fdc9fba-726e-4db6-a1b8-9018a2165748", "0002"),
47 ]
48 }
49 }
50
51
52 class WorkspaceInCreate(BaseModel):
53 displayName: str = Field(title="Friendly name for workspace")
54 workspaceType: str = Field(title="Workspace type", description="Bundle name")
55 description: str = Field(title="Workspace description")
56 parameters: dict = Field({}, title="Workspace parameters", description="Values for the parameters required by the workspace resource specification")
57
58 class Config:
59 schema_extra = {
60 "example": {
61 "displayName": "My workspace",
62 "description": "workspace for team X",
63 "workspaceType": "tre-workspace-vanilla",
64 "parameters": {}
65 }
66 }
67
68
69 class WorkspaceIdInResponse(BaseModel):
70 workspaceId: str
71
72 class Config:
73 schema_extra = {
74 "example": {
75 "workspaceId": "49a7445c-aae6-41ec-a539-30dfa90ab1ae",
76 }
77 }
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/management_api_app/core/config.py b/management_api_app/core/config.py
--- a/management_api_app/core/config.py
+++ b/management_api_app/core/config.py
@@ -11,7 +11,7 @@
# Resource Info
RESOURCE_LOCATION: str = config("RESOURCE_LOCATION", default="")
-CORE_ID: str = config("CORE_ID", default="")
+TRE_ID: str = config("TRE_ID", default="")
# State store configuration
STATE_STORE_ENDPOINT: str = config("STATE_STORE_ENDPOINT", default="") # Cosmos DB endpoint
diff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py
--- a/management_api_app/db/repositories/workspaces.py
+++ b/management_api_app/db/repositories/workspaces.py
@@ -39,7 +39,7 @@
resource_spec_parameters = {
"location": config.RESOURCE_LOCATION,
"workspace_id": "0001", # TODO: Calculate this value - Issue #166
- "core_id": config.CORE_ID,
+ "tre_id": config.TRE_ID,
"address_space": "10.2.1.0/24" # TODO: Calculate this value - Issue #52
}
diff --git a/management_api_app/models/schemas/workspace.py b/management_api_app/models/schemas/workspace.py
--- a/management_api_app/models/schemas/workspace.py
+++ b/management_api_app/models/schemas/workspace.py
@@ -14,7 +14,7 @@
"resourceSpecParameters": {
"location": "westeurope",
"workspace_id": spec_workspace_id,
- "core_id": "mytre-dev-1234",
+ "tre_id": "mytre-dev-1234",
"address_space": "10.2.1.0/24"
},
"status": "not_deployed",
|
{"golden_diff": "diff --git a/management_api_app/core/config.py b/management_api_app/core/config.py\n--- a/management_api_app/core/config.py\n+++ b/management_api_app/core/config.py\n@@ -11,7 +11,7 @@\n \n # Resource Info\n RESOURCE_LOCATION: str = config(\"RESOURCE_LOCATION\", default=\"\")\n-CORE_ID: str = config(\"CORE_ID\", default=\"\")\n+TRE_ID: str = config(\"TRE_ID\", default=\"\")\n \n # State store configuration\n STATE_STORE_ENDPOINT: str = config(\"STATE_STORE_ENDPOINT\", default=\"\") # Cosmos DB endpoint\ndiff --git a/management_api_app/db/repositories/workspaces.py b/management_api_app/db/repositories/workspaces.py\n--- a/management_api_app/db/repositories/workspaces.py\n+++ b/management_api_app/db/repositories/workspaces.py\n@@ -39,7 +39,7 @@\n resource_spec_parameters = {\n \"location\": config.RESOURCE_LOCATION,\n \"workspace_id\": \"0001\", # TODO: Calculate this value - Issue #166\n- \"core_id\": config.CORE_ID,\n+ \"tre_id\": config.TRE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - Issue #52\n }\n \ndiff --git a/management_api_app/models/schemas/workspace.py b/management_api_app/models/schemas/workspace.py\n--- a/management_api_app/models/schemas/workspace.py\n+++ b/management_api_app/models/schemas/workspace.py\n@@ -14,7 +14,7 @@\n \"resourceSpecParameters\": {\n \"location\": \"westeurope\",\n \"workspace_id\": spec_workspace_id,\n- \"core_id\": \"mytre-dev-1234\",\n+ \"tre_id\": \"mytre-dev-1234\",\n \"address_space\": \"10.2.1.0/24\"\n },\n \"status\": \"not_deployed\",\n", "issue": "Standardize TRE identifiers\n## Description\r\n\r\nAs a TRE developer\r\nI want naming of identifiers to be simple and standardized across the TRE\r\nSo it will as intuitive as possible\r\n\r\nCurrently we have Core ID, TRE ID and resource_name_prefix, which all are unique IDs for a TRE instance. ([Ref to code](https://github.com/microsoft/AzureTRE/blob/3cc8e14c6a16d5bb940f259dd5cb257e735e448b/templates/core/terraform/main.tf#L17))\r\nThey are used to ensure no clashes between names, but having a single identifier is sufficient.\r\n\r\n### A simplified solution\r\n\r\nWhen creating a TRE instance, a unique identifier is needed, to make sure no clashes occur. That identifier should be named TRE_ID and can be up to 10 chars long (Alphanumeric, underscore, and hyphen). If the Cloud Administrator wants to use a specific naming convention e.g. one that includes environment, the Cloud Administrator can do so.\r\n\r\nExamples of a TRE_ID:\r\n\r\n- mytre\r\n- msfttre-dev\r\n- tre123\r\n\r\nHench the TRE_ID is an unique identifier for the TRE instance replacing the Core ID, which consisted of TRE ID + resource_name_prefix.\r\n\r\n## Acceptance criteria\r\n\r\n- [x] TRE provisioning script uses the TRE ID as the TRE instance name, hence creates the cross-cutting services in a ressource group with the name of TRE ID e.g. 
mytre\r\n- [x] TRE provisioning script does not require environment parameter\r\n- [x] Workspace bundle uses TRE_ID (not Core ID as now) as the identifier for the TRE instance\r\n\n", "before_files": [{"content": "from starlette.config import Config\n\n\nconfig = Config(\".env\")\n\n# API settings\nAPI_PREFIX = \"/api\"\nPROJECT_NAME: str = config(\"PROJECT_NAME\", default=\"Azure TRE API\")\nDEBUG: bool = config(\"DEBUG\", cast=bool, default=False)\nVERSION = \"0.0.0\"\n\n# Resource Info\nRESOURCE_LOCATION: str = config(\"RESOURCE_LOCATION\", default=\"\")\nCORE_ID: str = config(\"CORE_ID\", default=\"\")\n\n# State store configuration\nSTATE_STORE_ENDPOINT: str = config(\"STATE_STORE_ENDPOINT\", default=\"\") # Cosmos DB endpoint\nSTATE_STORE_KEY: str = config(\"STATE_STORE_KEY\", default=\"\") # Cosmos DB access key\nSTATE_STORE_DATABASE = \"AzureTRE\"\nSTATE_STORE_RESOURCES_CONTAINER = \"Resources\"\nSTATE_STORE_BUNDLE_SPECS_CONTAINER = \"ResourceSpecs\"\n\n# Service bus configuration\nSERVICE_BUS_FULLY_QUALIFIED_NAMESPACE: str = config(\"SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE\", default=\"\")\nSERVICE_BUS_RESOURCE_REQUEST_QUEUE: str = config(\"SERVICE_BUS_RESOURCE_REQUEST_QUEUE\", default=\"\")\n", "path": "management_api_app/core/config.py"}, {"content": "import uuid\nfrom typing import List\n\nfrom azure.cosmos import ContainerProxy, CosmosClient\nfrom pydantic import UUID4\n\nfrom core import config\nfrom db.errors import EntityDoesNotExist\nfrom db.query_builder import QueryBuilder\nfrom db.repositories.base import BaseRepository\nfrom models.domain.resource import ResourceType\nfrom models.domain.workspace import Workspace\nfrom models.schemas.workspace import WorkspaceInCreate\n\n\nclass WorkspaceRepository(BaseRepository):\n def __init__(self, client: CosmosClient):\n super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)\n\n @property\n def container(self) -> ContainerProxy:\n return self._container\n\n def get_all_active_workspaces(self) -> List[Workspace]:\n query = QueryBuilder().select_active_resources(ResourceType.Workspace).build()\n workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))\n return workspaces\n\n def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:\n query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id(workspace_id).build()\n workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))\n\n if workspaces:\n return workspaces[0]\n else:\n raise EntityDoesNotExist\n\n def create_workspace(self, workspace_create: WorkspaceInCreate) -> Workspace:\n resource_spec_parameters = {\n \"location\": config.RESOURCE_LOCATION,\n \"workspace_id\": \"0001\", # TODO: Calculate this value - Issue #166\n \"core_id\": config.CORE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - Issue #52\n }\n\n workspace = Workspace(\n id=str(uuid.uuid4()),\n displayName=workspace_create.displayName,\n description=workspace_create.description,\n resourceSpecName=workspace_create.workspaceType,\n resourceSpecVersion=\"0.1.0\", # TODO: Calculate latest - Issue #167\n resourceSpecParameters=resource_spec_parameters\n )\n\n self.container.create_item(body=workspace.dict())\n return workspace\n", "path": "management_api_app/db/repositories/workspaces.py"}, {"content": "from typing import List\nfrom pydantic import BaseModel, Field\n\nfrom models.domain.workspace import Workspace\n\n\ndef get_sample_workspace(workspace_id: str, spec_workspace_id: 
str = \"0001\") -> dict:\n return {\n \"id\": workspace_id,\n \"displayName\": \"my workspace\",\n \"description\": \"some description\",\n \"resourceSpecName\": \"tre-workspace-vanilla\",\n \"resourceSpecVersion\": \"0.1.0\",\n \"resourceSpecParameters\": {\n \"location\": \"westeurope\",\n \"workspace_id\": spec_workspace_id,\n \"core_id\": \"mytre-dev-1234\",\n \"address_space\": \"10.2.1.0/24\"\n },\n \"status\": \"not_deployed\",\n \"isDeleted\": False,\n \"resourceType\": \"workspace\",\n \"workspaceURL\": \"\"\n }\n\n\nclass WorkspaceInResponse(BaseModel):\n workspace: Workspace\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspace\": get_sample_workspace(\"933ad738-7265-4b5f-9eae-a1a62928772e\")\n }\n }\n\n\nclass WorkspacesInList(BaseModel):\n workspaces: List[Workspace]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspaces\": [\n get_sample_workspace(\"933ad738-7265-4b5f-9eae-a1a62928772e\", \"0001\"),\n get_sample_workspace(\"2fdc9fba-726e-4db6-a1b8-9018a2165748\", \"0002\"),\n ]\n }\n }\n\n\nclass WorkspaceInCreate(BaseModel):\n displayName: str = Field(title=\"Friendly name for workspace\")\n workspaceType: str = Field(title=\"Workspace type\", description=\"Bundle name\")\n description: str = Field(title=\"Workspace description\")\n parameters: dict = Field({}, title=\"Workspace parameters\", description=\"Values for the parameters required by the workspace resource specification\")\n\n class Config:\n schema_extra = {\n \"example\": {\n \"displayName\": \"My workspace\",\n \"description\": \"workspace for team X\",\n \"workspaceType\": \"tre-workspace-vanilla\",\n \"parameters\": {}\n }\n }\n\n\nclass WorkspaceIdInResponse(BaseModel):\n workspaceId: str\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspaceId\": \"49a7445c-aae6-41ec-a539-30dfa90ab1ae\",\n }\n }\n", "path": "management_api_app/models/schemas/workspace.py"}], "after_files": [{"content": "from starlette.config import Config\n\n\nconfig = Config(\".env\")\n\n# API settings\nAPI_PREFIX = \"/api\"\nPROJECT_NAME: str = config(\"PROJECT_NAME\", default=\"Azure TRE API\")\nDEBUG: bool = config(\"DEBUG\", cast=bool, default=False)\nVERSION = \"0.0.0\"\n\n# Resource Info\nRESOURCE_LOCATION: str = config(\"RESOURCE_LOCATION\", default=\"\")\nTRE_ID: str = config(\"TRE_ID\", default=\"\")\n\n# State store configuration\nSTATE_STORE_ENDPOINT: str = config(\"STATE_STORE_ENDPOINT\", default=\"\") # Cosmos DB endpoint\nSTATE_STORE_KEY: str = config(\"STATE_STORE_KEY\", default=\"\") # Cosmos DB access key\nSTATE_STORE_DATABASE = \"AzureTRE\"\nSTATE_STORE_RESOURCES_CONTAINER = \"Resources\"\nSTATE_STORE_BUNDLE_SPECS_CONTAINER = \"ResourceSpecs\"\n\n# Service bus configuration\nSERVICE_BUS_FULLY_QUALIFIED_NAMESPACE: str = config(\"SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE\", default=\"\")\nSERVICE_BUS_RESOURCE_REQUEST_QUEUE: str = config(\"SERVICE_BUS_RESOURCE_REQUEST_QUEUE\", default=\"\")\n", "path": "management_api_app/core/config.py"}, {"content": "import uuid\nfrom typing import List\n\nfrom azure.cosmos import ContainerProxy, CosmosClient\nfrom pydantic import UUID4\n\nfrom core import config\nfrom db.errors import EntityDoesNotExist\nfrom db.query_builder import QueryBuilder\nfrom db.repositories.base import BaseRepository\nfrom models.domain.resource import ResourceType\nfrom models.domain.workspace import Workspace\nfrom models.schemas.workspace import WorkspaceInCreate\n\n\nclass WorkspaceRepository(BaseRepository):\n def __init__(self, client: CosmosClient):\n 
super().__init__(client, config.STATE_STORE_RESOURCES_CONTAINER)\n\n @property\n def container(self) -> ContainerProxy:\n return self._container\n\n def get_all_active_workspaces(self) -> List[Workspace]:\n query = QueryBuilder().select_active_resources(ResourceType.Workspace).build()\n workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))\n return workspaces\n\n def get_workspace_by_workspace_id(self, workspace_id: UUID4) -> Workspace:\n query = QueryBuilder().select_active_resources(ResourceType.Workspace).with_id(workspace_id).build()\n workspaces = list(self.container.query_items(query=query, enable_cross_partition_query=True))\n\n if workspaces:\n return workspaces[0]\n else:\n raise EntityDoesNotExist\n\n def create_workspace(self, workspace_create: WorkspaceInCreate) -> Workspace:\n resource_spec_parameters = {\n \"location\": config.RESOURCE_LOCATION,\n \"workspace_id\": \"0001\", # TODO: Calculate this value - Issue #166\n \"tre_id\": config.TRE_ID,\n \"address_space\": \"10.2.1.0/24\" # TODO: Calculate this value - Issue #52\n }\n\n workspace = Workspace(\n id=str(uuid.uuid4()),\n displayName=workspace_create.displayName,\n description=workspace_create.description,\n resourceSpecName=workspace_create.workspaceType,\n resourceSpecVersion=\"0.1.0\", # TODO: Calculate latest - Issue #167\n resourceSpecParameters=resource_spec_parameters\n )\n\n self.container.create_item(body=workspace.dict())\n return workspace\n", "path": "management_api_app/db/repositories/workspaces.py"}, {"content": "from typing import List\nfrom pydantic import BaseModel, Field\n\nfrom models.domain.workspace import Workspace\n\n\ndef get_sample_workspace(workspace_id: str, spec_workspace_id: str = \"0001\") -> dict:\n return {\n \"id\": workspace_id,\n \"displayName\": \"my workspace\",\n \"description\": \"some description\",\n \"resourceSpecName\": \"tre-workspace-vanilla\",\n \"resourceSpecVersion\": \"0.1.0\",\n \"resourceSpecParameters\": {\n \"location\": \"westeurope\",\n \"workspace_id\": spec_workspace_id,\n \"tre_id\": \"mytre-dev-1234\",\n \"address_space\": \"10.2.1.0/24\"\n },\n \"status\": \"not_deployed\",\n \"isDeleted\": False,\n \"resourceType\": \"workspace\",\n \"workspaceURL\": \"\"\n }\n\n\nclass WorkspaceInResponse(BaseModel):\n workspace: Workspace\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspace\": get_sample_workspace(\"933ad738-7265-4b5f-9eae-a1a62928772e\")\n }\n }\n\n\nclass WorkspacesInList(BaseModel):\n workspaces: List[Workspace]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspaces\": [\n get_sample_workspace(\"933ad738-7265-4b5f-9eae-a1a62928772e\", \"0001\"),\n get_sample_workspace(\"2fdc9fba-726e-4db6-a1b8-9018a2165748\", \"0002\"),\n ]\n }\n }\n\n\nclass WorkspaceInCreate(BaseModel):\n displayName: str = Field(title=\"Friendly name for workspace\")\n workspaceType: str = Field(title=\"Workspace type\", description=\"Bundle name\")\n description: str = Field(title=\"Workspace description\")\n parameters: dict = Field({}, title=\"Workspace parameters\", description=\"Values for the parameters required by the workspace resource specification\")\n\n class Config:\n schema_extra = {\n \"example\": {\n \"displayName\": \"My workspace\",\n \"description\": \"workspace for team X\",\n \"workspaceType\": \"tre-workspace-vanilla\",\n \"parameters\": {}\n }\n }\n\n\nclass WorkspaceIdInResponse(BaseModel):\n workspaceId: str\n\n class Config:\n schema_extra = {\n \"example\": {\n \"workspaceId\": 
\"49a7445c-aae6-41ec-a539-30dfa90ab1ae\",\n }\n }\n", "path": "management_api_app/models/schemas/workspace.py"}]}
| 2,222 | 430 |
gh_patches_debug_27427
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1109
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Memory leaks in Windows
**Describe the bug**
Unfortunately, it seems like there are memory leak issues in the Windows implementation of toga
**To Reproduce**
Steps to reproduce the behavior:
1. install memory profiler, briefcase and matplotlib using `pip install memory_profiler briefcase matplotlib`
2. Go to the Box example directory (or any other example for that matter)
3. Run `mprof run --include-children briefcase dev`
4. Click some buttons and wait for a few seconds
5. Exit the program
6. run `mprof plot` and see the memory leak

**Environment:**
- Operating System: Windows 10
- Python version: python 3.8
- Software versions:
- Briefcase: latest
- Toga: latest
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/libs/proactor.py`
Content:
```
1 import asyncio
2 import sys
3 import threading
4 from asyncio import events
5
6 from .winforms import Action, Task, WinForms, user32
7
8
9 class AsyncIOTickMessageFilter(WinForms.IMessageFilter):
10 """
11 A Winforms message filter that will catch the request to tick the Asyncio
12 event loop.
13 """
14 __namespace__ = 'System.Windows.Forms'
15
16 def __init__(self, loop, msg_id):
17 self.loop = loop
18 self.msg_id = msg_id
19
20 def PreFilterMessage(self, message):
21 print('ping', message)
22 if message.Msg == self.msg_id:
23 print("asyncio tick message!!")
24 self.loop.run_once_recurring()
25 return True
26 # print("Filter message", message)
27 return False
28
29
30 class WinformsProactorEventLoop(asyncio.ProactorEventLoop):
31 def run_forever(self, app_context):
32 """Set up the asyncio event loop, integrate it with the Winforms
33 event loop, and start the application.
34
35 This largely duplicates the setup behavior of the default Proactor
36 run_forever implementation.
37
38 :param app_context: The WinForms.ApplicationContext instance
39 controlling the lifecycle of the app.
40 """
41 # Python 3.8 added an implementation of run_forever() in
42 # ProactorEventLoop. The only part that actually matters is the
43 # refactoring that moved the initial call to stage _loop_self_reading;
44 # it now needs to be created as part of run_forever; otherwise the
45 # event loop locks up, because there won't be anything for the
46 # select call to process.
47 if sys.version_info >= (3, 8):
48 self.call_soon(self._loop_self_reading)
49
50 # Remember the application context.
51 self.app_context = app_context
52
53 # Register a custom user window message.
54 self.msg_id = user32.RegisterWindowMessageA("Python asyncio tick")
55 # Add a message filter to listen for the asyncio tick message
56 # FIXME: Actually install the message filter.
57 # msg_filter = AsyncIOTickMessageFilter(self, self.msg_id)
58 # WinForms.Application.AddMessageFilter(msg_filter)
59
60 # Setup the Proactor.
61 # The code between the following markers should be exactly the same as
62 # the official CPython implementation, up to the start of the
63 # `while True:` part of run_forever() (see BaseEventLoop.run_forever()
64 # in Lib/ascynio/base_events.py)
65 # === START BaseEventLoop.run_forever() setup ===
66 self._check_closed()
67 if self.is_running():
68 raise RuntimeError('This event loop is already running')
69 if events._get_running_loop() is not None:
70 raise RuntimeError(
71 'Cannot run the event loop while another loop is running')
72 self._set_coroutine_origin_tracking(self._debug)
73 self._thread_id = threading.get_ident()
74 try:
75 self._old_agen_hooks = sys.get_asyncgen_hooks()
76 sys.set_asyncgen_hooks(
77 firstiter=self._asyncgen_firstiter_hook,
78 finalizer=self._asyncgen_finalizer_hook
79 )
80 except AttributeError:
81 # Python < 3.6 didn't have sys.get_asyncgen_hooks();
82 # No action required for those versions.
83 pass
84
85 events._set_running_loop(self)
86 # === END BaseEventLoop.run_forever() setup ===
87
88 # Rather than going into a `while True:` loop, we're going to use the
89 # Winforms event loop to queue a tick() message that will cause a
90 # single iteration of the asyncio event loop to be executed. Each time
91 # we do this, we queue *another* tick() message in 5ms time. In this
92 # way, we'll get a continuous stream of tick() calls, without blocking
93 # the Winforms event loop.
94
95 # Queue the first asyncio tick.
96 self.enqueue_tick()
97
98 # Start the Winforms event loop.
99 WinForms.Application.Run(self.app_context)
100
101 def enqueue_tick(self):
102 # Queue a call to tick in 5ms.
103 Task.Delay(5).ContinueWith(Action[Task](self.tick))
104
105 def tick(self, *args, **kwargs):
106 """
107 Cause a single iteration of the event loop to run on the main GUI thread.
108 """
109 # Post a userspace message that will trigger running an iteration
110 # of the asyncio event loop. This can't be done directly, because the
111 # tick() will be executing in a threadpool, and we need the asyncio
112 # handling to occur in the main GUI thread. However, by positing a
113 # message, it will be caught by the MessageFilter we installed on the
114 # Application thread.
115
116 # The message is sent with:
117 # * HWND 0xfff (all windows),
118 # * MSG self.msg_id (a message ID in the WM_USER range)
119 # * LPARAM and WPARAM empty (no extra details needed; just tick!)
120 user32.PostMessageA(0xffff, self.msg_id, None, None)
121
122 # FIXME: Once we have a working message filter, this invoke call
123 # can be removed.
124 # If the app context has a main form, invoke run_once_recurring()
125 # on the thread associated with that form.
126 if self.app_context.MainForm:
127 self.app_context.MainForm.Invoke(Action(self.run_once_recurring))
128
129 def run_once_recurring(self):
130 """
131 Run one iteration of the event loop, and enqueue the next iteration
132 (if we're not stopping).
133
134 This largely duplicates the "finally" behavior of the default Proactor
135 run_forever implementation.
136 """
137 # Perform one tick of the event loop.
138 self._run_once()
139
140 if self._stopping:
141 # If we're stopping, we can do the "finally" handling from
142 # the BaseEventLoop run_forever().
143 # === START BaseEventLoop.run_forever() finally handling ===
144 self._stopping = False
145 self._thread_id = None
146 events._set_running_loop(None)
147 self._set_coroutine_origin_tracking(False)
148 try:
149 sys.set_asyncgen_hooks(*self._old_agen_hooks)
150 except AttributeError:
151 # Python < 3.6 didn't have set_asyncgen_hooks.
152 # No action required for those versions.
153 pass
154 # === END BaseEventLoop.run_forever() finally handling ===
155 else:
156 # Otherwise, live to tick another day. Enqueue the next tick,
157 # and make sure there will be *something* to be processed.
158 # If you don't ensure there is at least one message on the
159 # queue, the select() call will block, locking the app.
160 self.enqueue_tick()
161 self.call_soon(self._loop_self_reading)
162
163
164 # Python 3.7 changed the name of an internal wrapper function.
165 # Install an alias for the old name at the new name.
166 if sys.version_info < (3, 7):
167 WinformsProactorEventLoop._set_coroutine_origin_tracking = WinformsProactorEventLoop._set_coroutine_wrapper
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/winforms/toga_winforms/libs/proactor.py b/src/winforms/toga_winforms/libs/proactor.py
--- a/src/winforms/toga_winforms/libs/proactor.py
+++ b/src/winforms/toga_winforms/libs/proactor.py
@@ -100,7 +100,8 @@
def enqueue_tick(self):
# Queue a call to tick in 5ms.
- Task.Delay(5).ContinueWith(Action[Task](self.tick))
+ self.task = Action[Task](self.tick)
+ Task.Delay(5).ContinueWith(self.task)
def tick(self, *args, **kwargs):
"""
@@ -113,6 +114,9 @@
# message, it will be caught by the MessageFilter we installed on the
# Application thread.
+ if self.task:
+ self.task.Dispose()
+ del self.task
# The message is sent with:
# * HWND 0xfff (all windows),
# * MSG self.msg_id (a message ID in the WM_USER range)
@@ -124,7 +128,10 @@
# If the app context has a main form, invoke run_once_recurring()
# on the thread associated with that form.
if self.app_context.MainForm:
- self.app_context.MainForm.Invoke(Action(self.run_once_recurring))
+ action = Action(self.run_once_recurring)
+ self.app_context.MainForm.Invoke(action)
+ action.Dispose()
+ del action
def run_once_recurring(self):
"""
|
{"golden_diff": "diff --git a/src/winforms/toga_winforms/libs/proactor.py b/src/winforms/toga_winforms/libs/proactor.py\n--- a/src/winforms/toga_winforms/libs/proactor.py\n+++ b/src/winforms/toga_winforms/libs/proactor.py\n@@ -100,7 +100,8 @@\n \n def enqueue_tick(self):\n # Queue a call to tick in 5ms.\n- Task.Delay(5).ContinueWith(Action[Task](self.tick))\n+ self.task = Action[Task](self.tick)\n+ Task.Delay(5).ContinueWith(self.task)\n \n def tick(self, *args, **kwargs):\n \"\"\"\n@@ -113,6 +114,9 @@\n # message, it will be caught by the MessageFilter we installed on the\n # Application thread.\n \n+ if self.task:\n+ self.task.Dispose()\n+ del self.task\n # The message is sent with:\n # * HWND 0xfff (all windows),\n # * MSG self.msg_id (a message ID in the WM_USER range)\n@@ -124,7 +128,10 @@\n # If the app context has a main form, invoke run_once_recurring()\n # on the thread associated with that form.\n if self.app_context.MainForm:\n- self.app_context.MainForm.Invoke(Action(self.run_once_recurring))\n+ action = Action(self.run_once_recurring)\n+ self.app_context.MainForm.Invoke(action)\n+ action.Dispose()\n+ del action\n \n def run_once_recurring(self):\n \"\"\"\n", "issue": "Memory leaks in Windows\n**Describe the bug**\r\nUnfortunately, it seems like there are memory leaks issues in the Windows implementation of toga\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. install memory profiler, briefcase and matplotlib using `pip install memory_profiler briefcase matplotlib`\r\n2. Go to the Box example directory (or any other example for that matter)\r\n3. Run `mprof run --include-children briefcase dev`\r\n4. Click some buttons and wait for a few seconds\r\n5. Exit the program\r\n6. run `mprof plot` and see the memory leak\r\n\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows 10\r\n - Python version: python 3.8\r\n - Software versions:\r\n - Briefcase: latest\r\n - Toga: latest\r\n\n", "before_files": [{"content": "import asyncio\nimport sys\nimport threading\nfrom asyncio import events\n\nfrom .winforms import Action, Task, WinForms, user32\n\n\nclass AsyncIOTickMessageFilter(WinForms.IMessageFilter):\n \"\"\"\n A Winforms message filter that will catch the request to tick the Asyncio\n event loop.\n \"\"\"\n __namespace__ = 'System.Windows.Forms'\n\n def __init__(self, loop, msg_id):\n self.loop = loop\n self.msg_id = msg_id\n\n def PreFilterMessage(self, message):\n print('ping', message)\n if message.Msg == self.msg_id:\n print(\"asyncio tick message!!\")\n self.loop.run_once_recurring()\n return True\n # print(\"Filter message\", message)\n return False\n\n\nclass WinformsProactorEventLoop(asyncio.ProactorEventLoop):\n def run_forever(self, app_context):\n \"\"\"Set up the asyncio event loop, integrate it with the Winforms\n event loop, and start the application.\n\n This largely duplicates the setup behavior of the default Proactor\n run_forever implementation.\n\n :param app_context: The WinForms.ApplicationContext instance\n controlling the lifecycle of the app.\n \"\"\"\n # Python 3.8 added an implementation of run_forever() in\n # ProactorEventLoop. 
The only part that actually matters is the\n # refactoring that moved the initial call to stage _loop_self_reading;\n # it now needs to be created as part of run_forever; otherwise the\n # event loop locks up, because there won't be anything for the\n # select call to process.\n if sys.version_info >= (3, 8):\n self.call_soon(self._loop_self_reading)\n\n # Remember the application context.\n self.app_context = app_context\n\n # Register a custom user window message.\n self.msg_id = user32.RegisterWindowMessageA(\"Python asyncio tick\")\n # Add a message filter to listen for the asyncio tick message\n # FIXME: Actually install the message filter.\n # msg_filter = AsyncIOTickMessageFilter(self, self.msg_id)\n # WinForms.Application.AddMessageFilter(msg_filter)\n\n # Setup the Proactor.\n # The code between the following markers should be exactly the same as\n # the official CPython implementation, up to the start of the\n # `while True:` part of run_forever() (see BaseEventLoop.run_forever()\n # in Lib/ascynio/base_events.py)\n # === START BaseEventLoop.run_forever() setup ===\n self._check_closed()\n if self.is_running():\n raise RuntimeError('This event loop is already running')\n if events._get_running_loop() is not None:\n raise RuntimeError(\n 'Cannot run the event loop while another loop is running')\n self._set_coroutine_origin_tracking(self._debug)\n self._thread_id = threading.get_ident()\n try:\n self._old_agen_hooks = sys.get_asyncgen_hooks()\n sys.set_asyncgen_hooks(\n firstiter=self._asyncgen_firstiter_hook,\n finalizer=self._asyncgen_finalizer_hook\n )\n except AttributeError:\n # Python < 3.6 didn't have sys.get_asyncgen_hooks();\n # No action required for those versions.\n pass\n\n events._set_running_loop(self)\n # === END BaseEventLoop.run_forever() setup ===\n\n # Rather than going into a `while True:` loop, we're going to use the\n # Winforms event loop to queue a tick() message that will cause a\n # single iteration of the asyncio event loop to be executed. Each time\n # we do this, we queue *another* tick() message in 5ms time. In this\n # way, we'll get a continuous stream of tick() calls, without blocking\n # the Winforms event loop.\n\n # Queue the first asyncio tick.\n self.enqueue_tick()\n\n # Start the Winforms event loop.\n WinForms.Application.Run(self.app_context)\n\n def enqueue_tick(self):\n # Queue a call to tick in 5ms.\n Task.Delay(5).ContinueWith(Action[Task](self.tick))\n\n def tick(self, *args, **kwargs):\n \"\"\"\n Cause a single iteration of the event loop to run on the main GUI thread.\n \"\"\"\n # Post a userspace message that will trigger running an iteration\n # of the asyncio event loop. This can't be done directly, because the\n # tick() will be executing in a threadpool, and we need the asyncio\n # handling to occur in the main GUI thread. 
However, by positing a\n # message, it will be caught by the MessageFilter we installed on the\n # Application thread.\n\n # The message is sent with:\n # * HWND 0xfff (all windows),\n # * MSG self.msg_id (a message ID in the WM_USER range)\n # * LPARAM and WPARAM empty (no extra details needed; just tick!)\n user32.PostMessageA(0xffff, self.msg_id, None, None)\n\n # FIXME: Once we have a working message filter, this invoke call\n # can be removed.\n # If the app context has a main form, invoke run_once_recurring()\n # on the thread associated with that form.\n if self.app_context.MainForm:\n self.app_context.MainForm.Invoke(Action(self.run_once_recurring))\n\n def run_once_recurring(self):\n \"\"\"\n Run one iteration of the event loop, and enqueue the next iteration\n (if we're not stopping).\n\n This largely duplicates the \"finally\" behavior of the default Proactor\n run_forever implementation.\n \"\"\"\n # Perform one tick of the event loop.\n self._run_once()\n\n if self._stopping:\n # If we're stopping, we can do the \"finally\" handling from\n # the BaseEventLoop run_forever().\n # === START BaseEventLoop.run_forever() finally handling ===\n self._stopping = False\n self._thread_id = None\n events._set_running_loop(None)\n self._set_coroutine_origin_tracking(False)\n try:\n sys.set_asyncgen_hooks(*self._old_agen_hooks)\n except AttributeError:\n # Python < 3.6 didn't have set_asyncgen_hooks.\n # No action required for those versions.\n pass\n # === END BaseEventLoop.run_forever() finally handling ===\n else:\n # Otherwise, live to tick another day. Enqueue the next tick,\n # and make sure there will be *something* to be processed.\n # If you don't ensure there is at least one message on the\n # queue, the select() call will block, locking the app.\n self.enqueue_tick()\n self.call_soon(self._loop_self_reading)\n\n\n# Python 3.7 changed the name of an internal wrapper function.\n# Install an alias for the old name at the new name.\nif sys.version_info < (3, 7):\n WinformsProactorEventLoop._set_coroutine_origin_tracking = WinformsProactorEventLoop._set_coroutine_wrapper\n", "path": "src/winforms/toga_winforms/libs/proactor.py"}], "after_files": [{"content": "import asyncio\nimport sys\nimport threading\nfrom asyncio import events\n\nfrom .winforms import Action, Task, WinForms, user32\n\n\nclass AsyncIOTickMessageFilter(WinForms.IMessageFilter):\n \"\"\"\n A Winforms message filter that will catch the request to tick the Asyncio\n event loop.\n \"\"\"\n __namespace__ = 'System.Windows.Forms'\n\n def __init__(self, loop, msg_id):\n self.loop = loop\n self.msg_id = msg_id\n\n def PreFilterMessage(self, message):\n print('ping', message)\n if message.Msg == self.msg_id:\n print(\"asyncio tick message!!\")\n self.loop.run_once_recurring()\n return True\n # print(\"Filter message\", message)\n return False\n\n\nclass WinformsProactorEventLoop(asyncio.ProactorEventLoop):\n def run_forever(self, app_context):\n \"\"\"Set up the asyncio event loop, integrate it with the Winforms\n event loop, and start the application.\n\n This largely duplicates the setup behavior of the default Proactor\n run_forever implementation.\n\n :param app_context: The WinForms.ApplicationContext instance\n controlling the lifecycle of the app.\n \"\"\"\n # Python 3.8 added an implementation of run_forever() in\n # ProactorEventLoop. 
The only part that actually matters is the\n # refactoring that moved the initial call to stage _loop_self_reading;\n # it now needs to be created as part of run_forever; otherwise the\n # event loop locks up, because there won't be anything for the\n # select call to process.\n if sys.version_info >= (3, 8):\n self.call_soon(self._loop_self_reading)\n\n # Remember the application context.\n self.app_context = app_context\n\n # Register a custom user window message.\n self.msg_id = user32.RegisterWindowMessageA(\"Python asyncio tick\")\n # Add a message filter to listen for the asyncio tick message\n # FIXME: Actually install the message filter.\n # msg_filter = AsyncIOTickMessageFilter(self, self.msg_id)\n # WinForms.Application.AddMessageFilter(msg_filter)\n\n # Setup the Proactor.\n # The code between the following markers should be exactly the same as\n # the official CPython implementation, up to the start of the\n # `while True:` part of run_forever() (see BaseEventLoop.run_forever()\n # in Lib/ascynio/base_events.py)\n # === START BaseEventLoop.run_forever() setup ===\n self._check_closed()\n if self.is_running():\n raise RuntimeError('This event loop is already running')\n if events._get_running_loop() is not None:\n raise RuntimeError(\n 'Cannot run the event loop while another loop is running')\n self._set_coroutine_origin_tracking(self._debug)\n self._thread_id = threading.get_ident()\n try:\n self._old_agen_hooks = sys.get_asyncgen_hooks()\n sys.set_asyncgen_hooks(\n firstiter=self._asyncgen_firstiter_hook,\n finalizer=self._asyncgen_finalizer_hook\n )\n except AttributeError:\n # Python < 3.6 didn't have sys.get_asyncgen_hooks();\n # No action required for those versions.\n pass\n\n events._set_running_loop(self)\n # === END BaseEventLoop.run_forever() setup ===\n\n # Rather than going into a `while True:` loop, we're going to use the\n # Winforms event loop to queue a tick() message that will cause a\n # single iteration of the asyncio event loop to be executed. Each time\n # we do this, we queue *another* tick() message in 5ms time. In this\n # way, we'll get a continuous stream of tick() calls, without blocking\n # the Winforms event loop.\n\n # Queue the first asyncio tick.\n self.enqueue_tick()\n\n # Start the Winforms event loop.\n WinForms.Application.Run(self.app_context)\n\n def enqueue_tick(self):\n # Queue a call to tick in 5ms.\n self.task = Action[Task](self.tick)\n Task.Delay(5).ContinueWith(self.task)\n\n def tick(self, *args, **kwargs):\n \"\"\"\n Cause a single iteration of the event loop to run on the main GUI thread.\n \"\"\"\n # Post a userspace message that will trigger running an iteration\n # of the asyncio event loop. This can't be done directly, because the\n # tick() will be executing in a threadpool, and we need the asyncio\n # handling to occur in the main GUI thread. 
However, by positing a\n # message, it will be caught by the MessageFilter we installed on the\n # Application thread.\n\n if self.task:\n self.task.Dispose()\n del self.task\n # The message is sent with:\n # * HWND 0xfff (all windows),\n # * MSG self.msg_id (a message ID in the WM_USER range)\n # * LPARAM and WPARAM empty (no extra details needed; just tick!)\n user32.PostMessageA(0xffff, self.msg_id, None, None)\n\n # FIXME: Once we have a working message filter, this invoke call\n # can be removed.\n # If the app context has a main form, invoke run_once_recurring()\n # on the thread associated with that form.\n if self.app_context.MainForm:\n action = Action(self.run_once_recurring)\n self.app_context.MainForm.Invoke(action)\n action.Dispose()\n del action\n\n def run_once_recurring(self):\n \"\"\"\n Run one iteration of the event loop, and enqueue the next iteration\n (if we're not stopping).\n\n This largely duplicates the \"finally\" behavior of the default Proactor\n run_forever implementation.\n \"\"\"\n # Perform one tick of the event loop.\n self._run_once()\n\n if self._stopping:\n # If we're stopping, we can do the \"finally\" handling from\n # the BaseEventLoop run_forever().\n # === START BaseEventLoop.run_forever() finally handling ===\n self._stopping = False\n self._thread_id = None\n events._set_running_loop(None)\n self._set_coroutine_origin_tracking(False)\n try:\n sys.set_asyncgen_hooks(*self._old_agen_hooks)\n except AttributeError:\n # Python < 3.6 didn't have set_asyncgen_hooks.\n # No action required for those versions.\n pass\n # === END BaseEventLoop.run_forever() finally handling ===\n else:\n # Otherwise, live to tick another day. Enqueue the next tick,\n # and make sure there will be *something* to be processed.\n # If you don't ensure there is at least one message on the\n # queue, the select() call will block, locking the app.\n self.enqueue_tick()\n self.call_soon(self._loop_self_reading)\n\n\n# Python 3.7 changed the name of an internal wrapper function.\n# Install an alias for the old name at the new name.\nif sys.version_info < (3, 7):\n WinformsProactorEventLoop._set_coroutine_origin_tracking = WinformsProactorEventLoop._set_coroutine_wrapper\n", "path": "src/winforms/toga_winforms/libs/proactor.py"}]}
| 2,432 | 340 |
gh_patches_debug_21134 | rasdani/github-patches | git_diff | comic__grand-challenge.org-649 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No response when uploading a new algorithm using the wrong file format
# Recipe
1. Go to https://grand-challenge.org/algorithms/create/
2. Upload, for example, a `.tar.gz` file
# Result
Upload completes, nothing happens.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/algorithms/forms.py`
Content:
```
1 from crispy_forms.helper import FormHelper
2 from crispy_forms.layout import Submit
3 from django import forms
4
5 from grandchallenge.algorithms.models import Algorithm, Job
6 from grandchallenge.core.validators import (
7 ExtensionValidator,
8 MimeTypeValidator,
9 )
10 from grandchallenge.jqfileupload.widgets import uploader
11 from grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList
12
13 algorithm_upload_widget = uploader.AjaxUploadWidget(
14 ajax_target_path="ajax/algorithm-upload/", multifile=False
15 )
16
17
18 class AlgorithmForm(forms.ModelForm):
19 ipython_notebook = forms.FileField(
20 validators=[MimeTypeValidator(allowed_types=("text/plain",))],
21 required=False,
22 help_text=(
23 "Please upload an iPython notebook that describes your algorithm"
24 ),
25 )
26 chunked_upload = UploadedAjaxFileList(
27 widget=algorithm_upload_widget,
28 label="Algorithm Image",
29 validators=[ExtensionValidator(allowed_extensions=(".tar",))],
30 help_text=(
31 "Tar archive of the container image produced from the command "
32 "`docker save IMAGE > IMAGE.tar`. See "
33 "https://docs.docker.com/engine/reference/commandline/save/"
34 ),
35 )
36
37 def __init__(self, *args, **kwargs):
38 super().__init__(*args, **kwargs)
39 self.helper = FormHelper(self)
40
41 class Meta:
42 model = Algorithm
43 fields = (
44 "title",
45 "requires_gpu",
46 "ipython_notebook",
47 "chunked_upload",
48 )
49
```
Path: `app/grandchallenge/evaluation/forms.py`
Content:
```
1 from crispy_forms.bootstrap import TabHolder, Tab
2 from crispy_forms.helper import FormHelper
3 from crispy_forms.layout import Submit, Layout, ButtonHolder
4 from django import forms
5 from django_summernote.widgets import SummernoteInplaceWidget
6
7 from grandchallenge.core.validators import ExtensionValidator
8 from grandchallenge.core.widgets import JSONEditorWidget
9 from grandchallenge.evaluation.models import (
10 Method,
11 Submission,
12 Config,
13 EXTRA_RESULT_COLUMNS_SCHEMA,
14 )
15 from grandchallenge.jqfileupload.widgets import uploader
16 from grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList
17
18 submission_options = (
19 "submission_page_html",
20 "daily_submission_limit",
21 "allow_submission_comments",
22 "supplementary_file_choice",
23 "supplementary_file_label",
24 "supplementary_file_help_text",
25 "publication_url_choice",
26 )
27
28 scoring_options = (
29 "score_title",
30 "score_jsonpath",
31 "score_error_jsonpath",
32 "score_default_sort",
33 "score_decimal_places",
34 "extra_results_columns",
35 "scoring_method_choice",
36 "auto_publish_new_results",
37 "result_display_choice",
38 )
39
40 leaderboard_options = (
41 "use_teams",
42 "display_submission_comments",
43 "show_supplementary_file_link",
44 "show_publication_url",
45 )
46
47 result_detail_options = ("submission_join_key",)
48
49
50 class ConfigForm(forms.ModelForm):
51 def __init__(self, *args, **kwargs):
52 super().__init__(*args, **kwargs)
53 self.helper = FormHelper(self)
54 self.helper.layout = Layout(
55 TabHolder(
56 Tab("Submission", *submission_options),
57 Tab("Scoring", *scoring_options),
58 Tab("Leaderboard", *leaderboard_options),
59 Tab("Result Detail", *result_detail_options),
60 ),
61 ButtonHolder(Submit("save", "Save")),
62 )
63
64 class Meta:
65 model = Config
66 fields = (
67 *submission_options,
68 *scoring_options,
69 *leaderboard_options,
70 *result_detail_options,
71 )
72 widgets = {
73 "submission_page_html": SummernoteInplaceWidget(),
74 "extra_results_columns": JSONEditorWidget(
75 schema=EXTRA_RESULT_COLUMNS_SCHEMA
76 ),
77 }
78
79
80 method_upload_widget = uploader.AjaxUploadWidget(
81 ajax_target_path="ajax/method-upload/", multifile=False
82 )
83
84
85 class MethodForm(forms.ModelForm):
86 chunked_upload = UploadedAjaxFileList(
87 widget=method_upload_widget,
88 label="Evaluation Method Container",
89 validators=[ExtensionValidator(allowed_extensions=(".tar",))],
90 help_text=(
91 "Tar archive of the container image produced from the command "
92 "`docker save IMAGE > IMAGE.tar`. See "
93 "https://docs.docker.com/engine/reference/commandline/save/"
94 ),
95 )
96
97 def __init__(self, *args, **kwargs):
98 super().__init__(*args, **kwargs)
99 self.helper = FormHelper(self)
100
101 class Meta:
102 model = Method
103 fields = ["chunked_upload"]
104
105
106 submission_upload_widget = uploader.AjaxUploadWidget(
107 ajax_target_path="ajax/submission-upload/", multifile=False
108 )
109
110 submission_fields = (
111 "comment",
112 "supplementary_file",
113 "publication_url",
114 "chunked_upload",
115 )
116
117
118 class SubmissionForm(forms.ModelForm):
119 chunked_upload = UploadedAjaxFileList(
120 widget=submission_upload_widget,
121 label="Predictions File",
122 validators=[ExtensionValidator(allowed_extensions=(".zip", ".csv"))],
123 )
124
125 def __init__(
126 self,
127 *args,
128 display_comment_field=False,
129 supplementary_file_choice=Config.OFF,
130 supplementary_file_label="",
131 supplementary_file_help_text="",
132 publication_url_choice=Config.OFF,
133 **kwargs,
134 ):
135 """
136 Conditionally render the comment field based on the
137 display_comment_field kwarg
138 """
139 super().__init__(*args, **kwargs)
140
141 if not display_comment_field:
142 del self.fields["comment"]
143
144 if supplementary_file_label:
145 self.fields["supplementary_file"].label = supplementary_file_label
146
147 if supplementary_file_help_text:
148 self.fields[
149 "supplementary_file"
150 ].help_text = supplementary_file_help_text
151
152 if supplementary_file_choice == Config.REQUIRED:
153 self.fields["supplementary_file"].required = True
154 elif supplementary_file_choice == Config.OFF:
155 del self.fields["supplementary_file"]
156
157 if publication_url_choice == Config.REQUIRED:
158 self.fields["publication_url"].required = True
159 elif publication_url_choice == Config.OFF:
160 del self.fields["publication_url"]
161
162 self.helper = FormHelper(self)
163
164 class Meta:
165 model = Submission
166 fields = submission_fields
167
168
169 class LegacySubmissionForm(SubmissionForm):
170 class Meta:
171 model = Submission
172 fields = ("creator", *submission_fields)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/algorithms/forms.py b/app/grandchallenge/algorithms/forms.py
--- a/app/grandchallenge/algorithms/forms.py
+++ b/app/grandchallenge/algorithms/forms.py
@@ -29,7 +29,7 @@
validators=[ExtensionValidator(allowed_extensions=(".tar",))],
help_text=(
"Tar archive of the container image produced from the command "
- "`docker save IMAGE > IMAGE.tar`. See "
+ "'docker save IMAGE > IMAGE.tar'. See "
"https://docs.docker.com/engine/reference/commandline/save/"
),
)
diff --git a/app/grandchallenge/evaluation/forms.py b/app/grandchallenge/evaluation/forms.py
--- a/app/grandchallenge/evaluation/forms.py
+++ b/app/grandchallenge/evaluation/forms.py
@@ -89,7 +89,7 @@
validators=[ExtensionValidator(allowed_extensions=(".tar",))],
help_text=(
"Tar archive of the container image produced from the command "
- "`docker save IMAGE > IMAGE.tar`. See "
+ "'docker save IMAGE > IMAGE.tar'. See "
"https://docs.docker.com/engine/reference/commandline/save/"
),
)
|
{"golden_diff": "diff --git a/app/grandchallenge/algorithms/forms.py b/app/grandchallenge/algorithms/forms.py\n--- a/app/grandchallenge/algorithms/forms.py\n+++ b/app/grandchallenge/algorithms/forms.py\n@@ -29,7 +29,7 @@\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n- \"`docker save IMAGE > IMAGE.tar`. See \"\n+ \"'docker save IMAGE > IMAGE.tar'. See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\ndiff --git a/app/grandchallenge/evaluation/forms.py b/app/grandchallenge/evaluation/forms.py\n--- a/app/grandchallenge/evaluation/forms.py\n+++ b/app/grandchallenge/evaluation/forms.py\n@@ -89,7 +89,7 @@\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n- \"`docker save IMAGE > IMAGE.tar`. See \"\n+ \"'docker save IMAGE > IMAGE.tar'. See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\n", "issue": "No response when uploading a new algorithm using the wrong file format\n# Recipe\r\n\r\n1. Go to https://grand-challenge.org/algorithms/create/\r\n2. Upload, for example, a `.tar.gz` file\r\n\r\n# Result\r\n\r\nUpload completes, nothing happens.\n", "before_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\n\nfrom grandchallenge.algorithms.models import Algorithm, Job\nfrom grandchallenge.core.validators import (\n ExtensionValidator,\n MimeTypeValidator,\n)\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList\n\nalgorithm_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/algorithm-upload/\", multifile=False\n)\n\n\nclass AlgorithmForm(forms.ModelForm):\n ipython_notebook = forms.FileField(\n validators=[MimeTypeValidator(allowed_types=(\"text/plain\",))],\n required=False,\n help_text=(\n \"Please upload an iPython notebook that describes your algorithm\"\n ),\n )\n chunked_upload = UploadedAjaxFileList(\n widget=algorithm_upload_widget,\n label=\"Algorithm Image\",\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n \"`docker save IMAGE > IMAGE.tar`. 
See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n\n class Meta:\n model = Algorithm\n fields = (\n \"title\",\n \"requires_gpu\",\n \"ipython_notebook\",\n \"chunked_upload\",\n )\n", "path": "app/grandchallenge/algorithms/forms.py"}, {"content": "from crispy_forms.bootstrap import TabHolder, Tab\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, ButtonHolder\nfrom django import forms\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.validators import ExtensionValidator\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.evaluation.models import (\n Method,\n Submission,\n Config,\n EXTRA_RESULT_COLUMNS_SCHEMA,\n)\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList\n\nsubmission_options = (\n \"submission_page_html\",\n \"daily_submission_limit\",\n \"allow_submission_comments\",\n \"supplementary_file_choice\",\n \"supplementary_file_label\",\n \"supplementary_file_help_text\",\n \"publication_url_choice\",\n)\n\nscoring_options = (\n \"score_title\",\n \"score_jsonpath\",\n \"score_error_jsonpath\",\n \"score_default_sort\",\n \"score_decimal_places\",\n \"extra_results_columns\",\n \"scoring_method_choice\",\n \"auto_publish_new_results\",\n \"result_display_choice\",\n)\n\nleaderboard_options = (\n \"use_teams\",\n \"display_submission_comments\",\n \"show_supplementary_file_link\",\n \"show_publication_url\",\n)\n\nresult_detail_options = (\"submission_join_key\",)\n\n\nclass ConfigForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n TabHolder(\n Tab(\"Submission\", *submission_options),\n Tab(\"Scoring\", *scoring_options),\n Tab(\"Leaderboard\", *leaderboard_options),\n Tab(\"Result Detail\", *result_detail_options),\n ),\n ButtonHolder(Submit(\"save\", \"Save\")),\n )\n\n class Meta:\n model = Config\n fields = (\n *submission_options,\n *scoring_options,\n *leaderboard_options,\n *result_detail_options,\n )\n widgets = {\n \"submission_page_html\": SummernoteInplaceWidget(),\n \"extra_results_columns\": JSONEditorWidget(\n schema=EXTRA_RESULT_COLUMNS_SCHEMA\n ),\n }\n\n\nmethod_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/method-upload/\", multifile=False\n)\n\n\nclass MethodForm(forms.ModelForm):\n chunked_upload = UploadedAjaxFileList(\n widget=method_upload_widget,\n label=\"Evaluation Method Container\",\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n \"`docker save IMAGE > IMAGE.tar`. 
See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n\n class Meta:\n model = Method\n fields = [\"chunked_upload\"]\n\n\nsubmission_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/submission-upload/\", multifile=False\n)\n\nsubmission_fields = (\n \"comment\",\n \"supplementary_file\",\n \"publication_url\",\n \"chunked_upload\",\n)\n\n\nclass SubmissionForm(forms.ModelForm):\n chunked_upload = UploadedAjaxFileList(\n widget=submission_upload_widget,\n label=\"Predictions File\",\n validators=[ExtensionValidator(allowed_extensions=(\".zip\", \".csv\"))],\n )\n\n def __init__(\n self,\n *args,\n display_comment_field=False,\n supplementary_file_choice=Config.OFF,\n supplementary_file_label=\"\",\n supplementary_file_help_text=\"\",\n publication_url_choice=Config.OFF,\n **kwargs,\n ):\n \"\"\"\n Conditionally render the comment field based on the\n display_comment_field kwarg\n \"\"\"\n super().__init__(*args, **kwargs)\n\n if not display_comment_field:\n del self.fields[\"comment\"]\n\n if supplementary_file_label:\n self.fields[\"supplementary_file\"].label = supplementary_file_label\n\n if supplementary_file_help_text:\n self.fields[\n \"supplementary_file\"\n ].help_text = supplementary_file_help_text\n\n if supplementary_file_choice == Config.REQUIRED:\n self.fields[\"supplementary_file\"].required = True\n elif supplementary_file_choice == Config.OFF:\n del self.fields[\"supplementary_file\"]\n\n if publication_url_choice == Config.REQUIRED:\n self.fields[\"publication_url\"].required = True\n elif publication_url_choice == Config.OFF:\n del self.fields[\"publication_url\"]\n\n self.helper = FormHelper(self)\n\n class Meta:\n model = Submission\n fields = submission_fields\n\n\nclass LegacySubmissionForm(SubmissionForm):\n class Meta:\n model = Submission\n fields = (\"creator\", *submission_fields)\n", "path": "app/grandchallenge/evaluation/forms.py"}], "after_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\n\nfrom grandchallenge.algorithms.models import Algorithm, Job\nfrom grandchallenge.core.validators import (\n ExtensionValidator,\n MimeTypeValidator,\n)\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList\n\nalgorithm_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/algorithm-upload/\", multifile=False\n)\n\n\nclass AlgorithmForm(forms.ModelForm):\n ipython_notebook = forms.FileField(\n validators=[MimeTypeValidator(allowed_types=(\"text/plain\",))],\n required=False,\n help_text=(\n \"Please upload an iPython notebook that describes your algorithm\"\n ),\n )\n chunked_upload = UploadedAjaxFileList(\n widget=algorithm_upload_widget,\n label=\"Algorithm Image\",\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n \"'docker save IMAGE > IMAGE.tar'. 
See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n\n class Meta:\n model = Algorithm\n fields = (\n \"title\",\n \"requires_gpu\",\n \"ipython_notebook\",\n \"chunked_upload\",\n )\n", "path": "app/grandchallenge/algorithms/forms.py"}, {"content": "from crispy_forms.bootstrap import TabHolder, Tab\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, ButtonHolder\nfrom django import forms\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.validators import ExtensionValidator\nfrom grandchallenge.core.widgets import JSONEditorWidget\nfrom grandchallenge.evaluation.models import (\n Method,\n Submission,\n Config,\n EXTRA_RESULT_COLUMNS_SCHEMA,\n)\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import UploadedAjaxFileList\n\nsubmission_options = (\n \"submission_page_html\",\n \"daily_submission_limit\",\n \"allow_submission_comments\",\n \"supplementary_file_choice\",\n \"supplementary_file_label\",\n \"supplementary_file_help_text\",\n \"publication_url_choice\",\n)\n\nscoring_options = (\n \"score_title\",\n \"score_jsonpath\",\n \"score_error_jsonpath\",\n \"score_default_sort\",\n \"score_decimal_places\",\n \"extra_results_columns\",\n \"scoring_method_choice\",\n \"auto_publish_new_results\",\n \"result_display_choice\",\n)\n\nleaderboard_options = (\n \"use_teams\",\n \"display_submission_comments\",\n \"show_supplementary_file_link\",\n \"show_publication_url\",\n)\n\nresult_detail_options = (\"submission_join_key\",)\n\n\nclass ConfigForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n TabHolder(\n Tab(\"Submission\", *submission_options),\n Tab(\"Scoring\", *scoring_options),\n Tab(\"Leaderboard\", *leaderboard_options),\n Tab(\"Result Detail\", *result_detail_options),\n ),\n ButtonHolder(Submit(\"save\", \"Save\")),\n )\n\n class Meta:\n model = Config\n fields = (\n *submission_options,\n *scoring_options,\n *leaderboard_options,\n *result_detail_options,\n )\n widgets = {\n \"submission_page_html\": SummernoteInplaceWidget(),\n \"extra_results_columns\": JSONEditorWidget(\n schema=EXTRA_RESULT_COLUMNS_SCHEMA\n ),\n }\n\n\nmethod_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/method-upload/\", multifile=False\n)\n\n\nclass MethodForm(forms.ModelForm):\n chunked_upload = UploadedAjaxFileList(\n widget=method_upload_widget,\n label=\"Evaluation Method Container\",\n validators=[ExtensionValidator(allowed_extensions=(\".tar\",))],\n help_text=(\n \"Tar archive of the container image produced from the command \"\n \"'docker save IMAGE > IMAGE.tar'. 
See \"\n \"https://docs.docker.com/engine/reference/commandline/save/\"\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n\n class Meta:\n model = Method\n fields = [\"chunked_upload\"]\n\n\nsubmission_upload_widget = uploader.AjaxUploadWidget(\n ajax_target_path=\"ajax/submission-upload/\", multifile=False\n)\n\nsubmission_fields = (\n \"comment\",\n \"supplementary_file\",\n \"publication_url\",\n \"chunked_upload\",\n)\n\n\nclass SubmissionForm(forms.ModelForm):\n chunked_upload = UploadedAjaxFileList(\n widget=submission_upload_widget,\n label=\"Predictions File\",\n validators=[ExtensionValidator(allowed_extensions=(\".zip\", \".csv\"))],\n )\n\n def __init__(\n self,\n *args,\n display_comment_field=False,\n supplementary_file_choice=Config.OFF,\n supplementary_file_label=\"\",\n supplementary_file_help_text=\"\",\n publication_url_choice=Config.OFF,\n **kwargs,\n ):\n \"\"\"\n Conditionally render the comment field based on the\n display_comment_field kwarg\n \"\"\"\n super().__init__(*args, **kwargs)\n\n if not display_comment_field:\n del self.fields[\"comment\"]\n\n if supplementary_file_label:\n self.fields[\"supplementary_file\"].label = supplementary_file_label\n\n if supplementary_file_help_text:\n self.fields[\n \"supplementary_file\"\n ].help_text = supplementary_file_help_text\n\n if supplementary_file_choice == Config.REQUIRED:\n self.fields[\"supplementary_file\"].required = True\n elif supplementary_file_choice == Config.OFF:\n del self.fields[\"supplementary_file\"]\n\n if publication_url_choice == Config.REQUIRED:\n self.fields[\"publication_url\"].required = True\n elif publication_url_choice == Config.OFF:\n del self.fields[\"publication_url\"]\n\n self.helper = FormHelper(self)\n\n class Meta:\n model = Submission\n fields = submission_fields\n\n\nclass LegacySubmissionForm(SubmissionForm):\n class Meta:\n model = Submission\n fields = (\"creator\", *submission_fields)\n", "path": "app/grandchallenge/evaluation/forms.py"}]}
| 2,167 | 256 |
gh_patches_debug_7639 | rasdani/github-patches | git_diff | frappe__frappe-11557 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IMAP port settings are not updated from Email Domain to Email Account
## Description of the issue
When changing the IMAP port in an existing Email Domain, the Email Accounts using this Domain are not updated accordingly. This can lead to Frappe trying an IMAPS connection (which usually is 993) to the plain IMAP port 143, resulting in misleading error messages like `ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)`.
We could track down the root cause to the method `on_update` from the DocType "Email Domain": it simply misses the field `incoming_port` when copying data to all e-mail accounts that use this domain. This leads to the problem if the `incoming_port` is already set in the email account and gets updated/changed afterwards in the email domain.
## Context information (for bug reports)
```
frappe-bench$ bench --version
5.0.0
frappe-bench$ bench version
erpnext 12.11.2
frappe 12.9.1
```
## Steps to reproduce the issue
1. To reproduce this small bug you need to create a "Email Domain" in Frappe and save it with imap-port 143 and no SSL.
2. Create an e-mail account and link it with the domain from step 1 but without `Enable Incoming` and save.
3. Try to `Enable Incoming` and save
4. After "saving" the e-mail account go to the domain and change the imap-port from 143 to 993 and check SSL.
5. The `incoming_port` in Email-account is still 143.
### Observed result
In the database you can see that the `incoming_port` in the e-mail account is still 143 (real domain and mail addresses hidden):
```
select
ea.email_id,
ea.domain,
ea.incoming_port,
ed.incoming_port,
ea.email_server,
ed.email_server
from
`tabEmail Account` ea,
`tabEmail Domain` ed
where ea.domain = ed.name
and ed.name = "example.com";
```
#### Before updating the IMAP port in the domain
```
+------------------+-------------+---------------+---------------+--------------+--------------+
| email_id | domain | incoming_port | incoming_port | email_server | email_server |
+------------------+-------------+---------------+---------------+--------------+--------------+
| [email protected] | example.com | 143 | 143 | example.com | example.com |
+------------------+-------------+---------------+---------------+--------------+--------------+
1 row in set (0.000 sec)
```
#### After updating the IMAP port in the domain
```
+------------------+-------------+---------------+---------------+--------------+--------------+
| email_id | domain | incoming_port | incoming_port | email_server | email_server |
+------------------+-------------+---------------+---------------+--------------+--------------+
| [email protected] | example.com | 143 | 993 | example.com | example.com |
+------------------+-------------+---------------+---------------+--------------+--------------+
1 row in set (0.001 sec)
```
Now it will always trigger an SSL-handshake-error if the scheduler tries to get access.
### Expected result
When the mail domain gets updated all necessary fields related to e-mail account should be updated including the `incoming_port`.
### Stacktrace / full error message
```
Traceback (most recent call last):
File "/home/erpnext/frappe-bench/apps/frappe/frappe/app.py", line 64, in application
response = frappe.api.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/api.py", line 59, in handle
return frappe.handler.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 24, in handle
data = execute_cmd(cmd)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 63, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py", line 1055, in call
return fn(*args, **newargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py", line 21, in savedocs
doc.save()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 273, in save
return self._save(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 309, in _save
self.run_before_save_methods()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 896, in run_before_save_methods
self.run_method("validate")
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 797, in run_method
out = Document.hook(fn)(self, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1073, in composer
return composed(self, method, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1056, in runner
add_to_return_value(self, fn(self, *args, **kwargs))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 791, in <lambda>
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 68, in validate
self.get_incoming_server()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 168, in get_incoming_server
email_server.connect()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 43, in connect
return self.connect_imap()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 51, in connect_imap
self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get("pop_timeout"))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 564, in __init__
self._super.__init__(self, *args, **kwargs)
File "/usr/lib/python3.6/imaplib.py", line 1288, in __init__
IMAP4.__init__(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 198, in __init__
self.open(host, port)
File "/usr/lib/python3.6/imaplib.py", line 1301, in open
IMAP4.open(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 299, in open
self.sock = self._create_socket()
File "/usr/lib/python3.6/imaplib.py", line 1293, in _create_socket
server_hostname=self.host)
File "/usr/lib/python3.6/ssl.py", line 407, in wrap_socket
_context=self, _session=session)
File "/usr/lib/python3.6/ssl.py", line 817, in __init__
self.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 1077, in do_handshake
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)
```
## OS
- Linux Ubuntu 18.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/email/doctype/email_domain/email_domain.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
3 # For license information, please see license.txt
4
5 from __future__ import unicode_literals
6 import frappe
7 from frappe import _
8 from frappe.model.document import Document
9 from frappe.utils import validate_email_address ,cint, cstr
10 import imaplib,poplib,smtplib
11 from frappe.email.utils import get_port
12
13 class EmailDomain(Document):
14 def autoname(self):
15 if self.domain_name:
16 self.name = self.domain_name
17
18 def validate(self):
19 """Validate email id and check POP3/IMAP and SMTP connections is enabled."""
20 if self.email_id:
21 validate_email_address(self.email_id, True)
22
23 if frappe.local.flags.in_patch or frappe.local.flags.in_test:
24 return
25
26 if not frappe.local.flags.in_install and not frappe.local.flags.in_patch:
27 try:
28 if self.use_imap:
29 if self.use_ssl:
30 test = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))
31 else:
32 test = imaplib.IMAP4(self.email_server, port=get_port(self))
33
34 else:
35 if self.use_ssl:
36 test = poplib.POP3_SSL(self.email_server, port=get_port(self))
37 else:
38 test = poplib.POP3(self.email_server, port=get_port(self))
39
40 except Exception:
41 frappe.throw(_("Incoming email account not correct"))
42 return None
43 finally:
44 try:
45 if self.use_imap:
46 test.logout()
47 else:
48 test.quit()
49 except Exception:
50 pass
51 try:
52 if self.use_tls and not self.smtp_port:
53 self.smtp_port = 587
54 sess = smtplib.SMTP(cstr(self.smtp_server or ""), cint(self.smtp_port) or None)
55 sess.quit()
56 except Exception:
57 frappe.throw(_("Outgoing email account not correct"))
58 return None
59 return
60
61 def on_update(self):
62 """update all email accounts using this domain"""
63 for email_account in frappe.get_all("Email Account",
64 filters={"domain": self.name}):
65
66 try:
67 email_account = frappe.get_doc("Email Account",
68 email_account.name)
69 email_account.set("email_server",self.email_server)
70 email_account.set("use_imap",self.use_imap)
71 email_account.set("use_ssl",self.use_ssl)
72 email_account.set("use_tls",self.use_tls)
73 email_account.set("attachment_limit",self.attachment_limit)
74 email_account.set("smtp_server",self.smtp_server)
75 email_account.set("smtp_port",self.smtp_port)
76 email_account.save()
77 except Exception as e:
78 frappe.msgprint(email_account.name)
79 frappe.throw(e)
80 return None
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/email/doctype/email_domain/email_domain.py b/frappe/email/doctype/email_domain/email_domain.py
--- a/frappe/email/doctype/email_domain/email_domain.py
+++ b/frappe/email/doctype/email_domain/email_domain.py
@@ -73,6 +73,7 @@
email_account.set("attachment_limit",self.attachment_limit)
email_account.set("smtp_server",self.smtp_server)
email_account.set("smtp_port",self.smtp_port)
+ email_account.set("incoming_port", self.incoming_port)
email_account.save()
except Exception as e:
frappe.msgprint(email_account.name)
|
{"golden_diff": "diff --git a/frappe/email/doctype/email_domain/email_domain.py b/frappe/email/doctype/email_domain/email_domain.py\n--- a/frappe/email/doctype/email_domain/email_domain.py\n+++ b/frappe/email/doctype/email_domain/email_domain.py\n@@ -73,6 +73,7 @@\n \t\t\t\temail_account.set(\"attachment_limit\",self.attachment_limit)\n \t\t\t\temail_account.set(\"smtp_server\",self.smtp_server)\n \t\t\t\temail_account.set(\"smtp_port\",self.smtp_port)\n+\t\t\t\temail_account.set(\"incoming_port\", self.incoming_port)\n \t\t\t\temail_account.save()\n \t\t\texcept Exception as e:\n \t\t\t\tfrappe.msgprint(email_account.name)\n", "issue": "IMAP port settings are not updated from Email Domain to Email Account\n## Description of the issue\r\n\r\nWhen changing the IMAP port in an existing Email Domain, the Email Accounts using this Domain are not updated accordingly. This can lead to Frappe trying an IMAPS connection (which usually is 993) to the plain IMAP port 143, resulting in misleading error messages like `ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)`.\r\n\r\nWe could track down the root cause to the method `on_update` from the DocType \"Email Domain\": it simply misses the field `incoming_port` when copying data to all e-mail accounts that use this domain. This leads to the problem if the `incoming_port` is already set in the email account and gets updated/changed afterwards in the email domain.\r\n## Context information (for bug reports)\r\n\r\n```\r\nfrappe-bench$ bench --version\r\n5.0.0\r\n\r\nfrappe-bench$ bench version\r\nerpnext 12.11.2\r\nfrappe 12.9.1\r\n```\r\n## Steps to reproduce the issue\r\n\r\n1. To reproduce this small bug you need to create a \"Email Domain\" in Frappe and save it with imap-port 143 and no SSL.\r\n2. Create an e-mail account and link it with the domain from step 1 but without `Enable Incoming` and save.\r\n3. Try to `Enable Incoming` and save\r\n4. After \"saving\" the e-mail account go to the domain and change the imap-port from 143 to 993 and check SSL.\r\n5. 
The `incoming_port` in Email-account is still 143.\r\n\r\n### Observed result\r\nIn the database you can see that the `incoming_port` in the e-mail account is still 143 (real domain and mail addresses hidden):\r\n\r\n```\r\nselect\r\n ea.email_id,\r\n ea.domain,\r\n ea.incoming_port,\r\n ed.incoming_port,\r\n ea.email_server,\r\n ed.email_server\r\nfrom \r\n `tabEmail Account` ea,\r\n `tabEmail Domain` ed\r\nwhere ea.domain = ed.name\r\n and ed.name = \"example.com\";\r\n```\r\n\r\n#### Before updating the IMAP port in the domain\r\n```\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| email_id | domain | incoming_port | incoming_port | email_server | email_server |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| [email protected] | example.com | 143 | 143 | example.com | example.com |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n1 row in set (0.000 sec)\r\n```\r\n#### After updating the IMAP port in the domain\r\n```\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| email_id | domain | incoming_port | incoming_port | email_server | email_server |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| [email protected] | example.com | 143 | 993 | example.com | example.com |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n1 row in set (0.001 sec)\r\n```\r\nNow it will always trigger an SSL-handshake-error if the scheduler tries to get access.\r\n\r\n### Expected result\r\nWhen the mail domain gets updated all necessary fields related to e-mail account should be updated including the `incoming_port`.\r\n### Stacktrace / full error message\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/app.py\", line 64, in application\r\n response = frappe.api.handle()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/api.py\", line 59, in handle\r\n return frappe.handler.handle()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py\", line 24, in handle\r\n data = execute_cmd(cmd)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py\", line 63, in execute_cmd\r\n return frappe.call(method, **frappe.form_dict)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py\", line 1055, in call\r\n return fn(*args, **newargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py\", line 21, in savedocs\r\n doc.save()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 273, in save\r\n return self._save(*args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 309, in _save\r\n self.run_before_save_methods()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 896, in run_before_save_methods\r\n self.run_method(\"validate\")\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 797, in run_method\r\n out = Document.hook(fn)(self, *args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 1073, in composer\r\n return composed(self, method, *args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 1056, in runner\r\n add_to_return_value(self, fn(self, 
*args, **kwargs))\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 791, in <lambda>\r\n fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py\", line 68, in validate\r\n self.get_incoming_server()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py\", line 168, in get_incoming_server\r\n email_server.connect()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 43, in connect\r\n return self.connect_imap()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 51, in connect_imap\r\n self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 564, in __init__\r\n self._super.__init__(self, *args, **kwargs)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1288, in __init__\r\n IMAP4.__init__(self, host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 198, in __init__\r\n self.open(host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1301, in open\r\n IMAP4.open(self, host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 299, in open\r\n self.sock = self._create_socket()\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1293, in _create_socket\r\n server_hostname=self.host)\r\n File \"/usr/lib/python3.6/ssl.py\", line 407, in wrap_socket\r\n _context=self, _session=session)\r\n File \"/usr/lib/python3.6/ssl.py\", line 817, in __init__\r\n self.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 1077, in do_handshake\r\n self._sslobj.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 689, in do_handshake\r\n self._sslobj.do_handshake()\r\nssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)\r\n```\r\n\r\n## OS\r\n- Linux Ubuntu 18.04\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import validate_email_address ,cint, cstr\nimport imaplib,poplib,smtplib\nfrom frappe.email.utils import get_port\n\nclass EmailDomain(Document):\n\tdef autoname(self):\n\t\tif self.domain_name:\n\t\t\tself.name = self.domain_name\n\n\tdef validate(self):\n\t\t\"\"\"Validate email id and check POP3/IMAP and SMTP connections is enabled.\"\"\"\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Incoming email account not correct\"))\n\t\t\t\treturn None\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\tself.smtp_port = 587\n\t\t\t\tsess = smtplib.SMTP(cstr(self.smtp_server or \"\"), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Outgoing email account not correct\"))\n\t\t\t\treturn None\n\t\treturn\n\n\tdef on_update(self):\n\t\t\"\"\"update all email accounts using this domain\"\"\"\n\t\tfor email_account in frappe.get_all(\"Email Account\",\n\t\tfilters={\"domain\": self.name}):\n\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\",\n\t\t\t\t\temail_account.name)\n\t\t\t\temail_account.set(\"email_server\",self.email_server)\n\t\t\t\temail_account.set(\"use_imap\",self.use_imap)\n\t\t\t\temail_account.set(\"use_ssl\",self.use_ssl)\n\t\t\t\temail_account.set(\"use_tls\",self.use_tls)\n\t\t\t\temail_account.set(\"attachment_limit\",self.attachment_limit)\n\t\t\t\temail_account.set(\"smtp_server\",self.smtp_server)\n\t\t\t\temail_account.set(\"smtp_port\",self.smtp_port)\n\t\t\t\temail_account.save()\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(email_account.name)\n\t\t\t\tfrappe.throw(e)\n\t\t\t\treturn None\n", "path": "frappe/email/doctype/email_domain/email_domain.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import validate_email_address ,cint, cstr\nimport imaplib,poplib,smtplib\nfrom frappe.email.utils import get_port\n\nclass EmailDomain(Document):\n\tdef autoname(self):\n\t\tif self.domain_name:\n\t\t\tself.name = self.domain_name\n\n\tdef validate(self):\n\t\t\"\"\"Validate email id and check POP3/IMAP and SMTP connections is enabled.\"\"\"\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Incoming email account not correct\"))\n\t\t\t\treturn None\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\tself.smtp_port = 587\n\t\t\t\tsess = smtplib.SMTP(cstr(self.smtp_server or \"\"), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Outgoing email account not correct\"))\n\t\t\t\treturn None\n\t\treturn\n\n\tdef on_update(self):\n\t\t\"\"\"update all email accounts using this domain\"\"\"\n\t\tfor email_account in frappe.get_all(\"Email Account\",\n\t\tfilters={\"domain\": self.name}):\n\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\",\n\t\t\t\t\temail_account.name)\n\t\t\t\temail_account.set(\"email_server\",self.email_server)\n\t\t\t\temail_account.set(\"use_imap\",self.use_imap)\n\t\t\t\temail_account.set(\"use_ssl\",self.use_ssl)\n\t\t\t\temail_account.set(\"use_tls\",self.use_tls)\n\t\t\t\temail_account.set(\"attachment_limit\",self.attachment_limit)\n\t\t\t\temail_account.set(\"smtp_server\",self.smtp_server)\n\t\t\t\temail_account.set(\"smtp_port\",self.smtp_port)\n\t\t\t\temail_account.set(\"incoming_port\", self.incoming_port)\n\t\t\t\temail_account.save()\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(email_account.name)\n\t\t\t\tfrappe.throw(e)\n\t\t\t\treturn None\n", "path": "frappe/email/doctype/email_domain/email_domain.py"}]}
| 2,889 | 135 |
gh_patches_debug_8873
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-4132
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
It seems like the output artifacts are always stored in a specific minio service (`minio-service.kubeflow:9000`), with a hard-coded port, namespace, bucket, secrets, etc.
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
It would be great to make this flexible, e.g. allow using S3, or changing the namespace or bucket names.
I suggest making it configurable; I can open such a PR if we agree it's needed.
flexible pipeline service (host) path in client SDK
When creating an SDK `Client()`, the path to the `ml-pipeline` API service is loaded from a hard-coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicates a specific k8s namespace. It can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
Also note that when a user provides the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), which seems like a potential bug.
If it's acceptable, I can submit a PR for the line change above.
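A rough sketch of the suggested fallback (the env var name and the in-cluster default are the ones mentioned above; the `resolve_host` helper is only for illustration):

```python
import os

# the current hard-coded default, kept as the last resort
IN_CLUSTER_DNS_NAME = 'ml-pipeline.kubeflow.svc.cluster.local:8888'


def resolve_host(host=None):
    # an explicit host wins, then the env var, then the in-cluster default
    return host or os.environ.get('ML_PIPELINE_DNS_NAME', IN_CLUSTER_DNS_NAME)
```

With this, behaviour stays exactly as today unless `ML_PIPELINE_DNS_NAME` is set in the environment.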
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17 from setuptools import setup
18
19 NAME = 'kfp'
20 #VERSION = .... Change the version in kfp/__init__.py
21
22 REQUIRES = [
23 'PyYAML',
24 'google-cloud-storage>=1.13.0',
25 'kubernetes>=8.0.0, <12.0.0',
26 'google-auth>=1.6.1',
27 'requests_toolbelt>=0.8.0',
28 'cloudpickle',
29 # Update the upper version whenever a new major version of the
30 # kfp-server-api package is released.
31 # Update the lower version when kfp sdk depends on new apis/fields in
32 # kfp-server-api.
33 # Note, please also update ./requirements.in
34 'kfp-server-api>=0.2.5, <2.0.0',
35 'jsonschema >= 3.0.1',
36 'tabulate',
37 'click',
38 'Deprecated',
39 'strip-hints',
40 ]
41
42
43 def find_version(*file_path_parts):
44 here = os.path.abspath(os.path.dirname(__file__))
45 with open(os.path.join(here, *file_path_parts), 'r') as fp:
46 version_file_text = fp.read()
47
48 version_match = re.search(
49 r"^__version__ = ['\"]([^'\"]*)['\"]",
50 version_file_text,
51 re.M,
52 )
53 if version_match:
54 return version_match.group(1)
55
56 raise RuntimeError('Unable to find version string.')
57
58
59 setup(
60 name=NAME,
61 version=find_version('kfp', '__init__.py'),
62 description='KubeFlow Pipelines SDK',
63 author='google',
64 install_requires=REQUIRES,
65 packages=[
66 'kfp',
67 'kfp.cli',
68 'kfp.cli.diagnose_me',
69 'kfp.compiler',
70 'kfp.components',
71 'kfp.components.structures',
72 'kfp.components.structures.kubernetes',
73 'kfp.containers',
74 'kfp.dsl',
75 'kfp.dsl.extensions',
76 'kfp.notebook',
77 ],
78 classifiers=[
79 'Intended Audience :: Developers',
80 'Intended Audience :: Education',
81 'Intended Audience :: Science/Research',
82 'License :: OSI Approved :: Apache Software License',
83 'Programming Language :: Python :: 3',
84 'Programming Language :: Python :: 3.5',
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Topic :: Scientific/Engineering',
88 'Topic :: Scientific/Engineering :: Artificial Intelligence',
89 'Topic :: Software Development',
90 'Topic :: Software Development :: Libraries',
91 'Topic :: Software Development :: Libraries :: Python Modules',
92 ],
93 python_requires='>=3.5.3',
94 include_package_data=True,
95 entry_points={
96 'console_scripts': [
97 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'
98 ]
99 })
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -39,6 +39,10 @@
'strip-hints',
]
+TESTS_REQUIRE = [
+ 'mock',
+]
+
def find_version(*file_path_parts):
here = os.path.abspath(os.path.dirname(__file__))
@@ -62,6 +66,7 @@
description='KubeFlow Pipelines SDK',
author='google',
install_requires=REQUIRES,
+ tests_require=TESTS_REQUIRE,
packages=[
'kfp',
'kfp.cli',
|
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -39,6 +39,10 @@\n 'strip-hints',\n ]\n \n+TESTS_REQUIRE = [\n+ 'mock',\n+]\n+\n \n def find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n@@ -62,6 +66,7 @@\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n+ tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.cli',\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <12.0.0',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=0.2.5, <2.0.0',\n 'jsonschema >= 3.0.1',\n 'tabulate',\n 'click',\n 'Deprecated',\n 'strip-hints',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <12.0.0',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=0.2.5, <2.0.0',\n 'jsonschema >= 3.0.1',\n 'tabulate',\n 'click',\n 'Deprecated',\n 'strip-hints',\n]\n\nTESTS_REQUIRE = [\n 'mock',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]}
| 1,575 | 141 |
gh_patches_debug_2772
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-4785
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RecycleBoxLayout NameError
``` python
from random import sample
from string import ascii_lowercase
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
kv = """
<Row@BoxLayout>:
value: ''
size_hint: None, None
size: self.minimum_size
Label:
text: root.value
size_hint: None, None
size: self.texture_size
<Test>:
rv: rv
orientation: 'vertical'
Button:
text: 'Populate list'
on_press: root.populate()
RecycleView:
id: rv
viewclass: 'Row'
RecycleBoxLayout:
default_size: None, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
"""
Builder.load_string(kv)
class Test(BoxLayout):
def populate(self):
self.rv.data = [{'value': ''.join(sample(ascii_lowercase, 6))}
for x in range(50)]
class TestApp(App):
def build(self):
return Test()
if __name__ == '__main__':
TestApp().run()
```
``` python
Traceback (most recent call last):
File "E:\dev\prs\kivy\examples\widgets\recycleview\basic_data.py", line 49, in <module>
TestApp().run()
File "E:\dev\prs\kivy\kivy\app.py", line 828, in run
runTouchApp()
File "E:\dev\prs\kivy\kivy\base.py", line 487, in runTouchApp
EventLoop.window.mainloop()
File "E:\dev\prs\kivy\kivy\core\window\window_sdl2.py", line 633, in mainloop
self._mainloop()
File "E:\dev\prs\kivy\kivy\core\window\window_sdl2.py", line 388, in _mainloop
EventLoop.idle()
File "E:\dev\prs\kivy\kivy\base.py", line 336, in idle
Clock.tick_draw()
File "E:\dev\prs\kivy\kivy\clock.py", line 528, in tick_draw
self._process_events_before_frame()
File "E:\dev\prs\kivy\kivy\clock.py", line 678, in _process_events_before_frame
event.tick(self._last_tick, remove)
File "E:\dev\prs\kivy\kivy\clock.py", line 412, in tick
ret = callback(self._dt)
File "E:\dev\prs\kivy\kivy\uix\recycleview\__init__.py", line 109, in refresh_views
lm.compute_layout(data, f)
File "E:\dev\prs\kivy\kivy\uix\recycleboxlayout.py", line 88, in compute_layout
changed and not self._update_sizes(changed)):
File "E:\dev\prs\kivy\kivy\uix\recycleboxlayout.py", line 81, in _update_sizes
return relayout
NameError: name 'relayout' is not defined
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/recycleboxlayout.py`
Content:
```
1 """
2 RecycleBoxLayout
3 ================
4
5 .. versionadded:: 1.9.2
6
7 .. warning::
8 This module is highly experimental, its API may change in the future and
9 the documentation is not complete at this time.
10
11 The RecycleBoxLayout is designed to provide a
12 :class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the
13 :class:`~kivy.uix.recycleview.RecycleView` widget. Please refer to the
14 :mod:`~kivy.uix.recycleview` module documentation for more information.
15
16 """
17
18 from kivy.uix.recyclelayout import RecycleLayout
19 from kivy.uix.boxlayout import BoxLayout
20
21 __all__ = ('RecycleBoxLayout', )
22
23
24 class RecycleBoxLayout(RecycleLayout, BoxLayout):
25
26 _rv_positions = None
27
28 def __init__(self, **kwargs):
29 super(RecycleBoxLayout, self).__init__(**kwargs)
30 self.funbind('children', self._trigger_layout)
31
32 def _update_sizes(self, changed):
33 horizontal = self.orientation == 'horizontal'
34 padding_left, padding_top, padding_right, padding_bottom = self.padding
35 padding_x = padding_left + padding_right
36 padding_y = padding_top + padding_bottom
37 selfw = self.width
38 selfh = self.height
39 layout_w = max(0, selfw - padding_x)
40 layout_h = max(0, selfh - padding_y)
41 cx = self.x + padding_left
42 cy = self.y + padding_bottom
43 view_opts = self.view_opts
44 remove_view = self.remove_view
45
46 for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),
47 (shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),
48 (shwn_max, shhn_max), ph, phn) in changed:
49 if (horizontal and
50 (shw != shnw or w != wn or shw_min != shwn_min or
51 shw_max != shwn_max) or
52 not horizontal and
53 (shh != shnh or h != hn or shh_min != shhn_min or
54 shh_max != shhn_max)):
55 return True
56
57 remove_view(widget, index)
58 opt = view_opts[index]
59 if horizontal:
60 wo, ho = opt['size']
61 if shnh is not None:
62 _, h = opt['size'] = [wo, shnh * layout_h]
63 else:
64 h = ho
65
66 xo, yo = opt['pos']
67 for key, value in phn.items():
68 posy = value * layout_h
69 if key == 'y':
70 yo = posy + cy
71 elif key == 'top':
72 yo = posy - h
73 elif key == 'center_y':
74 yo = posy - (h / 2.)
75 opt['pos'] = [xo, yo]
76 else:
77 wo, ho = opt['size']
78 if shnw is not None:
79 w, _ = opt['size'] = [shnw * layout_w, ho]
80 else:
81 w = wo
82
83 xo, yo = opt['pos']
84 for key, value in phn.items():
85 posx = value * layout_w
86 if key == 'x':
87 xo = posx + cx
88 elif key == 'right':
89 xo = posx - w
90 elif key == 'center_x':
91 xo = posx - (w / 2.)
92 opt['pos'] = [xo, yo]
93
94 return relayout
95
96 def compute_layout(self, data, flags):
97 super(RecycleBoxLayout, self).compute_layout(data, flags)
98
99 changed = self._changed_views
100 if (changed is None or
101 changed and not self._update_sizes(changed)):
102 return
103
104 self.clear_layout()
105 self._rv_positions = None
106 if not data:
107 l, t, r, b = self.padding
108 self.minimum_size = l + r, t + b
109 return
110
111 view_opts = self.view_opts
112 n = len(view_opts)
113 for i, x, y, w, h in self._iterate_layout(
114 [(opt['size'], opt['size_hint'], opt['pos_hint'],
115 opt['size_hint_min'], opt['size_hint_max']) for
116 opt in reversed(view_opts)]):
117 opt = view_opts[n - i - 1]
118 shw, shh = opt['size_hint']
119 opt['pos'] = x, y
120 wo, ho = opt['size']
121 # layout won't/shouldn't change previous size if size_hint is None
122 # which is what w/h being None means.
123 opt['size'] = [(wo if shw is None else w),
124 (ho if shh is None else h)]
125
126 spacing = self.spacing
127 pos = self._rv_positions = [None, ] * len(data)
128
129 if self.orientation == 'horizontal':
130 pos[0] = self.x
131 last = pos[0] + self.padding[0] + view_opts[0]['size'][0] + \
132 spacing / 2.
133 for i, val in enumerate(view_opts[1:], 1):
134 pos[i] = last
135 last += val['size'][0] + spacing
136 else:
137 last = pos[-1] = \
138 self.y + self.height - self.padding[1] - \
139 view_opts[0]['size'][1] - spacing / 2.
140 n = len(view_opts)
141 for i, val in enumerate(view_opts[1:], 1):
142 last -= spacing + val['size'][1]
143 pos[n - 1 - i] = last
144
145 def get_view_index_at(self, pos):
146 calc_pos = self._rv_positions
147 if not calc_pos:
148 return 0
149
150 x, y = pos
151
152 if self.orientation == 'horizontal':
153 if x >= calc_pos[-1] or len(calc_pos) == 1:
154 return len(calc_pos) - 1
155
156 ix = 0
157 for val in calc_pos[1:]:
158 if x < val:
159 return ix
160 ix += 1
161 else:
162 if y >= calc_pos[-1] or len(calc_pos) == 1:
163 return 0
164
165 iy = 0
166 for val in calc_pos[1:]:
167 if y < val:
168 return len(calc_pos) - iy - 1
169 iy += 1
170
171 assert False
172
173 def compute_visible_views(self, data, viewport):
174 if self._rv_positions is None or not data:
175 return []
176
177 x, y, w, h = viewport
178 at_idx = self.get_view_index_at
179 if self.orientation == 'horizontal':
180 a, b = at_idx((x, y)), at_idx((x + w, y))
181 else:
182 a, b = at_idx((x, y + h)), at_idx((x, y))
183 return list(range(a, b + 1))
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kivy/uix/recycleboxlayout.py b/kivy/uix/recycleboxlayout.py
--- a/kivy/uix/recycleboxlayout.py
+++ b/kivy/uix/recycleboxlayout.py
@@ -91,7 +91,7 @@
xo = posx - (w / 2.)
opt['pos'] = [xo, yo]
- return relayout
+ return False
def compute_layout(self, data, flags):
super(RecycleBoxLayout, self).compute_layout(data, flags)
|
{"golden_diff": "diff --git a/kivy/uix/recycleboxlayout.py b/kivy/uix/recycleboxlayout.py\n--- a/kivy/uix/recycleboxlayout.py\n+++ b/kivy/uix/recycleboxlayout.py\n@@ -91,7 +91,7 @@\n xo = posx - (w / 2.)\n opt['pos'] = [xo, yo]\n \n- return relayout\n+ return False\n \n def compute_layout(self, data, flags):\n super(RecycleBoxLayout, self).compute_layout(data, flags)\n", "issue": "RecycleBoxLayout NameError\n``` python\nfrom random import sample\nfrom string import ascii_lowercase\n\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\n\n\nkv = \"\"\"\n<Row@BoxLayout>:\n value: ''\n size_hint: None, None\n size: self.minimum_size\n Label:\n text: root.value\n size_hint: None, None\n size: self.texture_size\n\n<Test>:\n rv: rv\n orientation: 'vertical'\n Button:\n text: 'Populate list'\n on_press: root.populate()\n RecycleView:\n id: rv\n viewclass: 'Row'\n RecycleBoxLayout:\n default_size: None, None\n size_hint_y: None\n height: self.minimum_height\n orientation: 'vertical'\n\"\"\"\n\nBuilder.load_string(kv)\n\n\nclass Test(BoxLayout):\n\n def populate(self):\n self.rv.data = [{'value': ''.join(sample(ascii_lowercase, 6))}\n for x in range(50)]\n\n\nclass TestApp(App):\n def build(self):\n return Test()\n\nif __name__ == '__main__':\n TestApp().run()\n```\n\n``` python\n Traceback (most recent call last):\n File \"E:\\dev\\prs\\kivy\\examples\\widgets\\recycleview\\basic_data.py\", line 49, in <module>\n TestApp().run()\n File \"E:\\dev\\prs\\kivy\\kivy\\app.py\", line 828, in run\n runTouchApp()\n File \"E:\\dev\\prs\\kivy\\kivy\\base.py\", line 487, in runTouchApp\n EventLoop.window.mainloop()\n File \"E:\\dev\\prs\\kivy\\kivy\\core\\window\\window_sdl2.py\", line 633, in mainloop\n self._mainloop()\n File \"E:\\dev\\prs\\kivy\\kivy\\core\\window\\window_sdl2.py\", line 388, in _mainloop\n EventLoop.idle()\n File \"E:\\dev\\prs\\kivy\\kivy\\base.py\", line 336, in idle\n Clock.tick_draw()\n File \"E:\\dev\\prs\\kivy\\kivy\\clock.py\", line 528, in tick_draw\n self._process_events_before_frame()\n File \"E:\\dev\\prs\\kivy\\kivy\\clock.py\", line 678, in _process_events_before_frame\n event.tick(self._last_tick, remove)\n File \"E:\\dev\\prs\\kivy\\kivy\\clock.py\", line 412, in tick\n ret = callback(self._dt)\n File \"E:\\dev\\prs\\kivy\\kivy\\uix\\recycleview\\__init__.py\", line 109, in refresh_views\n lm.compute_layout(data, f)\n File \"E:\\dev\\prs\\kivy\\kivy\\uix\\recycleboxlayout.py\", line 88, in compute_layout\n changed and not self._update_sizes(changed)):\n File \"E:\\dev\\prs\\kivy\\kivy\\uix\\recycleboxlayout.py\", line 81, in _update_sizes\n return relayout\n NameError: name 'relayout' is not defined\n```\n\n", "before_files": [{"content": "\"\"\"\nRecycleBoxLayout\n================\n\n.. versionadded:: 1.9.2\n\n.. warning::\n This module is highly experimental, its API may change in the future and\n the documentation is not complete at this time.\n\nThe RecycleBoxLayout is designed to provide a\n:class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the\n:class:`~kivy.uix.recycleview.RecycleView` widget. 
Please refer to the\n:mod:`~kivy.uix.recycleview` module documentation for more information.\n\n\"\"\"\n\nfrom kivy.uix.recyclelayout import RecycleLayout\nfrom kivy.uix.boxlayout import BoxLayout\n\n__all__ = ('RecycleBoxLayout', )\n\n\nclass RecycleBoxLayout(RecycleLayout, BoxLayout):\n\n _rv_positions = None\n\n def __init__(self, **kwargs):\n super(RecycleBoxLayout, self).__init__(**kwargs)\n self.funbind('children', self._trigger_layout)\n\n def _update_sizes(self, changed):\n horizontal = self.orientation == 'horizontal'\n padding_left, padding_top, padding_right, padding_bottom = self.padding\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n selfw = self.width\n selfh = self.height\n layout_w = max(0, selfw - padding_x)\n layout_h = max(0, selfh - padding_y)\n cx = self.x + padding_left\n cy = self.y + padding_bottom\n view_opts = self.view_opts\n remove_view = self.remove_view\n\n for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),\n (shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),\n (shwn_max, shhn_max), ph, phn) in changed:\n if (horizontal and\n (shw != shnw or w != wn or shw_min != shwn_min or\n shw_max != shwn_max) or\n not horizontal and\n (shh != shnh or h != hn or shh_min != shhn_min or\n shh_max != shhn_max)):\n return True\n\n remove_view(widget, index)\n opt = view_opts[index]\n if horizontal:\n wo, ho = opt['size']\n if shnh is not None:\n _, h = opt['size'] = [wo, shnh * layout_h]\n else:\n h = ho\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posy = value * layout_h\n if key == 'y':\n yo = posy + cy\n elif key == 'top':\n yo = posy - h\n elif key == 'center_y':\n yo = posy - (h / 2.)\n opt['pos'] = [xo, yo]\n else:\n wo, ho = opt['size']\n if shnw is not None:\n w, _ = opt['size'] = [shnw * layout_w, ho]\n else:\n w = wo\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posx = value * layout_w\n if key == 'x':\n xo = posx + cx\n elif key == 'right':\n xo = posx - w\n elif key == 'center_x':\n xo = posx - (w / 2.)\n opt['pos'] = [xo, yo]\n\n return relayout\n\n def compute_layout(self, data, flags):\n super(RecycleBoxLayout, self).compute_layout(data, flags)\n\n changed = self._changed_views\n if (changed is None or\n changed and not self._update_sizes(changed)):\n return\n\n self.clear_layout()\n self._rv_positions = None\n if not data:\n l, t, r, b = self.padding\n self.minimum_size = l + r, t + b\n return\n\n view_opts = self.view_opts\n n = len(view_opts)\n for i, x, y, w, h in self._iterate_layout(\n [(opt['size'], opt['size_hint'], opt['pos_hint'],\n opt['size_hint_min'], opt['size_hint_max']) for\n opt in reversed(view_opts)]):\n opt = view_opts[n - i - 1]\n shw, shh = opt['size_hint']\n opt['pos'] = x, y\n wo, ho = opt['size']\n # layout won't/shouldn't change previous size if size_hint is None\n # which is what w/h being None means.\n opt['size'] = [(wo if shw is None else w),\n (ho if shh is None else h)]\n\n spacing = self.spacing\n pos = self._rv_positions = [None, ] * len(data)\n\n if self.orientation == 'horizontal':\n pos[0] = self.x\n last = pos[0] + self.padding[0] + view_opts[0]['size'][0] + \\\n spacing / 2.\n for i, val in enumerate(view_opts[1:], 1):\n pos[i] = last\n last += val['size'][0] + spacing\n else:\n last = pos[-1] = \\\n self.y + self.height - self.padding[1] - \\\n view_opts[0]['size'][1] - spacing / 2.\n n = len(view_opts)\n for i, val in enumerate(view_opts[1:], 1):\n last -= spacing + val['size'][1]\n pos[n - 1 - i] = last\n\n def 
get_view_index_at(self, pos):\n calc_pos = self._rv_positions\n if not calc_pos:\n return 0\n\n x, y = pos\n\n if self.orientation == 'horizontal':\n if x >= calc_pos[-1] or len(calc_pos) == 1:\n return len(calc_pos) - 1\n\n ix = 0\n for val in calc_pos[1:]:\n if x < val:\n return ix\n ix += 1\n else:\n if y >= calc_pos[-1] or len(calc_pos) == 1:\n return 0\n\n iy = 0\n for val in calc_pos[1:]:\n if y < val:\n return len(calc_pos) - iy - 1\n iy += 1\n\n assert False\n\n def compute_visible_views(self, data, viewport):\n if self._rv_positions is None or not data:\n return []\n\n x, y, w, h = viewport\n at_idx = self.get_view_index_at\n if self.orientation == 'horizontal':\n a, b = at_idx((x, y)), at_idx((x + w, y))\n else:\n a, b = at_idx((x, y + h)), at_idx((x, y))\n return list(range(a, b + 1))\n", "path": "kivy/uix/recycleboxlayout.py"}], "after_files": [{"content": "\"\"\"\nRecycleBoxLayout\n================\n\n.. versionadded:: 1.9.2\n\n.. warning::\n This module is highly experimental, its API may change in the future and\n the documentation is not complete at this time.\n\nThe RecycleBoxLayout is designed to provide a\n:class:`~kivy.uix.boxlayout.BoxLayout` type layout when used with the\n:class:`~kivy.uix.recycleview.RecycleView` widget. Please refer to the\n:mod:`~kivy.uix.recycleview` module documentation for more information.\n\n\"\"\"\n\nfrom kivy.uix.recyclelayout import RecycleLayout\nfrom kivy.uix.boxlayout import BoxLayout\n\n__all__ = ('RecycleBoxLayout', )\n\n\nclass RecycleBoxLayout(RecycleLayout, BoxLayout):\n\n _rv_positions = None\n\n def __init__(self, **kwargs):\n super(RecycleBoxLayout, self).__init__(**kwargs)\n self.funbind('children', self._trigger_layout)\n\n def _update_sizes(self, changed):\n horizontal = self.orientation == 'horizontal'\n padding_left, padding_top, padding_right, padding_bottom = self.padding\n padding_x = padding_left + padding_right\n padding_y = padding_top + padding_bottom\n selfw = self.width\n selfh = self.height\n layout_w = max(0, selfw - padding_x)\n layout_h = max(0, selfh - padding_y)\n cx = self.x + padding_left\n cy = self.y + padding_bottom\n view_opts = self.view_opts\n remove_view = self.remove_view\n\n for (index, widget, (w, h), (wn, hn), (shw, shh), (shnw, shnh),\n (shw_min, shh_min), (shwn_min, shhn_min), (shw_max, shh_max),\n (shwn_max, shhn_max), ph, phn) in changed:\n if (horizontal and\n (shw != shnw or w != wn or shw_min != shwn_min or\n shw_max != shwn_max) or\n not horizontal and\n (shh != shnh or h != hn or shh_min != shhn_min or\n shh_max != shhn_max)):\n return True\n\n remove_view(widget, index)\n opt = view_opts[index]\n if horizontal:\n wo, ho = opt['size']\n if shnh is not None:\n _, h = opt['size'] = [wo, shnh * layout_h]\n else:\n h = ho\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posy = value * layout_h\n if key == 'y':\n yo = posy + cy\n elif key == 'top':\n yo = posy - h\n elif key == 'center_y':\n yo = posy - (h / 2.)\n opt['pos'] = [xo, yo]\n else:\n wo, ho = opt['size']\n if shnw is not None:\n w, _ = opt['size'] = [shnw * layout_w, ho]\n else:\n w = wo\n\n xo, yo = opt['pos']\n for key, value in phn.items():\n posx = value * layout_w\n if key == 'x':\n xo = posx + cx\n elif key == 'right':\n xo = posx - w\n elif key == 'center_x':\n xo = posx - (w / 2.)\n opt['pos'] = [xo, yo]\n\n return False\n\n def compute_layout(self, data, flags):\n super(RecycleBoxLayout, self).compute_layout(data, flags)\n\n changed = self._changed_views\n if (changed is None or\n changed and not 
self._update_sizes(changed)):\n return\n\n self.clear_layout()\n self._rv_positions = None\n if not data:\n l, t, r, b = self.padding\n self.minimum_size = l + r, t + b\n return\n\n view_opts = self.view_opts\n n = len(view_opts)\n for i, x, y, w, h in self._iterate_layout(\n [(opt['size'], opt['size_hint'], opt['pos_hint'],\n opt['size_hint_min'], opt['size_hint_max']) for\n opt in reversed(view_opts)]):\n opt = view_opts[n - i - 1]\n shw, shh = opt['size_hint']\n opt['pos'] = x, y\n wo, ho = opt['size']\n # layout won't/shouldn't change previous size if size_hint is None\n # which is what w/h being None means.\n opt['size'] = [(wo if shw is None else w),\n (ho if shh is None else h)]\n\n spacing = self.spacing\n pos = self._rv_positions = [None, ] * len(data)\n\n if self.orientation == 'horizontal':\n pos[0] = self.x\n last = pos[0] + self.padding[0] + view_opts[0]['size'][0] + \\\n spacing / 2.\n for i, val in enumerate(view_opts[1:], 1):\n pos[i] = last\n last += val['size'][0] + spacing\n else:\n last = pos[-1] = \\\n self.y + self.height - self.padding[1] - \\\n view_opts[0]['size'][1] - spacing / 2.\n n = len(view_opts)\n for i, val in enumerate(view_opts[1:], 1):\n last -= spacing + val['size'][1]\n pos[n - 1 - i] = last\n\n def get_view_index_at(self, pos):\n calc_pos = self._rv_positions\n if not calc_pos:\n return 0\n\n x, y = pos\n\n if self.orientation == 'horizontal':\n if x >= calc_pos[-1] or len(calc_pos) == 1:\n return len(calc_pos) - 1\n\n ix = 0\n for val in calc_pos[1:]:\n if x < val:\n return ix\n ix += 1\n else:\n if y >= calc_pos[-1] or len(calc_pos) == 1:\n return 0\n\n iy = 0\n for val in calc_pos[1:]:\n if y < val:\n return len(calc_pos) - iy - 1\n iy += 1\n\n assert False\n\n def compute_visible_views(self, data, viewport):\n if self._rv_positions is None or not data:\n return []\n\n x, y, w, h = viewport\n at_idx = self.get_view_index_at\n if self.orientation == 'horizontal':\n a, b = at_idx((x, y)), at_idx((x + w, y))\n else:\n a, b = at_idx((x, y + h)), at_idx((x, y))\n return list(range(a, b + 1))\n", "path": "kivy/uix/recycleboxlayout.py"}]}
| 2,991 | 120 |
gh_patches_debug_22632
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-1448
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add PYTHONSTARTUP support to pshell
Currently pshell does not source any `.pystartup` file pointed to by the `PYTHONSTARTUP` environment variable. It'd be nice to support this.
https://docs.python.org/2/tutorial/interpreter.html#the-interactive-startup-file
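A rough sketch of the desired behaviour, assuming the startup file is simply executed into the shell's namespace the way the stock interpreter does (the helper name is illustrative only):

```python
import os


def source_pythonstartup(env):
    """Execute the PYTHONSTARTUP file, if any, into the shell namespace."""
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'rb') as fp:
            exec(fp.read().decode('utf-8'), env)
    # exec() injects __builtins__; drop it so it does not linger as an
    # extra shell variable next to the objects pshell already provides
    env.pop('__builtins__', None)
    return env
```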
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/scripts/pshell.py`
Content:
```
1 from code import interact
2 import optparse
3 import sys
4 import textwrap
5
6 from pyramid.compat import configparser
7 from pyramid.util import DottedNameResolver
8 from pyramid.paster import bootstrap
9
10 from pyramid.paster import setup_logging
11
12 from pyramid.scripts.common import parse_vars
13
14 def main(argv=sys.argv, quiet=False):
15 command = PShellCommand(argv, quiet)
16 return command.run()
17
18 class PShellCommand(object):
19 usage = '%prog config_uri'
20 description = """\
21 Open an interactive shell with a Pyramid app loaded. This command
22 accepts one positional argument named "config_uri" which specifies the
23 PasteDeploy config file to use for the interactive shell. The format is
24 "inifile#name". If the name is left off, the Pyramid default application
25 will be assumed. Example: "pshell myapp.ini#main"
26
27 If you do not point the loader directly at the section of the ini file
28 containing your Pyramid application, the command will attempt to
29 find the app for you. If you are loading a pipeline that contains more
30 than one Pyramid application within it, the loader will use the
31 last one.
32 """
33 bootstrap = (bootstrap,) # for testing
34
35 parser = optparse.OptionParser(
36 usage,
37 description=textwrap.dedent(description)
38 )
39 parser.add_option('-p', '--python-shell',
40 action='store', type='string', dest='python_shell',
41 default='', help='ipython | bpython | python')
42 parser.add_option('--setup',
43 dest='setup',
44 help=("A callable that will be passed the environment "
45 "before it is made available to the shell. This "
46 "option will override the 'setup' key in the "
47 "[pshell] ini section."))
48
49 ConfigParser = configparser.ConfigParser # testing
50
51 loaded_objects = {}
52 object_help = {}
53 setup = None
54
55 def __init__(self, argv, quiet=False):
56 self.quiet = quiet
57 self.options, self.args = self.parser.parse_args(argv[1:])
58
59 def pshell_file_config(self, filename):
60 config = self.ConfigParser()
61 config.read(filename)
62 try:
63 items = config.items('pshell')
64 except configparser.NoSectionError:
65 return
66
67 resolver = DottedNameResolver(None)
68 self.loaded_objects = {}
69 self.object_help = {}
70 self.setup = None
71 for k, v in items:
72 if k == 'setup':
73 self.setup = v
74 else:
75 self.loaded_objects[k] = resolver.maybe_resolve(v)
76 self.object_help[k] = v
77
78 def out(self, msg): # pragma: no cover
79 if not self.quiet:
80 print(msg)
81
82 def run(self, shell=None):
83 if not self.args:
84 self.out('Requires a config file argument')
85 return 2
86 config_uri = self.args[0]
87 config_file = config_uri.split('#', 1)[0]
88 setup_logging(config_file)
89 self.pshell_file_config(config_file)
90
91 # bootstrap the environ
92 env = self.bootstrap[0](config_uri, options=parse_vars(self.args[1:]))
93
94 # remove the closer from the env
95 closer = env.pop('closer')
96
97 # setup help text for default environment
98 env_help = dict(env)
99 env_help['app'] = 'The WSGI application.'
100 env_help['root'] = 'Root of the default resource tree.'
101 env_help['registry'] = 'Active Pyramid registry.'
102 env_help['request'] = 'Active request object.'
103 env_help['root_factory'] = (
104 'Default root factory used to create `root`.')
105
106 # override use_script with command-line options
107 if self.options.setup:
108 self.setup = self.options.setup
109
110 if self.setup:
111 # store the env before muddling it with the script
112 orig_env = env.copy()
113
114 # call the setup callable
115 resolver = DottedNameResolver(None)
116 setup = resolver.maybe_resolve(self.setup)
117 setup(env)
118
119 # remove any objects from default help that were overidden
120 for k, v in env.items():
121 if k not in orig_env or env[k] != orig_env[k]:
122 env_help[k] = v
123
124 # load the pshell section of the ini file
125 env.update(self.loaded_objects)
126
127 # eliminate duplicates from env, allowing custom vars to override
128 for k in self.loaded_objects:
129 if k in env_help:
130 del env_help[k]
131
132 # generate help text
133 help = ''
134 if env_help:
135 help += 'Environment:'
136 for var in sorted(env_help.keys()):
137 help += '\n %-12s %s' % (var, env_help[var])
138
139 if self.object_help:
140 help += '\n\nCustom Variables:'
141 for var in sorted(self.object_help.keys()):
142 help += '\n %-12s %s' % (var, self.object_help[var])
143
144 if shell is None:
145 shell = self.make_shell()
146
147 try:
148 shell(env, help)
149 finally:
150 closer()
151
152 def make_shell(self):
153 shell = None
154 user_shell = self.options.python_shell.lower()
155 if not user_shell:
156 shell = self.make_ipython_shell()
157 if shell is None:
158 shell = self.make_bpython_shell()
159
160 elif user_shell == 'ipython':
161 shell = self.make_ipython_shell()
162
163 elif user_shell == 'bpython':
164 shell = self.make_bpython_shell()
165
166 if shell is None:
167 shell = self.make_default_shell()
168
169 return shell
170
171 def make_default_shell(self, interact=interact):
172 def shell(env, help):
173 cprt = 'Type "help" for more information.'
174 banner = "Python %s on %s\n%s" % (sys.version, sys.platform, cprt)
175 banner += '\n\n' + help + '\n'
176 interact(banner, local=env)
177 return shell
178
179 def make_bpython_shell(self, BPShell=None):
180 if BPShell is None: # pragma: no cover
181 try:
182 from bpython import embed
183 BPShell = embed
184 except ImportError:
185 return None
186 def shell(env, help):
187 BPShell(locals_=env, banner=help + '\n')
188 return shell
189
190 def make_ipython_shell(self):
191 shell = self.make_ipython_v1_1_shell()
192 if shell is None:
193 shell = self.make_ipython_v0_11_shell()
194 if shell is None:
195 shell = self.make_ipython_v0_10_shell()
196 return shell
197
198 def make_ipython_v1_1_shell(self, IPShellFactory=None):
199 if IPShellFactory is None: # pragma: no cover
200 try:
201 from IPython.terminal.embed import (
202 InteractiveShellEmbed)
203 IPShellFactory = InteractiveShellEmbed
204 except ImportError:
205 return None
206 def shell(env, help):
207 IPShell = IPShellFactory(banner2=help + '\n', user_ns=env)
208 IPShell()
209 return shell
210
211 def make_ipython_v0_11_shell(self, IPShellFactory=None):
212 if IPShellFactory is None: # pragma: no cover
213 try:
214 from IPython.frontend.terminal.embed import (
215 InteractiveShellEmbed)
216 IPShellFactory = InteractiveShellEmbed
217 except ImportError:
218 return None
219 def shell(env, help):
220 IPShell = IPShellFactory(banner2=help + '\n', user_ns=env)
221 IPShell()
222 return shell
223
224 def make_ipython_v0_10_shell(self, IPShellFactory=None):
225 if IPShellFactory is None: # pragma: no cover
226 try:
227 from IPython.Shell import IPShellEmbed
228 IPShellFactory = IPShellEmbed
229 except ImportError:
230 return None
231 def shell(env, help):
232 IPShell = IPShellFactory(argv=[], user_ns=env)
233 IPShell.set_banner(IPShell.IP.BANNER + '\n' + help + '\n')
234 IPShell()
235 return shell
236
237 if __name__ == '__main__': # pragma: no cover
238 sys.exit(main() or 0)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/scripts/pshell.py b/pyramid/scripts/pshell.py
--- a/pyramid/scripts/pshell.py
+++ b/pyramid/scripts/pshell.py
@@ -1,9 +1,11 @@
from code import interact
import optparse
+import os
import sys
import textwrap
from pyramid.compat import configparser
+from pyramid.compat import exec_
from pyramid.util import DottedNameResolver
from pyramid.paster import bootstrap
@@ -51,6 +53,7 @@
loaded_objects = {}
object_help = {}
setup = None
+ pystartup = os.environ.get('PYTHONSTARTUP')
def __init__(self, argv, quiet=False):
self.quiet = quiet
@@ -144,6 +147,12 @@
if shell is None:
shell = self.make_shell()
+ if self.pystartup and os.path.isfile(self.pystartup):
+ with open(self.pystartup, 'rb') as fp:
+ exec_(fp.read().decode('utf-8'), env)
+ if '__builtins__' in env:
+ del env['__builtins__']
+
try:
shell(env, help)
finally:
|
{"golden_diff": "diff --git a/pyramid/scripts/pshell.py b/pyramid/scripts/pshell.py\n--- a/pyramid/scripts/pshell.py\n+++ b/pyramid/scripts/pshell.py\n@@ -1,9 +1,11 @@\n from code import interact\n import optparse\n+import os\n import sys\n import textwrap\n \n from pyramid.compat import configparser\n+from pyramid.compat import exec_\n from pyramid.util import DottedNameResolver\n from pyramid.paster import bootstrap\n \n@@ -51,6 +53,7 @@\n loaded_objects = {}\n object_help = {}\n setup = None\n+ pystartup = os.environ.get('PYTHONSTARTUP')\n \n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n@@ -144,6 +147,12 @@\n if shell is None:\n shell = self.make_shell()\n \n+ if self.pystartup and os.path.isfile(self.pystartup):\n+ with open(self.pystartup, 'rb') as fp:\n+ exec_(fp.read().decode('utf-8'), env)\n+ if '__builtins__' in env:\n+ del env['__builtins__']\n+\n try:\n shell(env, help)\n finally:\n", "issue": "add PYTHONSTARTUP support to pshell\nCurrently pshell does not source any `.pystartup` file pointed to by the `PYTHONSTARTUP` environment variable. It'd be nice to support this.\n\nhttps://docs.python.org/2/tutorial/interpreter.html#the-interactive-startup-file\n\n", "before_files": [{"content": "from code import interact\nimport optparse\nimport sys\nimport textwrap\n\nfrom pyramid.compat import configparser\nfrom pyramid.util import DottedNameResolver\nfrom pyramid.paster import bootstrap\n\nfrom pyramid.paster import setup_logging\n\nfrom pyramid.scripts.common import parse_vars\n\ndef main(argv=sys.argv, quiet=False):\n command = PShellCommand(argv, quiet)\n return command.run()\n\nclass PShellCommand(object):\n usage = '%prog config_uri'\n description = \"\"\"\\\n Open an interactive shell with a Pyramid app loaded. This command\n accepts one positional argument named \"config_uri\" which specifies the\n PasteDeploy config file to use for the interactive shell. The format is\n \"inifile#name\". If the name is left off, the Pyramid default application\n will be assumed. Example: \"pshell myapp.ini#main\"\n\n If you do not point the loader directly at the section of the ini file\n containing your Pyramid application, the command will attempt to\n find the app for you. If you are loading a pipeline that contains more\n than one Pyramid application within it, the loader will use the\n last one.\n \"\"\"\n bootstrap = (bootstrap,) # for testing\n\n parser = optparse.OptionParser(\n usage,\n description=textwrap.dedent(description)\n )\n parser.add_option('-p', '--python-shell',\n action='store', type='string', dest='python_shell',\n default='', help='ipython | bpython | python')\n parser.add_option('--setup',\n dest='setup',\n help=(\"A callable that will be passed the environment \"\n \"before it is made available to the shell. 
This \"\n \"option will override the 'setup' key in the \"\n \"[pshell] ini section.\"))\n\n ConfigParser = configparser.ConfigParser # testing\n\n loaded_objects = {}\n object_help = {}\n setup = None\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n\n def pshell_file_config(self, filename):\n config = self.ConfigParser()\n config.read(filename)\n try:\n items = config.items('pshell')\n except configparser.NoSectionError:\n return\n\n resolver = DottedNameResolver(None)\n self.loaded_objects = {}\n self.object_help = {}\n self.setup = None\n for k, v in items:\n if k == 'setup':\n self.setup = v\n else:\n self.loaded_objects[k] = resolver.maybe_resolve(v)\n self.object_help[k] = v\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\n def run(self, shell=None):\n if not self.args:\n self.out('Requires a config file argument')\n return 2\n config_uri = self.args[0]\n config_file = config_uri.split('#', 1)[0]\n setup_logging(config_file)\n self.pshell_file_config(config_file)\n\n # bootstrap the environ\n env = self.bootstrap[0](config_uri, options=parse_vars(self.args[1:]))\n\n # remove the closer from the env\n closer = env.pop('closer')\n\n # setup help text for default environment\n env_help = dict(env)\n env_help['app'] = 'The WSGI application.'\n env_help['root'] = 'Root of the default resource tree.'\n env_help['registry'] = 'Active Pyramid registry.'\n env_help['request'] = 'Active request object.'\n env_help['root_factory'] = (\n 'Default root factory used to create `root`.')\n\n # override use_script with command-line options\n if self.options.setup:\n self.setup = self.options.setup\n\n if self.setup:\n # store the env before muddling it with the script\n orig_env = env.copy()\n\n # call the setup callable\n resolver = DottedNameResolver(None)\n setup = resolver.maybe_resolve(self.setup)\n setup(env)\n\n # remove any objects from default help that were overidden\n for k, v in env.items():\n if k not in orig_env or env[k] != orig_env[k]:\n env_help[k] = v\n\n # load the pshell section of the ini file\n env.update(self.loaded_objects)\n\n # eliminate duplicates from env, allowing custom vars to override\n for k in self.loaded_objects:\n if k in env_help:\n del env_help[k]\n\n # generate help text\n help = ''\n if env_help:\n help += 'Environment:'\n for var in sorted(env_help.keys()):\n help += '\\n %-12s %s' % (var, env_help[var])\n\n if self.object_help:\n help += '\\n\\nCustom Variables:'\n for var in sorted(self.object_help.keys()):\n help += '\\n %-12s %s' % (var, self.object_help[var])\n\n if shell is None:\n shell = self.make_shell()\n\n try:\n shell(env, help)\n finally:\n closer()\n\n def make_shell(self):\n shell = None\n user_shell = self.options.python_shell.lower()\n if not user_shell:\n shell = self.make_ipython_shell()\n if shell is None:\n shell = self.make_bpython_shell()\n\n elif user_shell == 'ipython':\n shell = self.make_ipython_shell()\n\n elif user_shell == 'bpython':\n shell = self.make_bpython_shell()\n\n if shell is None:\n shell = self.make_default_shell()\n\n return shell\n\n def make_default_shell(self, interact=interact):\n def shell(env, help):\n cprt = 'Type \"help\" for more information.'\n banner = \"Python %s on %s\\n%s\" % (sys.version, sys.platform, cprt)\n banner += '\\n\\n' + help + '\\n'\n interact(banner, local=env)\n return shell\n\n def make_bpython_shell(self, BPShell=None):\n if BPShell is None: # pragma: no cover\n try:\n from bpython 
import embed\n BPShell = embed\n except ImportError:\n return None\n def shell(env, help):\n BPShell(locals_=env, banner=help + '\\n')\n return shell\n\n def make_ipython_shell(self):\n shell = self.make_ipython_v1_1_shell()\n if shell is None:\n shell = self.make_ipython_v0_11_shell()\n if shell is None:\n shell = self.make_ipython_v0_10_shell()\n return shell\n\n def make_ipython_v1_1_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.terminal.embed import (\n InteractiveShellEmbed)\n IPShellFactory = InteractiveShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(banner2=help + '\\n', user_ns=env)\n IPShell()\n return shell\n\n def make_ipython_v0_11_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.frontend.terminal.embed import (\n InteractiveShellEmbed)\n IPShellFactory = InteractiveShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(banner2=help + '\\n', user_ns=env)\n IPShell()\n return shell\n\n def make_ipython_v0_10_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.Shell import IPShellEmbed\n IPShellFactory = IPShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(argv=[], user_ns=env)\n IPShell.set_banner(IPShell.IP.BANNER + '\\n' + help + '\\n')\n IPShell()\n return shell\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pshell.py"}], "after_files": [{"content": "from code import interact\nimport optparse\nimport os\nimport sys\nimport textwrap\n\nfrom pyramid.compat import configparser\nfrom pyramid.compat import exec_\nfrom pyramid.util import DottedNameResolver\nfrom pyramid.paster import bootstrap\n\nfrom pyramid.paster import setup_logging\n\nfrom pyramid.scripts.common import parse_vars\n\ndef main(argv=sys.argv, quiet=False):\n command = PShellCommand(argv, quiet)\n return command.run()\n\nclass PShellCommand(object):\n usage = '%prog config_uri'\n description = \"\"\"\\\n Open an interactive shell with a Pyramid app loaded. This command\n accepts one positional argument named \"config_uri\" which specifies the\n PasteDeploy config file to use for the interactive shell. The format is\n \"inifile#name\". If the name is left off, the Pyramid default application\n will be assumed. Example: \"pshell myapp.ini#main\"\n\n If you do not point the loader directly at the section of the ini file\n containing your Pyramid application, the command will attempt to\n find the app for you. If you are loading a pipeline that contains more\n than one Pyramid application within it, the loader will use the\n last one.\n \"\"\"\n bootstrap = (bootstrap,) # for testing\n\n parser = optparse.OptionParser(\n usage,\n description=textwrap.dedent(description)\n )\n parser.add_option('-p', '--python-shell',\n action='store', type='string', dest='python_shell',\n default='', help='ipython | bpython | python')\n parser.add_option('--setup',\n dest='setup',\n help=(\"A callable that will be passed the environment \"\n \"before it is made available to the shell. 
This \"\n \"option will override the 'setup' key in the \"\n \"[pshell] ini section.\"))\n\n ConfigParser = configparser.ConfigParser # testing\n\n loaded_objects = {}\n object_help = {}\n setup = None\n pystartup = os.environ.get('PYTHONSTARTUP')\n\n def __init__(self, argv, quiet=False):\n self.quiet = quiet\n self.options, self.args = self.parser.parse_args(argv[1:])\n\n def pshell_file_config(self, filename):\n config = self.ConfigParser()\n config.read(filename)\n try:\n items = config.items('pshell')\n except configparser.NoSectionError:\n return\n\n resolver = DottedNameResolver(None)\n self.loaded_objects = {}\n self.object_help = {}\n self.setup = None\n for k, v in items:\n if k == 'setup':\n self.setup = v\n else:\n self.loaded_objects[k] = resolver.maybe_resolve(v)\n self.object_help[k] = v\n\n def out(self, msg): # pragma: no cover\n if not self.quiet:\n print(msg)\n\n def run(self, shell=None):\n if not self.args:\n self.out('Requires a config file argument')\n return 2\n config_uri = self.args[0]\n config_file = config_uri.split('#', 1)[0]\n setup_logging(config_file)\n self.pshell_file_config(config_file)\n\n # bootstrap the environ\n env = self.bootstrap[0](config_uri, options=parse_vars(self.args[1:]))\n\n # remove the closer from the env\n closer = env.pop('closer')\n\n # setup help text for default environment\n env_help = dict(env)\n env_help['app'] = 'The WSGI application.'\n env_help['root'] = 'Root of the default resource tree.'\n env_help['registry'] = 'Active Pyramid registry.'\n env_help['request'] = 'Active request object.'\n env_help['root_factory'] = (\n 'Default root factory used to create `root`.')\n\n # override use_script with command-line options\n if self.options.setup:\n self.setup = self.options.setup\n\n if self.setup:\n # store the env before muddling it with the script\n orig_env = env.copy()\n\n # call the setup callable\n resolver = DottedNameResolver(None)\n setup = resolver.maybe_resolve(self.setup)\n setup(env)\n\n # remove any objects from default help that were overidden\n for k, v in env.items():\n if k not in orig_env or env[k] != orig_env[k]:\n env_help[k] = v\n\n # load the pshell section of the ini file\n env.update(self.loaded_objects)\n\n # eliminate duplicates from env, allowing custom vars to override\n for k in self.loaded_objects:\n if k in env_help:\n del env_help[k]\n\n # generate help text\n help = ''\n if env_help:\n help += 'Environment:'\n for var in sorted(env_help.keys()):\n help += '\\n %-12s %s' % (var, env_help[var])\n\n if self.object_help:\n help += '\\n\\nCustom Variables:'\n for var in sorted(self.object_help.keys()):\n help += '\\n %-12s %s' % (var, self.object_help[var])\n\n if shell is None:\n shell = self.make_shell()\n\n if self.pystartup and os.path.isfile(self.pystartup):\n with open(self.pystartup, 'rb') as fp:\n exec_(fp.read().decode('utf-8'), env)\n if '__builtins__' in env:\n del env['__builtins__']\n\n try:\n shell(env, help)\n finally:\n closer()\n\n def make_shell(self):\n shell = None\n user_shell = self.options.python_shell.lower()\n if not user_shell:\n shell = self.make_ipython_shell()\n if shell is None:\n shell = self.make_bpython_shell()\n\n elif user_shell == 'ipython':\n shell = self.make_ipython_shell()\n\n elif user_shell == 'bpython':\n shell = self.make_bpython_shell()\n\n if shell is None:\n shell = self.make_default_shell()\n\n return shell\n\n def make_default_shell(self, interact=interact):\n def shell(env, help):\n cprt = 'Type \"help\" for more information.'\n banner = \"Python %s on 
%s\\n%s\" % (sys.version, sys.platform, cprt)\n banner += '\\n\\n' + help + '\\n'\n interact(banner, local=env)\n return shell\n\n def make_bpython_shell(self, BPShell=None):\n if BPShell is None: # pragma: no cover\n try:\n from bpython import embed\n BPShell = embed\n except ImportError:\n return None\n def shell(env, help):\n BPShell(locals_=env, banner=help + '\\n')\n return shell\n\n def make_ipython_shell(self):\n shell = self.make_ipython_v1_1_shell()\n if shell is None:\n shell = self.make_ipython_v0_11_shell()\n if shell is None:\n shell = self.make_ipython_v0_10_shell()\n return shell\n\n def make_ipython_v1_1_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.terminal.embed import (\n InteractiveShellEmbed)\n IPShellFactory = InteractiveShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(banner2=help + '\\n', user_ns=env)\n IPShell()\n return shell\n\n def make_ipython_v0_11_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.frontend.terminal.embed import (\n InteractiveShellEmbed)\n IPShellFactory = InteractiveShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(banner2=help + '\\n', user_ns=env)\n IPShell()\n return shell\n\n def make_ipython_v0_10_shell(self, IPShellFactory=None):\n if IPShellFactory is None: # pragma: no cover\n try:\n from IPython.Shell import IPShellEmbed\n IPShellFactory = IPShellEmbed\n except ImportError:\n return None\n def shell(env, help):\n IPShell = IPShellFactory(argv=[], user_ns=env)\n IPShell.set_banner(IPShell.IP.BANNER + '\\n' + help + '\\n')\n IPShell()\n return shell\n\nif __name__ == '__main__': # pragma: no cover\n sys.exit(main() or 0)\n", "path": "pyramid/scripts/pshell.py"}]}
| 2,720 | 269 |
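The record above patches Pyramid's `pshell` command so that it honors the `PYTHONSTARTUP` environment variable before opening the interactive shell. As a rough, self-contained sketch of that mechanism (an assumed illustration, not Pyramid's actual code — the helper name and the sample `env` are made up):

```python
# Sketch of the PYTHONSTARTUP handling added in the record above (assumed
# behaviour, not Pyramid source): read the startup file, execute it against
# the shell environment, and drop the __builtins__ key that exec() injects.
import os

def load_pystartup(env):
    path = os.environ.get("PYTHONSTARTUP")
    if path and os.path.isfile(path):
        with open(path, "rb") as fp:
            exec(fp.read().decode("utf-8"), env)  # populate env in place
        env.pop("__builtins__", None)
    return env

if __name__ == "__main__":
    shell_env = load_pystartup({"app": "fake WSGI app"})
    print(sorted(shell_env))
```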
gh_patches_debug_32641
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-7388
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Standalone server waits forever to drain clients during shutdown
Originally discovered in [a help thread on the forums](https://community.letsencrypt.org/t/certbot-hangs-on-stopping-server/102450?u=_az).
## My operating system is (include version):
Ubuntu 19.04
## I installed Certbot with (certbot-auto, OS package manager, pip, etc):
certbot-auto
## I ran this command and it produced this output:
sudo certbot-auto certonly -d certbot.eff.org --standalone --dry-run
and simultaneously in a second terminal:
nc -vvv localhost 80
Causes an infinite hang until the `nc` client disconnects:
2019-09-20 19:29:28,932:DEBUG:certbot.error_handler:Calling registered functions
2019-09-20 19:29:28,932:INFO:certbot.auth_handler:Cleaning up challenges
2019-09-20 19:29:28,932:DEBUG:certbot.plugins.standalone:Stopping server at :::80...
## Certbot's behavior differed from what I expected because:
The standalone servers should not wait forever for clients to drain. I don't think they should wait at all and instead should just close the server socket. There is no reason to expect further legitimate traffic to the server.
If a very patient internet crawler happens to hit the port, we need to dispose of it quickly.
It may be appropriate to also add read and write timeouts to the socket, since it is practically always going to be exposed to the internet.
--- END ISSUE ---
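The fix that was eventually adopted for this issue (see the patch further down) leans on the per-handler `timeout` attribute that Python's `socketserver`/`http.server` machinery already supports: when `timeout` is set, the connection gets a socket timeout, so an idle client can no longer park on the port indefinitely. A minimal standalone sketch of that mechanism — not Certbot code, and the port and 5-second value are arbitrary — looks like this:

```python
# Minimal sketch (assumed illustration, not acme/certbot code): a handler-level
# `timeout` makes http.server drop clients that connect but never send a request,
# e.g. `nc localhost 8080`, instead of letting them block the serve loop forever.
from http.server import BaseHTTPRequestHandler, HTTPServer

class TimeoutHandler(BaseHTTPRequestHandler):
    timeout = 5  # seconds; socketserver applies this via connection.settimeout()

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

if __name__ == "__main__":
    with HTTPServer(("127.0.0.1", 8080), TimeoutHandler) as server:
        server.serve_forever()
```

With a timeout in place, a shutdown is delayed by at most one timeout interval rather than waiting on a client that never disconnects.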
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `acme/acme/standalone.py`
Content:
```
1 """Support for standalone client challenge solvers. """
2 import collections
3 import functools
4 import logging
5 import socket
6 import threading
7
8 from six.moves import BaseHTTPServer # type: ignore
9 from six.moves import http_client
10 from six.moves import socketserver # type: ignore
11
12 from acme import challenges
13 from acme import crypto_util
14 from acme.magic_typing import List
15
16 logger = logging.getLogger(__name__)
17
18
19 class TLSServer(socketserver.TCPServer):
20 """Generic TLS Server."""
21
22 def __init__(self, *args, **kwargs):
23 self.ipv6 = kwargs.pop("ipv6", False)
24 if self.ipv6:
25 self.address_family = socket.AF_INET6
26 else:
27 self.address_family = socket.AF_INET
28 self.certs = kwargs.pop("certs", {})
29 self.method = kwargs.pop(
30 "method", crypto_util._DEFAULT_SSL_METHOD)
31 self.allow_reuse_address = kwargs.pop("allow_reuse_address", True)
32 socketserver.TCPServer.__init__(self, *args, **kwargs)
33
34 def _wrap_sock(self):
35 self.socket = crypto_util.SSLSocket(
36 self.socket, cert_selection=self._cert_selection,
37 alpn_selection=getattr(self, '_alpn_selection', None),
38 method=self.method)
39
40 def _cert_selection(self, connection): # pragma: no cover
41 """Callback selecting certificate for connection."""
42 server_name = connection.get_servername()
43 return self.certs.get(server_name, None)
44
45 def server_bind(self):
46 self._wrap_sock()
47 return socketserver.TCPServer.server_bind(self)
48
49
50 class ACMEServerMixin:
51 """ACME server common settings mixin."""
52 # TODO: c.f. #858
53 server_version = "ACME client standalone challenge solver"
54 allow_reuse_address = True
55
56
57 class BaseDualNetworkedServers(object):
58 """Base class for a pair of IPv6 and IPv4 servers that tries to do everything
59 it's asked for both servers, but where failures in one server don't
60 affect the other.
61
62 If two servers are instantiated, they will serve on the same port.
63 """
64
65 def __init__(self, ServerClass, server_address, *remaining_args, **kwargs):
66 port = server_address[1]
67 self.threads = [] # type: List[threading.Thread]
68 self.servers = [] # type: List[ACMEServerMixin]
69
70 # Must try True first.
71 # Ubuntu, for example, will fail to bind to IPv4 if we've already bound
72 # to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6
73 # socket. On the other hand, FreeBSD will successfully bind to IPv4 on the
74 # same port, which means that server will accept the IPv4 connections.
75 # If Python is compiled without IPv6, we'll error out but (probably) successfully
76 # create the IPv4 server.
77 for ip_version in [True, False]:
78 try:
79 kwargs["ipv6"] = ip_version
80 new_address = (server_address[0],) + (port,) + server_address[2:]
81 new_args = (new_address,) + remaining_args
82 server = ServerClass(*new_args, **kwargs)
83 logger.debug(
84 "Successfully bound to %s:%s using %s", new_address[0],
85 new_address[1], "IPv6" if ip_version else "IPv4")
86 except socket.error:
87 if self.servers:
88 # Already bound using IPv6.
89 logger.debug(
90 "Certbot wasn't able to bind to %s:%s using %s, this "
91 "is often expected due to the dual stack nature of "
92 "IPv6 socket implementations.",
93 new_address[0], new_address[1],
94 "IPv6" if ip_version else "IPv4")
95 else:
96 logger.debug(
97 "Failed to bind to %s:%s using %s", new_address[0],
98 new_address[1], "IPv6" if ip_version else "IPv4")
99 else:
100 self.servers.append(server)
101 # If two servers are set up and port 0 was passed in, ensure we always
102 # bind to the same port for both servers.
103 port = server.socket.getsockname()[1]
104 if not self.servers:
105 raise socket.error("Could not bind to IPv4 or IPv6.")
106
107 def serve_forever(self):
108 """Wraps socketserver.TCPServer.serve_forever"""
109 for server in self.servers:
110 thread = threading.Thread(
111 target=server.serve_forever)
112 thread.start()
113 self.threads.append(thread)
114
115 def getsocknames(self):
116 """Wraps socketserver.TCPServer.socket.getsockname"""
117 return [server.socket.getsockname() for server in self.servers]
118
119 def shutdown_and_server_close(self):
120 """Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and
121 threading.Thread.join"""
122 for server in self.servers:
123 server.shutdown()
124 server.server_close()
125 for thread in self.threads:
126 thread.join()
127 self.threads = []
128
129
130 class TLSALPN01Server(TLSServer, ACMEServerMixin):
131 """TLSALPN01 Server."""
132
133 ACME_TLS_1_PROTOCOL = b"acme-tls/1"
134
135 def __init__(self, server_address, certs, challenge_certs, ipv6=False):
136 TLSServer.__init__(
137 self, server_address, _BaseRequestHandlerWithLogging, certs=certs,
138 ipv6=ipv6)
139 self.challenge_certs = challenge_certs
140
141 def _cert_selection(self, connection):
142 # TODO: We would like to serve challenge cert only if asked for it via
143 # ALPN. To do this, we need to retrieve the list of protos from client
144 # hello, but this is currently impossible with openssl [0], and ALPN
145 # negotiation is done after cert selection.
146 # Therefore, currently we always return challenge cert, and terminate
147 # handshake in alpn_selection() if ALPN protos are not what we expect.
148 # [0] https://github.com/openssl/openssl/issues/4952
149 server_name = connection.get_servername()
150 logger.debug("Serving challenge cert for server name %s", server_name)
151 return self.challenge_certs.get(server_name, None)
152
153 def _alpn_selection(self, _connection, alpn_protos):
154 """Callback to select alpn protocol."""
155 if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:
156 logger.debug("Agreed on %s ALPN", self.ACME_TLS_1_PROTOCOL)
157 return self.ACME_TLS_1_PROTOCOL
158 logger.debug("Cannot agree on ALPN proto. Got: %s", str(alpn_protos))
159 # Explicitly close the connection now, by returning an empty string.
160 # See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long
161 return b""
162
163
164 class HTTPServer(BaseHTTPServer.HTTPServer):
165 """Generic HTTP Server."""
166
167 def __init__(self, *args, **kwargs):
168 self.ipv6 = kwargs.pop("ipv6", False)
169 if self.ipv6:
170 self.address_family = socket.AF_INET6
171 else:
172 self.address_family = socket.AF_INET
173 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
174
175
176 class HTTP01Server(HTTPServer, ACMEServerMixin):
177 """HTTP01 Server."""
178
179 def __init__(self, server_address, resources, ipv6=False):
180 HTTPServer.__init__(
181 self, server_address, HTTP01RequestHandler.partial_init(
182 simple_http_resources=resources), ipv6=ipv6)
183
184
185 class HTTP01DualNetworkedServers(BaseDualNetworkedServers):
186 """HTTP01Server Wrapper. Tries everything for both. Failures for one don't
187 affect the other."""
188
189 def __init__(self, *args, **kwargs):
190 BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs)
191
192
193 class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
194 """HTTP01 challenge handler.
195
196 Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.
197
198 :ivar set simple_http_resources: A set of `HTTP01Resource`
199 objects. TODO: better name?
200
201 """
202 HTTP01Resource = collections.namedtuple(
203 "HTTP01Resource", "chall response validation")
204
205 def __init__(self, *args, **kwargs):
206 self.simple_http_resources = kwargs.pop("simple_http_resources", set())
207 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
208
209 def log_message(self, format, *args): # pylint: disable=redefined-builtin
210 """Log arbitrary message."""
211 logger.debug("%s - - %s", self.client_address[0], format % args)
212
213 def handle(self):
214 """Handle request."""
215 self.log_message("Incoming request")
216 BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
217
218 def do_GET(self): # pylint: disable=invalid-name,missing-function-docstring
219 if self.path == "/":
220 self.handle_index()
221 elif self.path.startswith("/" + challenges.HTTP01.URI_ROOT_PATH):
222 self.handle_simple_http_resource()
223 else:
224 self.handle_404()
225
226 def handle_index(self):
227 """Handle index page."""
228 self.send_response(200)
229 self.send_header("Content-Type", "text/html")
230 self.end_headers()
231 self.wfile.write(self.server.server_version.encode())
232
233 def handle_404(self):
234 """Handler 404 Not Found errors."""
235 self.send_response(http_client.NOT_FOUND, message="Not Found")
236 self.send_header("Content-type", "text/html")
237 self.end_headers()
238 self.wfile.write(b"404")
239
240 def handle_simple_http_resource(self):
241 """Handle HTTP01 provisioned resources."""
242 for resource in self.simple_http_resources:
243 if resource.chall.path == self.path:
244 self.log_message("Serving HTTP01 with token %r",
245 resource.chall.encode("token"))
246 self.send_response(http_client.OK)
247 self.end_headers()
248 self.wfile.write(resource.validation.encode())
249 return
250 else: # pylint: disable=useless-else-on-loop
251 self.log_message("No resources to serve")
252 self.log_message("%s does not correspond to any resource. ignoring",
253 self.path)
254
255 @classmethod
256 def partial_init(cls, simple_http_resources):
257 """Partially initialize this handler.
258
259 This is useful because `socketserver.BaseServer` takes
260 uninitialized handler and initializes it with the current
261 request.
262
263 """
264 return functools.partial(
265 cls, simple_http_resources=simple_http_resources)
266
267
268 class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):
269 """BaseRequestHandler with logging."""
270
271 def log_message(self, format, *args): # pylint: disable=redefined-builtin
272 """Log arbitrary message."""
273 logger.debug("%s - - %s", self.client_address[0], format % args)
274
275 def handle(self):
276 """Handle request."""
277 self.log_message("Incoming request")
278 socketserver.BaseRequestHandler.handle(self)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/acme/acme/standalone.py b/acme/acme/standalone.py
--- a/acme/acme/standalone.py
+++ b/acme/acme/standalone.py
@@ -176,10 +176,10 @@
class HTTP01Server(HTTPServer, ACMEServerMixin):
"""HTTP01 Server."""
- def __init__(self, server_address, resources, ipv6=False):
+ def __init__(self, server_address, resources, ipv6=False, timeout=30):
HTTPServer.__init__(
self, server_address, HTTP01RequestHandler.partial_init(
- simple_http_resources=resources), ipv6=ipv6)
+ simple_http_resources=resources, timeout=timeout), ipv6=ipv6)
class HTTP01DualNetworkedServers(BaseDualNetworkedServers):
@@ -204,6 +204,7 @@
def __init__(self, *args, **kwargs):
self.simple_http_resources = kwargs.pop("simple_http_resources", set())
+ self.timeout = kwargs.pop('timeout', 30)
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args): # pylint: disable=redefined-builtin
@@ -253,7 +254,7 @@
self.path)
@classmethod
- def partial_init(cls, simple_http_resources):
+ def partial_init(cls, simple_http_resources, timeout):
"""Partially initialize this handler.
This is useful because `socketserver.BaseServer` takes
@@ -262,7 +263,8 @@
"""
return functools.partial(
- cls, simple_http_resources=simple_http_resources)
+ cls, simple_http_resources=simple_http_resources,
+ timeout=timeout)
class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):
|
{"golden_diff": "diff --git a/acme/acme/standalone.py b/acme/acme/standalone.py\n--- a/acme/acme/standalone.py\n+++ b/acme/acme/standalone.py\n@@ -176,10 +176,10 @@\n class HTTP01Server(HTTPServer, ACMEServerMixin):\n \"\"\"HTTP01 Server.\"\"\"\n \n- def __init__(self, server_address, resources, ipv6=False):\n+ def __init__(self, server_address, resources, ipv6=False, timeout=30):\n HTTPServer.__init__(\n self, server_address, HTTP01RequestHandler.partial_init(\n- simple_http_resources=resources), ipv6=ipv6)\n+ simple_http_resources=resources, timeout=timeout), ipv6=ipv6)\n \n \n class HTTP01DualNetworkedServers(BaseDualNetworkedServers):\n@@ -204,6 +204,7 @@\n \n def __init__(self, *args, **kwargs):\n self.simple_http_resources = kwargs.pop(\"simple_http_resources\", set())\n+ self.timeout = kwargs.pop('timeout', 30)\n BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n \n def log_message(self, format, *args): # pylint: disable=redefined-builtin\n@@ -253,7 +254,7 @@\n self.path)\n \n @classmethod\n- def partial_init(cls, simple_http_resources):\n+ def partial_init(cls, simple_http_resources, timeout):\n \"\"\"Partially initialize this handler.\n \n This is useful because `socketserver.BaseServer` takes\n@@ -262,7 +263,8 @@\n \n \"\"\"\n return functools.partial(\n- cls, simple_http_resources=simple_http_resources)\n+ cls, simple_http_resources=simple_http_resources,\n+ timeout=timeout)\n \n \n class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):\n", "issue": "Standalone server waits forever to drain clients during shutdown\nOriginally discovered in [a help thread on the forums](https://community.letsencrypt.org/t/certbot-hangs-on-stopping-server/102450?u=_az).\r\n\r\n## My operating system is (include version):\r\nUbuntu 19.04\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\ncertbot-auto\r\n\r\n## I ran this command and it produced this output:\r\n sudo certbot-auto certonly -d certbot.eff.org --standalone --dry-run\r\n\r\nand simultaneously in a second terminal:\r\n\r\n nc -vvv localhost 80\r\n\r\nCauses an infinite hang until the `nc` client disconnects:\r\n\r\n 2019-09-20 19:29:28,932:DEBUG:certbot.error_handler:Calling registered functions\r\n 2019-09-20 19:29:28,932:INFO:certbot.auth_handler:Cleaning up challenges\r\n 2019-09-20 19:29:28,932:DEBUG:certbot.plugins.standalone:Stopping server at :::80...\r\n\r\n## Certbot's behavior differed from what I expected because:\r\nThe standalone servers should not wait forever for clients to drain. I don't think they should wait at all and instead should just close the server socket. There is no reason to expect further legitimate traffic to the server. \r\n\r\nIf a very patient internet crawler happens to hit the port, we need to dispatch of it quickly.\r\n\r\nIt may be appropriate to also add read and write timeouts to the socket, since it is practically always going to be exposed to the internet.\n", "before_files": [{"content": "\"\"\"Support for standalone client challenge solvers. 
\"\"\"\nimport collections\nimport functools\nimport logging\nimport socket\nimport threading\n\nfrom six.moves import BaseHTTPServer # type: ignore\nfrom six.moves import http_client\nfrom six.moves import socketserver # type: ignore\n\nfrom acme import challenges\nfrom acme import crypto_util\nfrom acme.magic_typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass TLSServer(socketserver.TCPServer):\n \"\"\"Generic TLS Server.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.ipv6 = kwargs.pop(\"ipv6\", False)\n if self.ipv6:\n self.address_family = socket.AF_INET6\n else:\n self.address_family = socket.AF_INET\n self.certs = kwargs.pop(\"certs\", {})\n self.method = kwargs.pop(\n \"method\", crypto_util._DEFAULT_SSL_METHOD)\n self.allow_reuse_address = kwargs.pop(\"allow_reuse_address\", True)\n socketserver.TCPServer.__init__(self, *args, **kwargs)\n\n def _wrap_sock(self):\n self.socket = crypto_util.SSLSocket(\n self.socket, cert_selection=self._cert_selection,\n alpn_selection=getattr(self, '_alpn_selection', None),\n method=self.method)\n\n def _cert_selection(self, connection): # pragma: no cover\n \"\"\"Callback selecting certificate for connection.\"\"\"\n server_name = connection.get_servername()\n return self.certs.get(server_name, None)\n\n def server_bind(self):\n self._wrap_sock()\n return socketserver.TCPServer.server_bind(self)\n\n\nclass ACMEServerMixin:\n \"\"\"ACME server common settings mixin.\"\"\"\n # TODO: c.f. #858\n server_version = \"ACME client standalone challenge solver\"\n allow_reuse_address = True\n\n\nclass BaseDualNetworkedServers(object):\n \"\"\"Base class for a pair of IPv6 and IPv4 servers that tries to do everything\n it's asked for both servers, but where failures in one server don't\n affect the other.\n\n If two servers are instantiated, they will serve on the same port.\n \"\"\"\n\n def __init__(self, ServerClass, server_address, *remaining_args, **kwargs):\n port = server_address[1]\n self.threads = [] # type: List[threading.Thread]\n self.servers = [] # type: List[ACMEServerMixin]\n\n # Must try True first.\n # Ubuntu, for example, will fail to bind to IPv4 if we've already bound\n # to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6\n # socket. 
On the other hand, FreeBSD will successfully bind to IPv4 on the\n # same port, which means that server will accept the IPv4 connections.\n # If Python is compiled without IPv6, we'll error out but (probably) successfully\n # create the IPv4 server.\n for ip_version in [True, False]:\n try:\n kwargs[\"ipv6\"] = ip_version\n new_address = (server_address[0],) + (port,) + server_address[2:]\n new_args = (new_address,) + remaining_args\n server = ServerClass(*new_args, **kwargs)\n logger.debug(\n \"Successfully bound to %s:%s using %s\", new_address[0],\n new_address[1], \"IPv6\" if ip_version else \"IPv4\")\n except socket.error:\n if self.servers:\n # Already bound using IPv6.\n logger.debug(\n \"Certbot wasn't able to bind to %s:%s using %s, this \"\n \"is often expected due to the dual stack nature of \"\n \"IPv6 socket implementations.\",\n new_address[0], new_address[1],\n \"IPv6\" if ip_version else \"IPv4\")\n else:\n logger.debug(\n \"Failed to bind to %s:%s using %s\", new_address[0],\n new_address[1], \"IPv6\" if ip_version else \"IPv4\")\n else:\n self.servers.append(server)\n # If two servers are set up and port 0 was passed in, ensure we always\n # bind to the same port for both servers.\n port = server.socket.getsockname()[1]\n if not self.servers:\n raise socket.error(\"Could not bind to IPv4 or IPv6.\")\n\n def serve_forever(self):\n \"\"\"Wraps socketserver.TCPServer.serve_forever\"\"\"\n for server in self.servers:\n thread = threading.Thread(\n target=server.serve_forever)\n thread.start()\n self.threads.append(thread)\n\n def getsocknames(self):\n \"\"\"Wraps socketserver.TCPServer.socket.getsockname\"\"\"\n return [server.socket.getsockname() for server in self.servers]\n\n def shutdown_and_server_close(self):\n \"\"\"Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and\n threading.Thread.join\"\"\"\n for server in self.servers:\n server.shutdown()\n server.server_close()\n for thread in self.threads:\n thread.join()\n self.threads = []\n\n\nclass TLSALPN01Server(TLSServer, ACMEServerMixin):\n \"\"\"TLSALPN01 Server.\"\"\"\n\n ACME_TLS_1_PROTOCOL = b\"acme-tls/1\"\n\n def __init__(self, server_address, certs, challenge_certs, ipv6=False):\n TLSServer.__init__(\n self, server_address, _BaseRequestHandlerWithLogging, certs=certs,\n ipv6=ipv6)\n self.challenge_certs = challenge_certs\n\n def _cert_selection(self, connection):\n # TODO: We would like to serve challenge cert only if asked for it via\n # ALPN. To do this, we need to retrieve the list of protos from client\n # hello, but this is currently impossible with openssl [0], and ALPN\n # negotiation is done after cert selection.\n # Therefore, currently we always return challenge cert, and terminate\n # handshake in alpn_selection() if ALPN protos are not what we expect.\n # [0] https://github.com/openssl/openssl/issues/4952\n server_name = connection.get_servername()\n logger.debug(\"Serving challenge cert for server name %s\", server_name)\n return self.challenge_certs.get(server_name, None)\n\n def _alpn_selection(self, _connection, alpn_protos):\n \"\"\"Callback to select alpn protocol.\"\"\"\n if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:\n logger.debug(\"Agreed on %s ALPN\", self.ACME_TLS_1_PROTOCOL)\n return self.ACME_TLS_1_PROTOCOL\n logger.debug(\"Cannot agree on ALPN proto. 
Got: %s\", str(alpn_protos))\n # Explicitly close the connection now, by returning an empty string.\n # See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long\n return b\"\"\n\n\nclass HTTPServer(BaseHTTPServer.HTTPServer):\n \"\"\"Generic HTTP Server.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.ipv6 = kwargs.pop(\"ipv6\", False)\n if self.ipv6:\n self.address_family = socket.AF_INET6\n else:\n self.address_family = socket.AF_INET\n BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)\n\n\nclass HTTP01Server(HTTPServer, ACMEServerMixin):\n \"\"\"HTTP01 Server.\"\"\"\n\n def __init__(self, server_address, resources, ipv6=False):\n HTTPServer.__init__(\n self, server_address, HTTP01RequestHandler.partial_init(\n simple_http_resources=resources), ipv6=ipv6)\n\n\nclass HTTP01DualNetworkedServers(BaseDualNetworkedServers):\n \"\"\"HTTP01Server Wrapper. Tries everything for both. Failures for one don't\n affect the other.\"\"\"\n\n def __init__(self, *args, **kwargs):\n BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs)\n\n\nclass HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"HTTP01 challenge handler.\n\n Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.\n\n :ivar set simple_http_resources: A set of `HTTP01Resource`\n objects. TODO: better name?\n\n \"\"\"\n HTTP01Resource = collections.namedtuple(\n \"HTTP01Resource\", \"chall response validation\")\n\n def __init__(self, *args, **kwargs):\n self.simple_http_resources = kwargs.pop(\"simple_http_resources\", set())\n BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def log_message(self, format, *args): # pylint: disable=redefined-builtin\n \"\"\"Log arbitrary message.\"\"\"\n logger.debug(\"%s - - %s\", self.client_address[0], format % args)\n\n def handle(self):\n \"\"\"Handle request.\"\"\"\n self.log_message(\"Incoming request\")\n BaseHTTPServer.BaseHTTPRequestHandler.handle(self)\n\n def do_GET(self): # pylint: disable=invalid-name,missing-function-docstring\n if self.path == \"/\":\n self.handle_index()\n elif self.path.startswith(\"/\" + challenges.HTTP01.URI_ROOT_PATH):\n self.handle_simple_http_resource()\n else:\n self.handle_404()\n\n def handle_index(self):\n \"\"\"Handle index page.\"\"\"\n self.send_response(200)\n self.send_header(\"Content-Type\", \"text/html\")\n self.end_headers()\n self.wfile.write(self.server.server_version.encode())\n\n def handle_404(self):\n \"\"\"Handler 404 Not Found errors.\"\"\"\n self.send_response(http_client.NOT_FOUND, message=\"Not Found\")\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"404\")\n\n def handle_simple_http_resource(self):\n \"\"\"Handle HTTP01 provisioned resources.\"\"\"\n for resource in self.simple_http_resources:\n if resource.chall.path == self.path:\n self.log_message(\"Serving HTTP01 with token %r\",\n resource.chall.encode(\"token\"))\n self.send_response(http_client.OK)\n self.end_headers()\n self.wfile.write(resource.validation.encode())\n return\n else: # pylint: disable=useless-else-on-loop\n self.log_message(\"No resources to serve\")\n self.log_message(\"%s does not correspond to any resource. 
ignoring\",\n self.path)\n\n @classmethod\n def partial_init(cls, simple_http_resources):\n \"\"\"Partially initialize this handler.\n\n This is useful because `socketserver.BaseServer` takes\n uninitialized handler and initializes it with the current\n request.\n\n \"\"\"\n return functools.partial(\n cls, simple_http_resources=simple_http_resources)\n\n\nclass _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):\n \"\"\"BaseRequestHandler with logging.\"\"\"\n\n def log_message(self, format, *args): # pylint: disable=redefined-builtin\n \"\"\"Log arbitrary message.\"\"\"\n logger.debug(\"%s - - %s\", self.client_address[0], format % args)\n\n def handle(self):\n \"\"\"Handle request.\"\"\"\n self.log_message(\"Incoming request\")\n socketserver.BaseRequestHandler.handle(self)\n", "path": "acme/acme/standalone.py"}], "after_files": [{"content": "\"\"\"Support for standalone client challenge solvers. \"\"\"\nimport collections\nimport functools\nimport logging\nimport socket\nimport threading\n\nfrom six.moves import BaseHTTPServer # type: ignore\nfrom six.moves import http_client\nfrom six.moves import socketserver # type: ignore\n\nfrom acme import challenges\nfrom acme import crypto_util\nfrom acme.magic_typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass TLSServer(socketserver.TCPServer):\n \"\"\"Generic TLS Server.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.ipv6 = kwargs.pop(\"ipv6\", False)\n if self.ipv6:\n self.address_family = socket.AF_INET6\n else:\n self.address_family = socket.AF_INET\n self.certs = kwargs.pop(\"certs\", {})\n self.method = kwargs.pop(\n \"method\", crypto_util._DEFAULT_SSL_METHOD)\n self.allow_reuse_address = kwargs.pop(\"allow_reuse_address\", True)\n socketserver.TCPServer.__init__(self, *args, **kwargs)\n\n def _wrap_sock(self):\n self.socket = crypto_util.SSLSocket(\n self.socket, cert_selection=self._cert_selection,\n alpn_selection=getattr(self, '_alpn_selection', None),\n method=self.method)\n\n def _cert_selection(self, connection): # pragma: no cover\n \"\"\"Callback selecting certificate for connection.\"\"\"\n server_name = connection.get_servername()\n return self.certs.get(server_name, None)\n\n def server_bind(self):\n self._wrap_sock()\n return socketserver.TCPServer.server_bind(self)\n\n\nclass ACMEServerMixin:\n \"\"\"ACME server common settings mixin.\"\"\"\n # TODO: c.f. #858\n server_version = \"ACME client standalone challenge solver\"\n allow_reuse_address = True\n\n\nclass BaseDualNetworkedServers(object):\n \"\"\"Base class for a pair of IPv6 and IPv4 servers that tries to do everything\n it's asked for both servers, but where failures in one server don't\n affect the other.\n\n If two servers are instantiated, they will serve on the same port.\n \"\"\"\n\n def __init__(self, ServerClass, server_address, *remaining_args, **kwargs):\n port = server_address[1]\n self.threads = [] # type: List[threading.Thread]\n self.servers = [] # type: List[ACMEServerMixin]\n\n # Must try True first.\n # Ubuntu, for example, will fail to bind to IPv4 if we've already bound\n # to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6\n # socket. 
On the other hand, FreeBSD will successfully bind to IPv4 on the\n # same port, which means that server will accept the IPv4 connections.\n # If Python is compiled without IPv6, we'll error out but (probably) successfully\n # create the IPv4 server.\n for ip_version in [True, False]:\n try:\n kwargs[\"ipv6\"] = ip_version\n new_address = (server_address[0],) + (port,) + server_address[2:]\n new_args = (new_address,) + remaining_args\n server = ServerClass(*new_args, **kwargs)\n logger.debug(\n \"Successfully bound to %s:%s using %s\", new_address[0],\n new_address[1], \"IPv6\" if ip_version else \"IPv4\")\n except socket.error:\n if self.servers:\n # Already bound using IPv6.\n logger.debug(\n \"Certbot wasn't able to bind to %s:%s using %s, this \"\n \"is often expected due to the dual stack nature of \"\n \"IPv6 socket implementations.\",\n new_address[0], new_address[1],\n \"IPv6\" if ip_version else \"IPv4\")\n else:\n logger.debug(\n \"Failed to bind to %s:%s using %s\", new_address[0],\n new_address[1], \"IPv6\" if ip_version else \"IPv4\")\n else:\n self.servers.append(server)\n # If two servers are set up and port 0 was passed in, ensure we always\n # bind to the same port for both servers.\n port = server.socket.getsockname()[1]\n if not self.servers:\n raise socket.error(\"Could not bind to IPv4 or IPv6.\")\n\n def serve_forever(self):\n \"\"\"Wraps socketserver.TCPServer.serve_forever\"\"\"\n for server in self.servers:\n thread = threading.Thread(\n target=server.serve_forever)\n thread.start()\n self.threads.append(thread)\n\n def getsocknames(self):\n \"\"\"Wraps socketserver.TCPServer.socket.getsockname\"\"\"\n return [server.socket.getsockname() for server in self.servers]\n\n def shutdown_and_server_close(self):\n \"\"\"Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and\n threading.Thread.join\"\"\"\n for server in self.servers:\n server.shutdown()\n server.server_close()\n for thread in self.threads:\n thread.join()\n self.threads = []\n\n\nclass TLSALPN01Server(TLSServer, ACMEServerMixin):\n \"\"\"TLSALPN01 Server.\"\"\"\n\n ACME_TLS_1_PROTOCOL = b\"acme-tls/1\"\n\n def __init__(self, server_address, certs, challenge_certs, ipv6=False):\n TLSServer.__init__(\n self, server_address, _BaseRequestHandlerWithLogging, certs=certs,\n ipv6=ipv6)\n self.challenge_certs = challenge_certs\n\n def _cert_selection(self, connection):\n # TODO: We would like to serve challenge cert only if asked for it via\n # ALPN. To do this, we need to retrieve the list of protos from client\n # hello, but this is currently impossible with openssl [0], and ALPN\n # negotiation is done after cert selection.\n # Therefore, currently we always return challenge cert, and terminate\n # handshake in alpn_selection() if ALPN protos are not what we expect.\n # [0] https://github.com/openssl/openssl/issues/4952\n server_name = connection.get_servername()\n logger.debug(\"Serving challenge cert for server name %s\", server_name)\n return self.challenge_certs.get(server_name, None)\n\n def _alpn_selection(self, _connection, alpn_protos):\n \"\"\"Callback to select alpn protocol.\"\"\"\n if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:\n logger.debug(\"Agreed on %s ALPN\", self.ACME_TLS_1_PROTOCOL)\n return self.ACME_TLS_1_PROTOCOL\n logger.debug(\"Cannot agree on ALPN proto. 
Got: %s\", str(alpn_protos))\n # Explicitly close the connection now, by returning an empty string.\n # See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long\n return b\"\"\n\n\nclass HTTPServer(BaseHTTPServer.HTTPServer):\n \"\"\"Generic HTTP Server.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.ipv6 = kwargs.pop(\"ipv6\", False)\n if self.ipv6:\n self.address_family = socket.AF_INET6\n else:\n self.address_family = socket.AF_INET\n BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)\n\n\nclass HTTP01Server(HTTPServer, ACMEServerMixin):\n \"\"\"HTTP01 Server.\"\"\"\n\n def __init__(self, server_address, resources, ipv6=False, timeout=30):\n HTTPServer.__init__(\n self, server_address, HTTP01RequestHandler.partial_init(\n simple_http_resources=resources, timeout=timeout), ipv6=ipv6)\n\n\nclass HTTP01DualNetworkedServers(BaseDualNetworkedServers):\n \"\"\"HTTP01Server Wrapper. Tries everything for both. Failures for one don't\n affect the other.\"\"\"\n\n def __init__(self, *args, **kwargs):\n BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs)\n\n\nclass HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"HTTP01 challenge handler.\n\n Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.\n\n :ivar set simple_http_resources: A set of `HTTP01Resource`\n objects. TODO: better name?\n\n \"\"\"\n HTTP01Resource = collections.namedtuple(\n \"HTTP01Resource\", \"chall response validation\")\n\n def __init__(self, *args, **kwargs):\n self.simple_http_resources = kwargs.pop(\"simple_http_resources\", set())\n self.timeout = kwargs.pop('timeout', 30)\n BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def log_message(self, format, *args): # pylint: disable=redefined-builtin\n \"\"\"Log arbitrary message.\"\"\"\n logger.debug(\"%s - - %s\", self.client_address[0], format % args)\n\n def handle(self):\n \"\"\"Handle request.\"\"\"\n self.log_message(\"Incoming request\")\n BaseHTTPServer.BaseHTTPRequestHandler.handle(self)\n\n def do_GET(self): # pylint: disable=invalid-name,missing-function-docstring\n if self.path == \"/\":\n self.handle_index()\n elif self.path.startswith(\"/\" + challenges.HTTP01.URI_ROOT_PATH):\n self.handle_simple_http_resource()\n else:\n self.handle_404()\n\n def handle_index(self):\n \"\"\"Handle index page.\"\"\"\n self.send_response(200)\n self.send_header(\"Content-Type\", \"text/html\")\n self.end_headers()\n self.wfile.write(self.server.server_version.encode())\n\n def handle_404(self):\n \"\"\"Handler 404 Not Found errors.\"\"\"\n self.send_response(http_client.NOT_FOUND, message=\"Not Found\")\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"404\")\n\n def handle_simple_http_resource(self):\n \"\"\"Handle HTTP01 provisioned resources.\"\"\"\n for resource in self.simple_http_resources:\n if resource.chall.path == self.path:\n self.log_message(\"Serving HTTP01 with token %r\",\n resource.chall.encode(\"token\"))\n self.send_response(http_client.OK)\n self.end_headers()\n self.wfile.write(resource.validation.encode())\n return\n else: # pylint: disable=useless-else-on-loop\n self.log_message(\"No resources to serve\")\n self.log_message(\"%s does not correspond to any resource. 
ignoring\",\n self.path)\n\n @classmethod\n def partial_init(cls, simple_http_resources, timeout):\n \"\"\"Partially initialize this handler.\n\n This is useful because `socketserver.BaseServer` takes\n uninitialized handler and initializes it with the current\n request.\n\n \"\"\"\n return functools.partial(\n cls, simple_http_resources=simple_http_resources,\n timeout=timeout)\n\n\nclass _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):\n \"\"\"BaseRequestHandler with logging.\"\"\"\n\n def log_message(self, format, *args): # pylint: disable=redefined-builtin\n \"\"\"Log arbitrary message.\"\"\"\n logger.debug(\"%s - - %s\", self.client_address[0], format % args)\n\n def handle(self):\n \"\"\"Handle request.\"\"\"\n self.log_message(\"Incoming request\")\n socketserver.BaseRequestHandler.handle(self)\n", "path": "acme/acme/standalone.py"}]}
| 3,855 | 419 |
gh_patches_debug_11121
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1853
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using !Ref in ContentUri of LayerVersion throws error E0001
*cfn-lint version: 0.43.0*
*Description of issue.*
The `ContentUri` property of the resource type [AWS::Serverless::LayerVersion][1] can be a string or a `LayerContent` object. But if we use `!Ref`, the template is marked as an error with the message:
>[cfn-lint] E0001: Error transforming template: Resource with id [CommonDependenciesLayer4ffbb56ae8] is invalid. 'ContentUri' requires Bucket and Key properties to be specified.
Here is an example:
```yaml
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Parameters:
EnvironmentName:
Description: An environment name
Type: String
Default: staging
AllowedValues:
- sandbox
- prod
- staging
LayerContentUri:
Type: String
Description: Layer content uri
Default: '../dependencies-layer/nodejs'
Resources:
CommonDependenciesLayer:
Type: AWS::Serverless::LayerVersion
Properties:
LayerName: !Sub '${EnvironmentName}-common-dependencies'
Description: 'Common dependencies'
ContentUri: !Ref LayerContentUri
CompatibleRuntimes:
- nodejs12.x
RetentionPolicy: Retain
Metadata:
BuildMethod: nodejs12.x
```
The template deploys OK using `sam deploy`, so it is not an error. I'm already using the latest version of `cfn-lint` and have already run `cfn-lint -u`.
Hope this gets fixed soon.
PS: nice job!
[1]: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-layerversion.html
--- END ISSUE ---
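For context, the YAML short form `!Ref LayerContentUri` is loaded as the single-key mapping `{"Ref": "LayerContentUri"}`; the transform's placeholder substitution ignores dict values, so the reference reaches the SAM translator unchanged and it then demands `Bucket`/`Key`. A small standalone sketch of a substitution helper that also rewrites such a `Ref` mapping — mirroring the approach taken in the patch below, but not the actual cfn-lint source — could look like this:

```python
# Standalone sketch (assumption-labelled, not cfn-lint code): swap a local path
# or a single-key {"Ref": ...} mapping for a placeholder S3 URI so the SAM
# translator accepts the template during linting.
def update_to_s3_uri(key, props, s3_uri="s3://bucket/value"):
    value = props.get(key, ".")
    if isinstance(value, dict):
        # `!Ref LayerContentUri` arrives here as {"Ref": "LayerContentUri"};
        # replace it, but leave genuine LayerContent objects (Bucket/Key) alone.
        if len(value) == 1 and "Ref" in value:
            props[key] = s3_uri
        return
    if isinstance(value, str) and value.startswith("s3://"):
        return  # already a valid S3 URI
    props[key] = s3_uri

resource = {"ContentUri": {"Ref": "LayerContentUri"}}
update_to_s3_uri("ContentUri", resource)
assert resource["ContentUri"] == "s3://bucket/value"
```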
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/transform.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import os
6 import logging
7 import six
8 import samtranslator
9 from samtranslator.parser import parser
10 from samtranslator.translator.translator import Translator
11 from samtranslator.public.exceptions import InvalidDocumentException
12
13 from cfnlint.helpers import load_resource, convert_dict, format_json_string
14 from cfnlint.data import Serverless
15 from cfnlint.rules import Match, TransformError
16 LOGGER = logging.getLogger('cfnlint')
17
18
19 class Transform(object):
20 """
21 Application Serverless Module tranform Wrapper.
22 Based on code from AWS SAM CLI:
23 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py
24 """
25
26 def __init__(self, filename, template, region):
27 """
28 Initialize Transform class
29 """
30 self._filename = filename
31 self._template = template
32 self._region = region
33 self._parameters = {}
34
35 self._managed_policy_map = self.load_managed_policies()
36 self._sam_parser = parser.Parser()
37
38 def template(self):
39 """Get the template"""
40 return self._template
41
42 def load_managed_policies(self):
43 """
44 Load the ManagedPolicies locally, based on the AWS-CLI:
45 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json
46 """
47 return load_resource(Serverless, 'ManagedPolicies.json')
48
49 def _replace_local_codeuri(self):
50 """
51 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in
52 AWS::Serverless::Api to a fake S3 Uri. This is to support running the
53 SAM Translator with valid values for these fields. If this is not done,
54 the template is invalid in the eyes of SAM Translator (the translator
55 does not support local paths)
56 """
57
58 all_resources = self._template.get('Resources', {})
59
60 template_globals = self._template.get('Globals', {})
61 auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')
62 if isinstance(auto_publish_alias, dict):
63 if len(auto_publish_alias) == 1:
64 for k, v in auto_publish_alias.items():
65 if k == 'Ref':
66 if v in self._template.get('Parameters'):
67 self._parameters[v] = 'Alias'
68
69
70 for _, resource in all_resources.items():
71
72 resource_type = resource.get('Type')
73 resource_dict = resource.get('Properties')
74
75 if resource_type == 'AWS::Serverless::Function':
76
77 Transform._update_to_s3_uri('CodeUri', resource_dict)
78 auto_publish_alias = resource_dict.get('AutoPublishAlias')
79 if isinstance(auto_publish_alias, dict):
80 if len(auto_publish_alias) == 1:
81 for k, v in auto_publish_alias.items():
82 if k == 'Ref':
83 if v in self._template.get('Parameters'):
84 self._parameters[v] = 'Alias'
85 if resource_type in ['AWS::Serverless::LayerVersion']:
86 if resource_dict.get('ContentUri'):
87 Transform._update_to_s3_uri('ContentUri', resource_dict)
88 if resource_type == 'AWS::Serverless::Application':
89 if resource_dict.get('Location'):
90 resource_dict['Location'] = ''
91 Transform._update_to_s3_uri('Location', resource_dict)
92 if resource_type == 'AWS::Serverless::Api':
93 if ('DefinitionBody' not in resource_dict and
94 'Auth' not in resource_dict and 'Cors' not in resource_dict):
95 Transform._update_to_s3_uri('DefinitionUri', resource_dict)
96 else:
97 resource_dict['DefinitionBody'] = ''
98 if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):
99 Transform._update_to_s3_uri('DefinitionUri', resource_dict)
100
101 def transform_template(self):
102 """
103 Transform the Template using the Serverless Application Model.
104 """
105 matches = []
106
107 try:
108 # Output the SAM Translator version in debug mode
109 LOGGER.info('SAM Translator: %s', samtranslator.__version__)
110
111 sam_translator = Translator(
112 managed_policy_map=self._managed_policy_map,
113 sam_parser=self._sam_parser)
114
115 self._replace_local_codeuri()
116
117 # Tell SAM to use the region we're linting in, this has to be
118 # controlled using the default AWS mechanisms, see also:
119 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py
120 LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)
121 os.environ['AWS_DEFAULT_REGION'] = self._region
122
123 self._template = convert_dict(
124 sam_translator.translate(sam_template=self._template,
125 parameter_values=self._parameters))
126
127 LOGGER.info('Transformed template: \n%s',
128 format_json_string(self._template))
129 except InvalidDocumentException as e:
130 message = 'Error transforming template: {0}'
131 for cause in e.causes:
132 matches.append(Match(
133 1, 1,
134 1, 1,
135 self._filename,
136 TransformError(), message.format(cause.message)))
137 except Exception as e: # pylint: disable=W0703
138 LOGGER.debug('Error transforming template: %s', str(e))
139 LOGGER.debug('Stack trace: %s', e, exc_info=True)
140 message = 'Error transforming template: {0}'
141 matches.append(Match(
142 1, 1,
143 1, 1,
144 self._filename,
145 TransformError(), message.format(str(e))))
146
147 return matches
148
149 @staticmethod
150 def is_s3_uri(uri):
151 """
152 Checks the uri and determines if it is a valid S3 Uri
153 Parameters
154 ----------
155 uri str, required
156 Uri to check
157 Returns
158 -------
159 bool
160 Returns True if the uri given is an S3 uri, otherwise False
161 """
162 return isinstance(uri, six.string_types) and uri.startswith('s3://')
163
164 @staticmethod
165 def _update_to_s3_uri(
166 property_key, resource_property_dict,
167 s3_uri_value='s3://bucket/value'):
168 """
169 Updates the 'property_key' in the 'resource_property_dict' to the
170 value of 's3_uri_value'
171 Note: The function will mutate the resource_property_dict that is pass
172 in Parameters
173 ----------
174 property_key str, required
175 Key in the resource_property_dict
176 resource_property_dict dict, required
177 Property dictionary of a Resource in the template to replace
178 s3_uri_value str, optional
179 Value to update the value of the property_key to
180 """
181 uri_property = resource_property_dict.get(property_key, '.')
182
183 # ignore if dict or already an S3 Uri
184 if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):
185 return
186
187 resource_property_dict[property_key] = s3_uri_value
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py
--- a/src/cfnlint/transform.py
+++ b/src/cfnlint/transform.py
@@ -181,7 +181,13 @@
uri_property = resource_property_dict.get(property_key, '.')
# ignore if dict or already an S3 Uri
- if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):
+ if isinstance(uri_property, dict):
+ if len(uri_property) == 1:
+ for k in uri_property.keys():
+ if k == 'Ref':
+ resource_property_dict[property_key] = s3_uri_value
+ return
+ if Transform.is_s3_uri(uri_property):
return
resource_property_dict[property_key] = s3_uri_value
|
{"golden_diff": "diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py\n--- a/src/cfnlint/transform.py\n+++ b/src/cfnlint/transform.py\n@@ -181,7 +181,13 @@\n uri_property = resource_property_dict.get(property_key, '.')\n \n # ignore if dict or already an S3 Uri\n- if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n+ if isinstance(uri_property, dict):\n+ if len(uri_property) == 1:\n+ for k in uri_property.keys():\n+ if k == 'Ref':\n+ resource_property_dict[property_key] = s3_uri_value\n+ return\n+ if Transform.is_s3_uri(uri_property):\n return\n \n resource_property_dict[property_key] = s3_uri_value\n", "issue": "Using !Ref in ContentUri of LayerVersion throw error E0001\n*cfn-lint version: 0.43.0*\r\n\r\n*Description of issue.*\r\n\r\nThe `ContentUri` property of a resource type [AWS::Serverless::LayerVersion][1] can be a string or a `LayerContent` object. But, if we use `!Ref` the template is marked as an error with the message:\r\n>[cfn-lint] E0001: Error transforming template: Resource with id [CommonDependenciesLayer4ffbb56ae8] is invalid. 'ContentUri' requires Bucket and Key properties to be specified.\r\n\r\nHere is an example:\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\nTransform: AWS::Serverless-2016-10-31\r\n\r\nParameters:\r\n EnvironmentName:\r\n Description: An environment name\r\n Type: String\r\n Default: staging\r\n AllowedValues:\r\n - sandbox\r\n - prod\r\n - staging\r\n\r\n LayerContentUri:\r\n Type: String\r\n Description: Layer content uri\r\n Default: '../dependencies-layer/nodejs'\r\n\r\n\r\nResources:\r\n CommonDependenciesLayer:\r\n Type: AWS::Serverless::LayerVersion\r\n Properties:\r\n LayerName: !Sub '${EnvironmentName}-common-dependencies'\r\n Description: 'Common dependencies'\r\n ContentUri: !Ref LayerContentUri\r\n CompatibleRuntimes:\r\n - nodejs12.x\r\n RetentionPolicy: Retain\r\n Metadata:\r\n BuildMethod: nodejs12.x\r\n```\r\nThe template deploys ok using `sam deploy` so it is not an error. I'm already using the latest version of `cfn-lint` and already did `cfn-lint -u`\r\n\r\nHope this gets fixed soon.\r\n\r\nPS: nice job!\r\n\r\n[1]: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-layerversion.html\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless 
Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless 
Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}]}
| 2,652 | 181 |
gh_patches_debug_5143
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-324
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RDS ValueError on LicenseModel
I'm hitting a similar problem to #316: Postgres instances only use the string `'postgresql-license'` for the LicenseModel parameter, and I'm getting a ValueError:
```
ValueError: DBInstance LicenseModel must be one of: license-included, bring-your-own-license, general-public-license
```
I could not find the `postgresql-license` in any API documentation, but it is in the [user-guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreatePostgreSQLInstance.html)
--- END ISSUE ---
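The rejection is easy to trigger directly against the validator in the file below; a minimal repro sketch (assuming the Python 2-era troposphere checkout shown in this record is importable):

```python
# Hypothetical repro: before the patch, the PostgreSQL license string is
# rejected by the LicenseModel validator shown in troposphere/rds.py below.
from troposphere.rds import validate_license_model

try:
    validate_license_model("postgresql-license")
except ValueError as err:
    # DBInstance LicenseModel must be one of: license-included,
    # bring-your-own-license, general-public-license
    print(err)
```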
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/rds.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 import re
7
8 from . import AWSHelperFn, AWSObject, AWSProperty, Ref
9 from .validators import boolean, network_port, integer, positive_integer
10
11 # Taken from:
12 # http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html
13
14 VALID_STORAGE_TYPES = ('standard', 'gp2', 'io1')
15 VALID_DB_ENGINES = ('MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee',
16 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
17 'sqlserver-web', 'postgres')
18 VALID_LICENSE_MODELS = ('license-included', 'bring-your-own-license',
19 'general-public-license')
20
21
22 def validate_iops(iops):
23 """DBInstance Iops validation rules."""
24
25 iops = integer(iops)
26 if int(iops) < 1000:
27 raise ValueError("DBInstance Iops, if set, must be greater than 1000.")
28 if int(iops) > 10000:
29 raise ValueError("DBInstance Iops, if set, must be less than 10000.")
30 return iops
31
32
33 def validate_storage_type(storage_type):
34 """Validate StorageType for DBInstance"""
35
36 if storage_type not in VALID_STORAGE_TYPES:
37 raise ValueError("DBInstance StorageType must be one of: %s" %
38 ", ".join(VALID_STORAGE_TYPES))
39 return storage_type
40
41
42 def validate_engine(engine):
43 """Validate database Engine for DBInstance """
44
45 if engine not in VALID_DB_ENGINES:
46 raise ValueError("DBInstance Engine must be one of: %s" %
47 ", ".join(VALID_DB_ENGINES))
48 return engine
49
50
51 def validate_license_model(license_model):
52 """Validate LicenseModel for DBInstance"""
53
54 if license_model not in VALID_LICENSE_MODELS:
55 raise ValueError("DBInstance LicenseModel must be one of: %s" %
56 ", ".join(VALID_LICENSE_MODELS))
57 return license_model
58
59
60 def validate_backup_window(window):
61 """Validate PreferredBackupWindow for DBInstance"""
62
63 hour = r'[01]?[0-9]|2[0-3]'
64 minute = r'[0-5][0-9]'
65 r = ("(?P<start_hour>%s):(?P<start_minute>%s)-"
66 "(?P<end_hour>%s):(?P<end_minute>%s)") % (hour, minute, hour, minute)
67 range_regex = re.compile(r)
68 m = range_regex.match(window)
69 if not m:
70 raise ValueError("DBInstance PreferredBackupWindow must be in the "
71 "format: hh24:mi-hh24:mi")
72 start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
73 end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
74 if abs(end_ts - start_ts) < 30:
75 raise ValueError("DBInstance PreferredBackupWindow must be at least "
76 "30 minutes long.")
77 return window
78
79
80 def validate_maintenance_window(window):
81 """Validate PreferredMaintenanceWindow for DBInstance"""
82
83 days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
84 day_re = r'[A-Z]{1}[a-z]{2}'
85 hour = r'[01]?[0-9]|2[0-3]'
86 minute = r'[0-5][0-9]'
87 r = ("(?P<start_day>%s):(?P<start_hour>%s):(?P<start_minute>%s)-"
88 "(?P<end_day>%s):(?P<end_hour>%s):(?P<end_minute>%s)") % (day_re,
89 hour,
90 minute,
91 day_re,
92 hour,
93 minute)
94 range_regex = re.compile(r)
95 m = range_regex.match(window)
96 if not m:
97 raise ValueError("DBInstance PreferredMaintenanceWindow must be in "
98 "the format: ddd:hh24:mi-ddd:hh24:mi")
99 if m.group('start_day') not in days or m.group('end_day') not in days:
100 raise ValueError("DBInstance PreferredMaintenanceWindow day part of "
101 "ranges must be one of: %s" % ", ".join(days))
102 start_ts = (days.index(m.group('start_day')) * 24 * 60) + \
103 (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
104 end_ts = (days.index(m.group('end_day')) * 24 * 60) + \
105 (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
106 if abs(end_ts - start_ts) < 30:
107 raise ValueError("DBInstance PreferredMaintenanceWindow must be at "
108 "least 30 minutes long.")
109 return window
110
111
112 def validate_backup_retention_period(days):
113 """Validate BackupRetentionPeriod for DBInstance"""
114
115 days = positive_integer(days)
116 if int(days) > 35:
117 raise ValueError("DBInstance BackupRetentionPeriod cannot be larger "
118 "than 35 days.")
119 return days
120
121
122 class DBInstance(AWSObject):
123 resource_type = "AWS::RDS::DBInstance"
124
125 props = {
126 'AllocatedStorage': (positive_integer, True),
127 'AllowMajorVersionUpgrade': (boolean, False),
128 'AutoMinorVersionUpgrade': (boolean, False),
129 'AvailabilityZone': (basestring, False),
130 'BackupRetentionPeriod': (validate_backup_retention_period, False),
131 'CharacterSetName': (basestring, False),
132 'DBInstanceClass': (basestring, True),
133 'DBInstanceIdentifier': (basestring, False),
134 'DBName': (basestring, False),
135 'DBParameterGroupName': (basestring, False),
136 'DBSecurityGroups': (list, False),
137 'DBSnapshotIdentifier': (basestring, False),
138 'DBSubnetGroupName': (basestring, False),
139 'Engine': (validate_engine, True),
140 'EngineVersion': (basestring, False),
141 'Iops': (validate_iops, False),
142 'KmsKeyId': (basestring, False),
143 'LicenseModel': (validate_license_model, False),
144 'MasterUsername': (basestring, False),
145 'MasterUserPassword': (basestring, False),
146 'MultiAZ': (boolean, False),
147 'OptionGroupName': (basestring, False),
148 'Port': (network_port, False),
149 'PreferredBackupWindow': (validate_backup_window, False),
150 'PreferredMaintenanceWindow': (basestring, False),
151 'PubliclyAccessible': (boolean, False),
152 'SourceDBInstanceIdentifier': (basestring, False),
153 'StorageEncrypted': (boolean, False),
154 'StorageType': (basestring, False),
155 'Tags': (list, False),
156 'VPCSecurityGroups': ([basestring, AWSHelperFn], False),
157 }
158
159 def validate(self):
160 if 'SourceDBInstanceIdentifier' in self.properties:
161
162 invalid_replica_properties = (
163 'BackupRetentionPeriod', 'DBName', 'MasterUsername',
164 'MasterUserPassword', 'PreferredBackupWindow', 'MultiAZ',
165 'DBSnapshotIdentifier', 'DBSubnetGroupName',
166 )
167
168 invalid_properties = [s for s in self.properties.keys() if
169 s in invalid_replica_properties]
170
171 if invalid_properties:
172 raise ValueError(
173 ('{0} properties can\'t be provided when '
174 'SourceDBInstanceIdentifier is present '
175 'AWS::RDS::DBInstance.'
176 ).format(', '.join(sorted(invalid_properties))))
177
178 if ('DBSnapshotIdentifier' not in self.properties and
179 'SourceDBInstanceIdentifier' not in self.properties) and \
180 ('MasterUsername' not in self.properties or
181 'MasterUserPassword' not in self.properties):
182 raise ValueError(
183 'Either (MasterUsername and MasterUserPassword) or'
184 ' DBSnapshotIdentifier are required in type '
185 'AWS::RDS::DBInstance.'
186 )
187
188 if 'KmsKeyId' in self.properties and \
189 'StorageEncrypted' not in self.properties:
190 raise ValueError(
191 'If KmsKeyId is provided, StorageEncrypted is required '
192 'AWS::RDS::DBInstance.'
193 )
194
195 nonetype = type(None)
196 avail_zone = self.properties.get('AvailabilityZone', None)
197 multi_az = self.properties.get('MultiAZ', None)
198 if not (isinstance(avail_zone, (AWSHelperFn, nonetype)) and
199 isinstance(multi_az, (AWSHelperFn, nonetype))):
200 if 'AvailabilityZone' in self.properties and \
201 self.properties.get('MultiAZ', None):
202 raise ValueError("AvailabiltyZone cannot be set on "
203 "DBInstance if MultiAZ is set to true.")
204
205 storage_type = self.properties.get('StorageType', None)
206 if storage_type and storage_type == 'io1' and \
207 'Iops' not in self.properties:
208 raise ValueError("Must specify Iops if using StorageType io1")
209
210 allocated_storage = self.properties.get('AllocatedStorage')
211 iops = self.properties.get('Iops', None)
212 if iops:
213 if not isinstance(allocated_storage, AWSHelperFn) and \
214 allocated_storage < 100:
215 raise ValueError("AllocatedStorage must be at least 100 when "
216 "Iops is set.")
217 if not isinstance(allocated_storage, AWSHelperFn) and not \
218 isinstance(iops, AWSHelperFn) and \
219 float(iops) / float(allocated_storage) > 10.0:
220 raise ValueError("AllocatedStorage must be no less than "
221 "1/10th the provisioned Iops")
222
223 return True
224
225
226 class DBParameterGroup(AWSObject):
227 resource_type = "AWS::RDS::DBParameterGroup"
228
229 props = {
230 'Description': (basestring, False),
231 'Family': (basestring, False),
232 'Parameters': (dict, False),
233 'Tags': (list, False),
234 }
235
236
237 class DBSubnetGroup(AWSObject):
238 resource_type = "AWS::RDS::DBSubnetGroup"
239
240 props = {
241 'DBSubnetGroupDescription': (basestring, True),
242 'SubnetIds': (list, True),
243 'Tags': (list, False),
244 }
245
246
247 class RDSSecurityGroup(AWSProperty):
248 props = {
249 'CIDRIP': (basestring, False),
250 'EC2SecurityGroupId': (basestring, False),
251 'EC2SecurityGroupName': (basestring, False),
252 'EC2SecurityGroupOwnerId': (basestring, False),
253 }
254
255
256 class DBSecurityGroup(AWSObject):
257 resource_type = "AWS::RDS::DBSecurityGroup"
258
259 props = {
260 'EC2VpcId': (basestring, False),
261 'DBSecurityGroupIngress': (list, True),
262 'GroupDescription': (basestring, True),
263 'Tags': (list, False),
264 }
265
266
267 class DBSecurityGroupIngress(AWSObject):
268 resource_type = "AWS::RDS::DBSecurityGroupIngress"
269
270 props = {
271 'CIDRIP': (basestring, False),
272 'DBSecurityGroupName': (basestring, True),
273 'EC2SecurityGroupId': (basestring, False),
274 'EC2SecurityGroupName': (basestring, False),
275 'EC2SecurityGroupOwnerId': (basestring, False),
276 }
277
278
279 class EventSubscription(AWSObject):
280 resource_type = "AWS::RDS::EventSubscription"
281
282 props = {
283 'Enabled': (boolean, False),
284 'EventCategories': ([basestring], False),
285 'SnsTopicArn': (basestring, True),
286 'SourceIds': ([basestring, Ref], False),
287 'SourceType': (basestring, False),
288 }
289
290
291 class OptionSetting(AWSProperty):
292 props = {
293 'Name': (basestring, False),
294 'Value': (basestring, False),
295 }
296
297
298 class OptionConfiguration(AWSProperty):
299 props = {
300 'DBSecurityGroupMemberships': ([basestring, Ref], False),
301 'OptionName': (basestring, True),
302 'OptionSettings': ([OptionSetting], False),
303 'Port': (network_port, False),
304 'VpcSecurityGroupMemberships': ([basestring, Ref], False),
305 }
306
307
308 class OptionGroup(AWSObject):
309 resource_type = "AWS::RDS::OptionGroup"
310
311 props = {
312 'EngineName': (basestring, True),
313 'MajorEngineVersion': (basestring, True),
314 'OptionGroupDescription': (basestring, True),
315 'OptionConfigurations': ([OptionConfiguration], True),
316 'Tags': (list, False),
317 }
318
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/rds.py b/troposphere/rds.py
--- a/troposphere/rds.py
+++ b/troposphere/rds.py
@@ -16,7 +16,7 @@
'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres')
VALID_LICENSE_MODELS = ('license-included', 'bring-your-own-license',
- 'general-public-license')
+ 'general-public-license', 'postgresql-license')
def validate_iops(iops):
|
{"golden_diff": "diff --git a/troposphere/rds.py b/troposphere/rds.py\n--- a/troposphere/rds.py\n+++ b/troposphere/rds.py\n@@ -16,7 +16,7 @@\n 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',\n 'sqlserver-web', 'postgres')\n VALID_LICENSE_MODELS = ('license-included', 'bring-your-own-license',\n- 'general-public-license')\n+ 'general-public-license', 'postgresql-license')\n \n \n def validate_iops(iops):\n", "issue": "RDS ValueError on LicenseModel\nI'm hitting a similar problem to #316, Postgres instances only use the string `'postgresql-license'` for the LicenseModel parameter and I'm getting a ValueError:\n\n```\nValueError: DBInstance LicenseModel must be one of: license-included, bring-your-own-license, general-public-license\n```\n\nI could not find the `postgresql-license` in any API documentation, but it is in the [user-guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CreatePostgreSQLInstance.html)\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nimport re\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import boolean, network_port, integer, positive_integer\n\n# Taken from:\n# http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html\n\nVALID_STORAGE_TYPES = ('standard', 'gp2', 'io1')\nVALID_DB_ENGINES = ('MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee',\n 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',\n 'sqlserver-web', 'postgres')\nVALID_LICENSE_MODELS = ('license-included', 'bring-your-own-license',\n 'general-public-license')\n\n\ndef validate_iops(iops):\n \"\"\"DBInstance Iops validation rules.\"\"\"\n\n iops = integer(iops)\n if int(iops) < 1000:\n raise ValueError(\"DBInstance Iops, if set, must be greater than 1000.\")\n if int(iops) > 10000:\n raise ValueError(\"DBInstance Iops, if set, must be less than 10000.\")\n return iops\n\n\ndef validate_storage_type(storage_type):\n \"\"\"Validate StorageType for DBInstance\"\"\"\n\n if storage_type not in VALID_STORAGE_TYPES:\n raise ValueError(\"DBInstance StorageType must be one of: %s\" %\n \", \".join(VALID_STORAGE_TYPES))\n return storage_type\n\n\ndef validate_engine(engine):\n \"\"\"Validate database Engine for DBInstance \"\"\"\n\n if engine not in VALID_DB_ENGINES:\n raise ValueError(\"DBInstance Engine must be one of: %s\" %\n \", \".join(VALID_DB_ENGINES))\n return engine\n\n\ndef validate_license_model(license_model):\n \"\"\"Validate LicenseModel for DBInstance\"\"\"\n\n if license_model not in VALID_LICENSE_MODELS:\n raise ValueError(\"DBInstance LicenseModel must be one of: %s\" %\n \", \".join(VALID_LICENSE_MODELS))\n return license_model\n\n\ndef validate_backup_window(window):\n \"\"\"Validate PreferredBackupWindow for DBInstance\"\"\"\n\n hour = r'[01]?[0-9]|2[0-3]'\n minute = r'[0-5][0-9]'\n r = (\"(?P<start_hour>%s):(?P<start_minute>%s)-\"\n \"(?P<end_hour>%s):(?P<end_minute>%s)\") % (hour, minute, hour, minute)\n range_regex = re.compile(r)\n m = range_regex.match(window)\n if not m:\n raise ValueError(\"DBInstance PreferredBackupWindow must be in the \"\n \"format: hh24:mi-hh24:mi\")\n start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))\n end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))\n if abs(end_ts - start_ts) < 30:\n raise ValueError(\"DBInstance PreferredBackupWindow must be at least \"\n \"30 minutes long.\")\n return window\n\n\ndef validate_maintenance_window(window):\n 
\"\"\"Validate PreferredMaintenanceWindow for DBInstance\"\"\"\n\n days = (\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\")\n day_re = r'[A-Z]{1}[a-z]{2}'\n hour = r'[01]?[0-9]|2[0-3]'\n minute = r'[0-5][0-9]'\n r = (\"(?P<start_day>%s):(?P<start_hour>%s):(?P<start_minute>%s)-\"\n \"(?P<end_day>%s):(?P<end_hour>%s):(?P<end_minute>%s)\") % (day_re,\n hour,\n minute,\n day_re,\n hour,\n minute)\n range_regex = re.compile(r)\n m = range_regex.match(window)\n if not m:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow must be in \"\n \"the format: ddd:hh24:mi-ddd:hh24:mi\")\n if m.group('start_day') not in days or m.group('end_day') not in days:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow day part of \"\n \"ranges must be one of: %s\" % \", \".join(days))\n start_ts = (days.index(m.group('start_day')) * 24 * 60) + \\\n (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))\n end_ts = (days.index(m.group('end_day')) * 24 * 60) + \\\n (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))\n if abs(end_ts - start_ts) < 30:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow must be at \"\n \"least 30 minutes long.\")\n return window\n\n\ndef validate_backup_retention_period(days):\n \"\"\"Validate BackupRetentionPeriod for DBInstance\"\"\"\n\n days = positive_integer(days)\n if int(days) > 35:\n raise ValueError(\"DBInstance BackupRetentionPeriod cannot be larger \"\n \"than 35 days.\")\n return days\n\n\nclass DBInstance(AWSObject):\n resource_type = \"AWS::RDS::DBInstance\"\n\n props = {\n 'AllocatedStorage': (positive_integer, True),\n 'AllowMajorVersionUpgrade': (boolean, False),\n 'AutoMinorVersionUpgrade': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'BackupRetentionPeriod': (validate_backup_retention_period, False),\n 'CharacterSetName': (basestring, False),\n 'DBInstanceClass': (basestring, True),\n 'DBInstanceIdentifier': (basestring, False),\n 'DBName': (basestring, False),\n 'DBParameterGroupName': (basestring, False),\n 'DBSecurityGroups': (list, False),\n 'DBSnapshotIdentifier': (basestring, False),\n 'DBSubnetGroupName': (basestring, False),\n 'Engine': (validate_engine, True),\n 'EngineVersion': (basestring, False),\n 'Iops': (validate_iops, False),\n 'KmsKeyId': (basestring, False),\n 'LicenseModel': (validate_license_model, False),\n 'MasterUsername': (basestring, False),\n 'MasterUserPassword': (basestring, False),\n 'MultiAZ': (boolean, False),\n 'OptionGroupName': (basestring, False),\n 'Port': (network_port, False),\n 'PreferredBackupWindow': (validate_backup_window, False),\n 'PreferredMaintenanceWindow': (basestring, False),\n 'PubliclyAccessible': (boolean, False),\n 'SourceDBInstanceIdentifier': (basestring, False),\n 'StorageEncrypted': (boolean, False),\n 'StorageType': (basestring, False),\n 'Tags': (list, False),\n 'VPCSecurityGroups': ([basestring, AWSHelperFn], False),\n }\n\n def validate(self):\n if 'SourceDBInstanceIdentifier' in self.properties:\n\n invalid_replica_properties = (\n 'BackupRetentionPeriod', 'DBName', 'MasterUsername',\n 'MasterUserPassword', 'PreferredBackupWindow', 'MultiAZ',\n 'DBSnapshotIdentifier', 'DBSubnetGroupName',\n )\n\n invalid_properties = [s for s in self.properties.keys() if\n s in invalid_replica_properties]\n\n if invalid_properties:\n raise ValueError(\n ('{0} properties can\\'t be provided when '\n 'SourceDBInstanceIdentifier is present '\n 'AWS::RDS::DBInstance.'\n ).format(', '.join(sorted(invalid_properties))))\n\n if ('DBSnapshotIdentifier' not in 
self.properties and\n 'SourceDBInstanceIdentifier' not in self.properties) and \\\n ('MasterUsername' not in self.properties or\n 'MasterUserPassword' not in self.properties):\n raise ValueError(\n 'Either (MasterUsername and MasterUserPassword) or'\n ' DBSnapshotIdentifier are required in type '\n 'AWS::RDS::DBInstance.'\n )\n\n if 'KmsKeyId' in self.properties and \\\n 'StorageEncrypted' not in self.properties:\n raise ValueError(\n 'If KmsKeyId is provided, StorageEncrypted is required '\n 'AWS::RDS::DBInstance.'\n )\n\n nonetype = type(None)\n avail_zone = self.properties.get('AvailabilityZone', None)\n multi_az = self.properties.get('MultiAZ', None)\n if not (isinstance(avail_zone, (AWSHelperFn, nonetype)) and\n isinstance(multi_az, (AWSHelperFn, nonetype))):\n if 'AvailabilityZone' in self.properties and \\\n self.properties.get('MultiAZ', None):\n raise ValueError(\"AvailabiltyZone cannot be set on \"\n \"DBInstance if MultiAZ is set to true.\")\n\n storage_type = self.properties.get('StorageType', None)\n if storage_type and storage_type == 'io1' and \\\n 'Iops' not in self.properties:\n raise ValueError(\"Must specify Iops if using StorageType io1\")\n\n allocated_storage = self.properties.get('AllocatedStorage')\n iops = self.properties.get('Iops', None)\n if iops:\n if not isinstance(allocated_storage, AWSHelperFn) and \\\n allocated_storage < 100:\n raise ValueError(\"AllocatedStorage must be at least 100 when \"\n \"Iops is set.\")\n if not isinstance(allocated_storage, AWSHelperFn) and not \\\n isinstance(iops, AWSHelperFn) and \\\n float(iops) / float(allocated_storage) > 10.0:\n raise ValueError(\"AllocatedStorage must be no less than \"\n \"1/10th the provisioned Iops\")\n\n return True\n\n\nclass DBParameterGroup(AWSObject):\n resource_type = \"AWS::RDS::DBParameterGroup\"\n\n props = {\n 'Description': (basestring, False),\n 'Family': (basestring, False),\n 'Parameters': (dict, False),\n 'Tags': (list, False),\n }\n\n\nclass DBSubnetGroup(AWSObject):\n resource_type = \"AWS::RDS::DBSubnetGroup\"\n\n props = {\n 'DBSubnetGroupDescription': (basestring, True),\n 'SubnetIds': (list, True),\n 'Tags': (list, False),\n }\n\n\nclass RDSSecurityGroup(AWSProperty):\n props = {\n 'CIDRIP': (basestring, False),\n 'EC2SecurityGroupId': (basestring, False),\n 'EC2SecurityGroupName': (basestring, False),\n 'EC2SecurityGroupOwnerId': (basestring, False),\n }\n\n\nclass DBSecurityGroup(AWSObject):\n resource_type = \"AWS::RDS::DBSecurityGroup\"\n\n props = {\n 'EC2VpcId': (basestring, False),\n 'DBSecurityGroupIngress': (list, True),\n 'GroupDescription': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass DBSecurityGroupIngress(AWSObject):\n resource_type = \"AWS::RDS::DBSecurityGroupIngress\"\n\n props = {\n 'CIDRIP': (basestring, False),\n 'DBSecurityGroupName': (basestring, True),\n 'EC2SecurityGroupId': (basestring, False),\n 'EC2SecurityGroupName': (basestring, False),\n 'EC2SecurityGroupOwnerId': (basestring, False),\n }\n\n\nclass EventSubscription(AWSObject):\n resource_type = \"AWS::RDS::EventSubscription\"\n\n props = {\n 'Enabled': (boolean, False),\n 'EventCategories': ([basestring], False),\n 'SnsTopicArn': (basestring, True),\n 'SourceIds': ([basestring, Ref], False),\n 'SourceType': (basestring, False),\n }\n\n\nclass OptionSetting(AWSProperty):\n props = {\n 'Name': (basestring, False),\n 'Value': (basestring, False),\n }\n\n\nclass OptionConfiguration(AWSProperty):\n props = {\n 'DBSecurityGroupMemberships': ([basestring, Ref], False),\n 'OptionName': 
(basestring, True),\n 'OptionSettings': ([OptionSetting], False),\n 'Port': (network_port, False),\n 'VpcSecurityGroupMemberships': ([basestring, Ref], False),\n }\n\n\nclass OptionGroup(AWSObject):\n resource_type = \"AWS::RDS::OptionGroup\"\n\n props = {\n 'EngineName': (basestring, True),\n 'MajorEngineVersion': (basestring, True),\n 'OptionGroupDescription': (basestring, True),\n 'OptionConfigurations': ([OptionConfiguration], True),\n 'Tags': (list, False),\n }\n", "path": "troposphere/rds.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nimport re\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import boolean, network_port, integer, positive_integer\n\n# Taken from:\n# http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html\n\nVALID_STORAGE_TYPES = ('standard', 'gp2', 'io1')\nVALID_DB_ENGINES = ('MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee',\n 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',\n 'sqlserver-web', 'postgres')\nVALID_LICENSE_MODELS = ('license-included', 'bring-your-own-license',\n 'general-public-license', 'postgresql-license')\n\n\ndef validate_iops(iops):\n \"\"\"DBInstance Iops validation rules.\"\"\"\n\n iops = integer(iops)\n if int(iops) < 1000:\n raise ValueError(\"DBInstance Iops, if set, must be greater than 1000.\")\n if int(iops) > 10000:\n raise ValueError(\"DBInstance Iops, if set, must be less than 10000.\")\n return iops\n\n\ndef validate_storage_type(storage_type):\n \"\"\"Validate StorageType for DBInstance\"\"\"\n\n if storage_type not in VALID_STORAGE_TYPES:\n raise ValueError(\"DBInstance StorageType must be one of: %s\" %\n \", \".join(VALID_STORAGE_TYPES))\n return storage_type\n\n\ndef validate_engine(engine):\n \"\"\"Validate database Engine for DBInstance \"\"\"\n\n if engine not in VALID_DB_ENGINES:\n raise ValueError(\"DBInstance Engine must be one of: %s\" %\n \", \".join(VALID_DB_ENGINES))\n return engine\n\n\ndef validate_license_model(license_model):\n \"\"\"Validate LicenseModel for DBInstance\"\"\"\n\n if license_model not in VALID_LICENSE_MODELS:\n raise ValueError(\"DBInstance LicenseModel must be one of: %s\" %\n \", \".join(VALID_LICENSE_MODELS))\n return license_model\n\n\ndef validate_backup_window(window):\n \"\"\"Validate PreferredBackupWindow for DBInstance\"\"\"\n\n hour = r'[01]?[0-9]|2[0-3]'\n minute = r'[0-5][0-9]'\n r = (\"(?P<start_hour>%s):(?P<start_minute>%s)-\"\n \"(?P<end_hour>%s):(?P<end_minute>%s)\") % (hour, minute, hour, minute)\n range_regex = re.compile(r)\n m = range_regex.match(window)\n if not m:\n raise ValueError(\"DBInstance PreferredBackupWindow must be in the \"\n \"format: hh24:mi-hh24:mi\")\n start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))\n end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))\n if abs(end_ts - start_ts) < 30:\n raise ValueError(\"DBInstance PreferredBackupWindow must be at least \"\n \"30 minutes long.\")\n return window\n\n\ndef validate_maintenance_window(window):\n \"\"\"Validate PreferredMaintenanceWindow for DBInstance\"\"\"\n\n days = (\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\")\n day_re = r'[A-Z]{1}[a-z]{2}'\n hour = r'[01]?[0-9]|2[0-3]'\n minute = r'[0-5][0-9]'\n r = (\"(?P<start_day>%s):(?P<start_hour>%s):(?P<start_minute>%s)-\"\n \"(?P<end_day>%s):(?P<end_hour>%s):(?P<end_minute>%s)\") % (day_re,\n hour,\n minute,\n day_re,\n hour,\n minute)\n range_regex 
= re.compile(r)\n m = range_regex.match(window)\n if not m:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow must be in \"\n \"the format: ddd:hh24:mi-ddd:hh24:mi\")\n if m.group('start_day') not in days or m.group('end_day') not in days:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow day part of \"\n \"ranges must be one of: %s\" % \", \".join(days))\n start_ts = (days.index(m.group('start_day')) * 24 * 60) + \\\n (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))\n end_ts = (days.index(m.group('end_day')) * 24 * 60) + \\\n (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))\n if abs(end_ts - start_ts) < 30:\n raise ValueError(\"DBInstance PreferredMaintenanceWindow must be at \"\n \"least 30 minutes long.\")\n return window\n\n\ndef validate_backup_retention_period(days):\n \"\"\"Validate BackupRetentionPeriod for DBInstance\"\"\"\n\n days = positive_integer(days)\n if int(days) > 35:\n raise ValueError(\"DBInstance BackupRetentionPeriod cannot be larger \"\n \"than 35 days.\")\n return days\n\n\nclass DBInstance(AWSObject):\n resource_type = \"AWS::RDS::DBInstance\"\n\n props = {\n 'AllocatedStorage': (positive_integer, True),\n 'AllowMajorVersionUpgrade': (boolean, False),\n 'AutoMinorVersionUpgrade': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'BackupRetentionPeriod': (validate_backup_retention_period, False),\n 'CharacterSetName': (basestring, False),\n 'DBInstanceClass': (basestring, True),\n 'DBInstanceIdentifier': (basestring, False),\n 'DBName': (basestring, False),\n 'DBParameterGroupName': (basestring, False),\n 'DBSecurityGroups': (list, False),\n 'DBSnapshotIdentifier': (basestring, False),\n 'DBSubnetGroupName': (basestring, False),\n 'Engine': (validate_engine, True),\n 'EngineVersion': (basestring, False),\n 'Iops': (validate_iops, False),\n 'KmsKeyId': (basestring, False),\n 'LicenseModel': (validate_license_model, False),\n 'MasterUsername': (basestring, False),\n 'MasterUserPassword': (basestring, False),\n 'MultiAZ': (boolean, False),\n 'OptionGroupName': (basestring, False),\n 'Port': (network_port, False),\n 'PreferredBackupWindow': (validate_backup_window, False),\n 'PreferredMaintenanceWindow': (basestring, False),\n 'PubliclyAccessible': (boolean, False),\n 'SourceDBInstanceIdentifier': (basestring, False),\n 'StorageEncrypted': (boolean, False),\n 'StorageType': (basestring, False),\n 'Tags': (list, False),\n 'VPCSecurityGroups': ([basestring, AWSHelperFn], False),\n }\n\n def validate(self):\n if 'SourceDBInstanceIdentifier' in self.properties:\n\n invalid_replica_properties = (\n 'BackupRetentionPeriod', 'DBName', 'MasterUsername',\n 'MasterUserPassword', 'PreferredBackupWindow', 'MultiAZ',\n 'DBSnapshotIdentifier', 'DBSubnetGroupName',\n )\n\n invalid_properties = [s for s in self.properties.keys() if\n s in invalid_replica_properties]\n\n if invalid_properties:\n raise ValueError(\n ('{0} properties can\\'t be provided when '\n 'SourceDBInstanceIdentifier is present '\n 'AWS::RDS::DBInstance.'\n ).format(', '.join(sorted(invalid_properties))))\n\n if ('DBSnapshotIdentifier' not in self.properties and\n 'SourceDBInstanceIdentifier' not in self.properties) and \\\n ('MasterUsername' not in self.properties or\n 'MasterUserPassword' not in self.properties):\n raise ValueError(\n 'Either (MasterUsername and MasterUserPassword) or'\n ' DBSnapshotIdentifier are required in type '\n 'AWS::RDS::DBInstance.'\n )\n\n if 'KmsKeyId' in self.properties and \\\n 'StorageEncrypted' not in self.properties:\n 
raise ValueError(\n 'If KmsKeyId is provided, StorageEncrypted is required '\n 'AWS::RDS::DBInstance.'\n )\n\n nonetype = type(None)\n avail_zone = self.properties.get('AvailabilityZone', None)\n multi_az = self.properties.get('MultiAZ', None)\n if not (isinstance(avail_zone, (AWSHelperFn, nonetype)) and\n isinstance(multi_az, (AWSHelperFn, nonetype))):\n if 'AvailabilityZone' in self.properties and \\\n self.properties.get('MultiAZ', None):\n raise ValueError(\"AvailabiltyZone cannot be set on \"\n \"DBInstance if MultiAZ is set to true.\")\n\n storage_type = self.properties.get('StorageType', None)\n if storage_type and storage_type == 'io1' and \\\n 'Iops' not in self.properties:\n raise ValueError(\"Must specify Iops if using StorageType io1\")\n\n allocated_storage = self.properties.get('AllocatedStorage')\n iops = self.properties.get('Iops', None)\n if iops:\n if not isinstance(allocated_storage, AWSHelperFn) and \\\n allocated_storage < 100:\n raise ValueError(\"AllocatedStorage must be at least 100 when \"\n \"Iops is set.\")\n if not isinstance(allocated_storage, AWSHelperFn) and not \\\n isinstance(iops, AWSHelperFn) and \\\n float(iops) / float(allocated_storage) > 10.0:\n raise ValueError(\"AllocatedStorage must be no less than \"\n \"1/10th the provisioned Iops\")\n\n return True\n\n\nclass DBParameterGroup(AWSObject):\n resource_type = \"AWS::RDS::DBParameterGroup\"\n\n props = {\n 'Description': (basestring, False),\n 'Family': (basestring, False),\n 'Parameters': (dict, False),\n 'Tags': (list, False),\n }\n\n\nclass DBSubnetGroup(AWSObject):\n resource_type = \"AWS::RDS::DBSubnetGroup\"\n\n props = {\n 'DBSubnetGroupDescription': (basestring, True),\n 'SubnetIds': (list, True),\n 'Tags': (list, False),\n }\n\n\nclass RDSSecurityGroup(AWSProperty):\n props = {\n 'CIDRIP': (basestring, False),\n 'EC2SecurityGroupId': (basestring, False),\n 'EC2SecurityGroupName': (basestring, False),\n 'EC2SecurityGroupOwnerId': (basestring, False),\n }\n\n\nclass DBSecurityGroup(AWSObject):\n resource_type = \"AWS::RDS::DBSecurityGroup\"\n\n props = {\n 'EC2VpcId': (basestring, False),\n 'DBSecurityGroupIngress': (list, True),\n 'GroupDescription': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass DBSecurityGroupIngress(AWSObject):\n resource_type = \"AWS::RDS::DBSecurityGroupIngress\"\n\n props = {\n 'CIDRIP': (basestring, False),\n 'DBSecurityGroupName': (basestring, True),\n 'EC2SecurityGroupId': (basestring, False),\n 'EC2SecurityGroupName': (basestring, False),\n 'EC2SecurityGroupOwnerId': (basestring, False),\n }\n\n\nclass EventSubscription(AWSObject):\n resource_type = \"AWS::RDS::EventSubscription\"\n\n props = {\n 'Enabled': (boolean, False),\n 'EventCategories': ([basestring], False),\n 'SnsTopicArn': (basestring, True),\n 'SourceIds': ([basestring, Ref], False),\n 'SourceType': (basestring, False),\n }\n\n\nclass OptionSetting(AWSProperty):\n props = {\n 'Name': (basestring, False),\n 'Value': (basestring, False),\n }\n\n\nclass OptionConfiguration(AWSProperty):\n props = {\n 'DBSecurityGroupMemberships': ([basestring, Ref], False),\n 'OptionName': (basestring, True),\n 'OptionSettings': ([OptionSetting], False),\n 'Port': (network_port, False),\n 'VpcSecurityGroupMemberships': ([basestring, Ref], False),\n }\n\n\nclass OptionGroup(AWSObject):\n resource_type = \"AWS::RDS::OptionGroup\"\n\n props = {\n 'EngineName': (basestring, True),\n 'MajorEngineVersion': (basestring, True),\n 'OptionGroupDescription': (basestring, True),\n 'OptionConfigurations': 
([OptionConfiguration], True),\n 'Tags': (list, False),\n }\n", "path": "troposphere/rds.py"}]}
| 4,071 | 117 |
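With the one-line change from the golden diff above, the same check accepts the PostgreSQL value. A standalone sketch of the patched constant and validator, re-implemented here for illustration rather than imported from troposphere:

```python
# Patched tuple as it appears after the fix, plus the unchanged check.
VALID_LICENSE_MODELS = (
    "license-included",
    "bring-your-own-license",
    "general-public-license",
    "postgresql-license",  # added by the patch above
)


def validate_license_model(license_model):
    """Validate LicenseModel for DBInstance."""
    if license_model not in VALID_LICENSE_MODELS:
        raise ValueError(
            "DBInstance LicenseModel must be one of: %s"
            % ", ".join(VALID_LICENSE_MODELS)
        )
    return license_model


assert validate_license_model("postgresql-license") == "postgresql-license"
```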
gh_patches_debug_63849
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-10794
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Syntax highlighting of search input
### Describe the issue
1. Go to a screenshot
2. Enter "not found" as the search term
3. A lot of strings appear as search results, most of them not containing anything related to "not found"
If I enter "not" or "found" then fewer results are found compared to "not found".
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
1. Go to a screenshot
2. Enter "not found" as the search term
3. A lot of strings appear as search results, most of them not containing anything related to "not found"
### Expected behavior
Search only lists strings containing "not found"
### Screenshots

### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
_No response_
--- END ISSUE ---
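Note that the golden diff recorded for this task (shown at the end of the record) does not touch the query parser at all; it only gives `QueryField` a default one-row `Textarea` widget so the search box can carry syntax highlighting. A minimal standalone sketch of that kind of widget override — an illustrative Django form field, not Weblate's actual class:

```python
# Illustrative Django field with a single-row <textarea> as its default
# widget, mirroring the shape of this record's golden diff; this is not
# Weblate's real QueryField.
from django import forms


class QueryField(forms.CharField):
    def __init__(self, **kwargs):
        kwargs.setdefault("label", "Query")
        kwargs.setdefault("required", False)
        # A one-row textarea instead of <input type="text"> lets a JS
        # highlighter decorate the query as it is typed.
        kwargs.setdefault(
            "widget", forms.Textarea(attrs={"cols": None, "rows": 1})
        )
        super().__init__(**kwargs)
```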
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/utils/forms.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from crispy_forms.layout import Div, Field
6 from crispy_forms.utils import TEMPLATE_PACK
7 from django import forms
8 from django.core.exceptions import ValidationError
9 from django.db.models import Q
10 from django.forms.models import ModelChoiceIterator
11 from django.template.loader import render_to_string
12 from django.utils.translation import gettext, gettext_lazy
13
14 from weblate.trans.defines import EMAIL_LENGTH, USERNAME_LENGTH
15 from weblate.trans.filter import FILTERS
16 from weblate.trans.util import sort_unicode
17 from weblate.utils.errors import report_error
18 from weblate.utils.search import parse_query
19 from weblate.utils.validators import validate_email, validate_username
20
21
22 class QueryField(forms.CharField):
23 def __init__(self, parser: str = "unit", **kwargs):
24 if "label" not in kwargs:
25 kwargs["label"] = gettext_lazy("Query")
26 if "required" not in kwargs:
27 kwargs["required"] = False
28 self.parser = parser
29 super().__init__(**kwargs)
30
31 def clean(self, value):
32 if not value:
33 if self.required:
34 raise ValidationError(gettext("Missing query string."))
35 return ""
36 try:
37 parse_query(value, parser=self.parser)
38 except ValueError as error:
39 raise ValidationError(
40 gettext("Could not parse query string: {}").format(error)
41 ) from error
42 except Exception as error:
43 report_error(cause="Error parsing search query")
44 raise ValidationError(
45 gettext("Could not parse query string: {}").format(error)
46 ) from error
47 return value
48
49
50 class UsernameField(forms.CharField):
51 default_validators = [validate_username]
52
53 def __init__(self, *args, **kwargs):
54 params = {
55 "max_length": USERNAME_LENGTH,
56 "help_text": gettext_lazy(
57 "Username may only contain letters, "
58 "numbers or the following characters: @ . + - _"
59 ),
60 "label": gettext_lazy("Username"),
61 "required": True,
62 }
63 params.update(kwargs)
64 self.valid = None
65
66 super().__init__(*args, **params)
67
68
69 class UserField(forms.CharField):
70 def __init__(
71 self,
72 queryset=None,
73 empty_label="---------",
74 to_field_name=None,
75 limit_choices_to=None,
76 blank=None,
77 **kwargs,
78 ):
79 # This swallows some parameters to mimic ModelChoiceField API
80 super().__init__(**kwargs)
81
82 def widget_attrs(self, widget):
83 attrs = super().widget_attrs(widget)
84 attrs["dir"] = "ltr"
85 attrs["class"] = "user-autocomplete"
86 attrs["spellcheck"] = "false"
87 attrs["autocorrect"] = "off"
88 attrs["autocomplete"] = "off"
89 attrs["autocapitalize"] = "off"
90 return attrs
91
92 def clean(self, value):
93 from weblate.auth.models import User
94
95 if not value:
96 if self.required:
97 raise ValidationError(gettext("Missing username or e-mail."))
98 return None
99 try:
100 return User.objects.get(Q(username=value) | Q(email=value))
101 except User.DoesNotExist:
102 raise ValidationError(gettext("Could not find any such user."))
103 except User.MultipleObjectsReturned:
104 raise ValidationError(gettext("More possible users were found."))
105
106
107 class EmailField(forms.EmailField):
108 """
109 Slightly restricted EmailField.
110
111 We blacklist some additional local parts and customize error messages.
112 """
113
114 default_validators = [validate_email]
115
116 def __init__(self, *args, **kwargs):
117 kwargs.setdefault("max_length", EMAIL_LENGTH)
118 super().__init__(*args, **kwargs)
119
120
121 class SortedSelectMixin:
122 """Mixin for Select widgets to sort choices alphabetically."""
123
124 def optgroups(self, name, value, attrs=None):
125 groups = super().optgroups(name, value, attrs)
126 return sort_unicode(groups, lambda val: str(val[1][0]["label"]))
127
128
129 class ColorWidget(forms.RadioSelect):
130 def __init__(self, attrs=None, choices=()):
131 attrs = {**(attrs or {}), "class": "color_edit"}
132 super().__init__(attrs, choices)
133
134
135 class SortedSelectMultiple(SortedSelectMixin, forms.SelectMultiple):
136 """Wrapper class to sort choices alphabetically."""
137
138
139 class SortedSelect(SortedSelectMixin, forms.Select):
140 """Wrapper class to sort choices alphabetically."""
141
142
143 class ContextDiv(Div):
144 def __init__(self, *fields, **kwargs):
145 self.context = kwargs.pop("context", {})
146 super().__init__(*fields, **kwargs)
147
148 def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
149 template = self.get_template_name(template_pack)
150 return render_to_string(template, self.context)
151
152
153 class SearchField(Field):
154 def __init__(self, *args, **kwargs):
155 kwargs["template"] = "snippets/query-field.html"
156 super().__init__(*args, **kwargs)
157
158 def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
159 extra_context = {"custom_filter_list": self.get_search_query_choices()}
160 return super().render(form, context, template_pack, extra_context, **kwargs)
161
162 def get_search_query_choices(self):
163 """Return all filtering choices for query field."""
164 filter_keys = [
165 "nottranslated",
166 "todo",
167 "translated",
168 "fuzzy",
169 "suggestions",
170 "variants",
171 "screenshots",
172 "labels",
173 "context",
174 "nosuggestions",
175 "comments",
176 "allchecks",
177 "approved",
178 "unapproved",
179 ]
180 return [
181 (key, FILTERS.get_filter_name(key), FILTERS.get_filter_query(key))
182 for key in filter_keys
183 ]
184
185
186 class CachedQueryIterator(ModelChoiceIterator):
187 """
188 Choice iterator for cached querysets.
189
190 It assumes the queryset is reused and avoids using an iterator or counting queries.
191 """
192
193 def __iter__(self):
194 if self.field.empty_label is not None:
195 yield ("", self.field.empty_label)
196 for obj in self.queryset:
197 yield self.choice(obj)
198
199 def __len__(self):
200 return len(self.queryset) + (1 if self.field.empty_label is not None else 0)
201
202 def __bool__(self):
203 return self.field.empty_label is not None or bool(self.queryset)
204
205
206 class NonCopyingSetQuerysetMixin:
207 iterator = CachedQueryIterator
208
209 def _get_queryset(self):
210 return self._queryset
211
212 def _set_queryset(self, queryset):
213 self._queryset = queryset
214 self.widget.choices = self.choices
215
216 queryset = property(_get_queryset, _set_queryset)
217
218
219 class CachedModelChoiceField(NonCopyingSetQuerysetMixin, forms.ModelChoiceField):
220 pass
221
222
223 class CachedModelMultipleChoiceField(
224 NonCopyingSetQuerysetMixin, forms.ModelMultipleChoiceField
225 ):
226 pass
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/weblate/utils/forms.py b/weblate/utils/forms.py
--- a/weblate/utils/forms.py
+++ b/weblate/utils/forms.py
@@ -25,6 +25,8 @@
kwargs["label"] = gettext_lazy("Query")
if "required" not in kwargs:
kwargs["required"] = False
+ if "widget" not in kwargs:
+ kwargs["widget"] = forms.Textarea(attrs={"cols": None, "rows": 1})
self.parser = parser
super().__init__(**kwargs)
|
{"golden_diff": "diff --git a/weblate/utils/forms.py b/weblate/utils/forms.py\n--- a/weblate/utils/forms.py\n+++ b/weblate/utils/forms.py\n@@ -25,6 +25,8 @@\n kwargs[\"label\"] = gettext_lazy(\"Query\")\n if \"required\" not in kwargs:\n kwargs[\"required\"] = False\n+ if \"widget\" not in kwargs:\n+ kwargs[\"widget\"] = forms.Textarea(attrs={\"cols\": None, \"rows\": 1})\n self.parser = parser\n super().__init__(**kwargs)\n", "issue": "Syntax highlighting of search input\n### Describe the issue\n\n1. Go to a screenshot\r\n2. Enter \"not found\" as the search term\r\n3. A lot of strings appear as search results, most of them not containing anything related to \"not found\"\r\n\r\n\r\nIf I enter \"not\" or \"found\" then fewer results are found compared to \"not found\".\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Go to a screenshot\r\n2. Enter \"not found\" as the search term\r\n3. A lot of strings appear as search results, most of them not containing anything related to \"not found\"\n\n### Expected behavior\n\nSearch only lists strings containing \"not found\"\n\n### Screenshots\n\n\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom crispy_forms.layout import Div, Field\nfrom crispy_forms.utils import TEMPLATE_PACK\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.forms.models import ModelChoiceIterator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext, gettext_lazy\n\nfrom weblate.trans.defines import EMAIL_LENGTH, USERNAME_LENGTH\nfrom weblate.trans.filter import FILTERS\nfrom weblate.trans.util import sort_unicode\nfrom weblate.utils.errors import report_error\nfrom weblate.utils.search import parse_query\nfrom weblate.utils.validators import validate_email, validate_username\n\n\nclass QueryField(forms.CharField):\n def __init__(self, parser: str = \"unit\", **kwargs):\n if \"label\" not in kwargs:\n kwargs[\"label\"] = gettext_lazy(\"Query\")\n if \"required\" not in kwargs:\n kwargs[\"required\"] = False\n self.parser = parser\n super().__init__(**kwargs)\n\n def clean(self, value):\n if not value:\n if self.required:\n raise ValidationError(gettext(\"Missing query string.\"))\n return \"\"\n try:\n parse_query(value, parser=self.parser)\n except ValueError as error:\n raise ValidationError(\n gettext(\"Could not parse query string: {}\").format(error)\n ) from error\n except Exception as error:\n report_error(cause=\"Error parsing search query\")\n raise ValidationError(\n gettext(\"Could not parse query string: {}\").format(error)\n ) from error\n return value\n\n\nclass UsernameField(forms.CharField):\n default_validators = [validate_username]\n\n def __init__(self, *args, **kwargs):\n params = {\n \"max_length\": USERNAME_LENGTH,\n \"help_text\": gettext_lazy(\n \"Username may only contain letters, \"\n \"numbers or the following characters: @ . 
+ - _\"\n ),\n \"label\": gettext_lazy(\"Username\"),\n \"required\": True,\n }\n params.update(kwargs)\n self.valid = None\n\n super().__init__(*args, **params)\n\n\nclass UserField(forms.CharField):\n def __init__(\n self,\n queryset=None,\n empty_label=\"---------\",\n to_field_name=None,\n limit_choices_to=None,\n blank=None,\n **kwargs,\n ):\n # This swallows some parameters to mimic ModelChoiceField API\n super().__init__(**kwargs)\n\n def widget_attrs(self, widget):\n attrs = super().widget_attrs(widget)\n attrs[\"dir\"] = \"ltr\"\n attrs[\"class\"] = \"user-autocomplete\"\n attrs[\"spellcheck\"] = \"false\"\n attrs[\"autocorrect\"] = \"off\"\n attrs[\"autocomplete\"] = \"off\"\n attrs[\"autocapitalize\"] = \"off\"\n return attrs\n\n def clean(self, value):\n from weblate.auth.models import User\n\n if not value:\n if self.required:\n raise ValidationError(gettext(\"Missing username or e-mail.\"))\n return None\n try:\n return User.objects.get(Q(username=value) | Q(email=value))\n except User.DoesNotExist:\n raise ValidationError(gettext(\"Could not find any such user.\"))\n except User.MultipleObjectsReturned:\n raise ValidationError(gettext(\"More possible users were found.\"))\n\n\nclass EmailField(forms.EmailField):\n \"\"\"\n Slightly restricted EmailField.\n\n We blacklist some additional local parts and customize error messages.\n \"\"\"\n\n default_validators = [validate_email]\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"max_length\", EMAIL_LENGTH)\n super().__init__(*args, **kwargs)\n\n\nclass SortedSelectMixin:\n \"\"\"Mixin for Select widgets to sort choices alphabetically.\"\"\"\n\n def optgroups(self, name, value, attrs=None):\n groups = super().optgroups(name, value, attrs)\n return sort_unicode(groups, lambda val: str(val[1][0][\"label\"]))\n\n\nclass ColorWidget(forms.RadioSelect):\n def __init__(self, attrs=None, choices=()):\n attrs = {**(attrs or {}), \"class\": \"color_edit\"}\n super().__init__(attrs, choices)\n\n\nclass SortedSelectMultiple(SortedSelectMixin, forms.SelectMultiple):\n \"\"\"Wrapper class to sort choices alphabetically.\"\"\"\n\n\nclass SortedSelect(SortedSelectMixin, forms.Select):\n \"\"\"Wrapper class to sort choices alphabetically.\"\"\"\n\n\nclass ContextDiv(Div):\n def __init__(self, *fields, **kwargs):\n self.context = kwargs.pop(\"context\", {})\n super().__init__(*fields, **kwargs)\n\n def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):\n template = self.get_template_name(template_pack)\n return render_to_string(template, self.context)\n\n\nclass SearchField(Field):\n def __init__(self, *args, **kwargs):\n kwargs[\"template\"] = \"snippets/query-field.html\"\n super().__init__(*args, **kwargs)\n\n def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):\n extra_context = {\"custom_filter_list\": self.get_search_query_choices()}\n return super().render(form, context, template_pack, extra_context, **kwargs)\n\n def get_search_query_choices(self):\n \"\"\"Return all filtering choices for query field.\"\"\"\n filter_keys = [\n \"nottranslated\",\n \"todo\",\n \"translated\",\n \"fuzzy\",\n \"suggestions\",\n \"variants\",\n \"screenshots\",\n \"labels\",\n \"context\",\n \"nosuggestions\",\n \"comments\",\n \"allchecks\",\n \"approved\",\n \"unapproved\",\n ]\n return [\n (key, FILTERS.get_filter_name(key), FILTERS.get_filter_query(key))\n for key in filter_keys\n ]\n\n\nclass CachedQueryIterator(ModelChoiceIterator):\n \"\"\"\n Choice iterator for cached querysets.\n\n It assumes the 
queryset is reused and avoids using an iterator or counting queries.\n \"\"\"\n\n def __iter__(self):\n if self.field.empty_label is not None:\n yield (\"\", self.field.empty_label)\n for obj in self.queryset:\n yield self.choice(obj)\n\n def __len__(self):\n return len(self.queryset) + (1 if self.field.empty_label is not None else 0)\n\n def __bool__(self):\n return self.field.empty_label is not None or bool(self.queryset)\n\n\nclass NonCopyingSetQuerysetMixin:\n iterator = CachedQueryIterator\n\n def _get_queryset(self):\n return self._queryset\n\n def _set_queryset(self, queryset):\n self._queryset = queryset\n self.widget.choices = self.choices\n\n queryset = property(_get_queryset, _set_queryset)\n\n\nclass CachedModelChoiceField(NonCopyingSetQuerysetMixin, forms.ModelChoiceField):\n pass\n\n\nclass CachedModelMultipleChoiceField(\n NonCopyingSetQuerysetMixin, forms.ModelMultipleChoiceField\n):\n pass\n", "path": "weblate/utils/forms.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom crispy_forms.layout import Div, Field\nfrom crispy_forms.utils import TEMPLATE_PACK\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.forms.models import ModelChoiceIterator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext, gettext_lazy\n\nfrom weblate.trans.defines import EMAIL_LENGTH, USERNAME_LENGTH\nfrom weblate.trans.filter import FILTERS\nfrom weblate.trans.util import sort_unicode\nfrom weblate.utils.errors import report_error\nfrom weblate.utils.search import parse_query\nfrom weblate.utils.validators import validate_email, validate_username\n\n\nclass QueryField(forms.CharField):\n def __init__(self, parser: str = \"unit\", **kwargs):\n if \"label\" not in kwargs:\n kwargs[\"label\"] = gettext_lazy(\"Query\")\n if \"required\" not in kwargs:\n kwargs[\"required\"] = False\n if \"widget\" not in kwargs:\n kwargs[\"widget\"] = forms.Textarea(attrs={\"cols\": None, \"rows\": 1})\n self.parser = parser\n super().__init__(**kwargs)\n\n def clean(self, value):\n if not value:\n if self.required:\n raise ValidationError(gettext(\"Missing query string.\"))\n return \"\"\n try:\n parse_query(value, parser=self.parser)\n except ValueError as error:\n raise ValidationError(\n gettext(\"Could not parse query string: {}\").format(error)\n ) from error\n except Exception as error:\n report_error(cause=\"Error parsing search query\")\n raise ValidationError(\n gettext(\"Could not parse query string: {}\").format(error)\n ) from error\n return value\n\n\nclass UsernameField(forms.CharField):\n default_validators = [validate_username]\n\n def __init__(self, *args, **kwargs):\n params = {\n \"max_length\": USERNAME_LENGTH,\n \"help_text\": gettext_lazy(\n \"Username may only contain letters, \"\n \"numbers or the following characters: @ . 
+ - _\"\n ),\n \"label\": gettext_lazy(\"Username\"),\n \"required\": True,\n }\n params.update(kwargs)\n self.valid = None\n\n super().__init__(*args, **params)\n\n\nclass UserField(forms.CharField):\n def __init__(\n self,\n queryset=None,\n empty_label=\"---------\",\n to_field_name=None,\n limit_choices_to=None,\n blank=None,\n **kwargs,\n ):\n # This swallows some parameters to mimic ModelChoiceField API\n super().__init__(**kwargs)\n\n def widget_attrs(self, widget):\n attrs = super().widget_attrs(widget)\n attrs[\"dir\"] = \"ltr\"\n attrs[\"class\"] = \"user-autocomplete\"\n attrs[\"spellcheck\"] = \"false\"\n attrs[\"autocorrect\"] = \"off\"\n attrs[\"autocomplete\"] = \"off\"\n attrs[\"autocapitalize\"] = \"off\"\n return attrs\n\n def clean(self, value):\n from weblate.auth.models import User\n\n if not value:\n if self.required:\n raise ValidationError(gettext(\"Missing username or e-mail.\"))\n return None\n try:\n return User.objects.get(Q(username=value) | Q(email=value))\n except User.DoesNotExist:\n raise ValidationError(gettext(\"Could not find any such user.\"))\n except User.MultipleObjectsReturned:\n raise ValidationError(gettext(\"More possible users were found.\"))\n\n\nclass EmailField(forms.EmailField):\n \"\"\"\n Slightly restricted EmailField.\n\n We blacklist some additional local parts and customize error messages.\n \"\"\"\n\n default_validators = [validate_email]\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"max_length\", EMAIL_LENGTH)\n super().__init__(*args, **kwargs)\n\n\nclass SortedSelectMixin:\n \"\"\"Mixin for Select widgets to sort choices alphabetically.\"\"\"\n\n def optgroups(self, name, value, attrs=None):\n groups = super().optgroups(name, value, attrs)\n return sort_unicode(groups, lambda val: str(val[1][0][\"label\"]))\n\n\nclass ColorWidget(forms.RadioSelect):\n def __init__(self, attrs=None, choices=()):\n attrs = {**(attrs or {}), \"class\": \"color_edit\"}\n super().__init__(attrs, choices)\n\n\nclass SortedSelectMultiple(SortedSelectMixin, forms.SelectMultiple):\n \"\"\"Wrapper class to sort choices alphabetically.\"\"\"\n\n\nclass SortedSelect(SortedSelectMixin, forms.Select):\n \"\"\"Wrapper class to sort choices alphabetically.\"\"\"\n\n\nclass ContextDiv(Div):\n def __init__(self, *fields, **kwargs):\n self.context = kwargs.pop(\"context\", {})\n super().__init__(*fields, **kwargs)\n\n def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):\n template = self.get_template_name(template_pack)\n return render_to_string(template, self.context)\n\n\nclass SearchField(Field):\n def __init__(self, *args, **kwargs):\n kwargs[\"template\"] = \"snippets/query-field.html\"\n super().__init__(*args, **kwargs)\n\n def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):\n extra_context = {\"custom_filter_list\": self.get_search_query_choices()}\n return super().render(form, context, template_pack, extra_context, **kwargs)\n\n def get_search_query_choices(self):\n \"\"\"Return all filtering choices for query field.\"\"\"\n filter_keys = [\n \"nottranslated\",\n \"todo\",\n \"translated\",\n \"fuzzy\",\n \"suggestions\",\n \"variants\",\n \"screenshots\",\n \"labels\",\n \"context\",\n \"nosuggestions\",\n \"comments\",\n \"allchecks\",\n \"approved\",\n \"unapproved\",\n ]\n return [\n (key, FILTERS.get_filter_name(key), FILTERS.get_filter_query(key))\n for key in filter_keys\n ]\n\n\nclass CachedQueryIterator(ModelChoiceIterator):\n \"\"\"\n Choice iterator for cached querysets.\n\n It assumes the 
queryset is reused and avoids using an iterator or counting queries.\n \"\"\"\n\n def __iter__(self):\n if self.field.empty_label is not None:\n yield (\"\", self.field.empty_label)\n for obj in self.queryset:\n yield self.choice(obj)\n\n def __len__(self):\n return len(self.queryset) + (1 if self.field.empty_label is not None else 0)\n\n def __bool__(self):\n return self.field.empty_label is not None or bool(self.queryset)\n\n\nclass NonCopyingSetQuerysetMixin:\n iterator = CachedQueryIterator\n\n def _get_queryset(self):\n return self._queryset\n\n def _set_queryset(self, queryset):\n self._queryset = queryset\n self.widget.choices = self.choices\n\n queryset = property(_get_queryset, _set_queryset)\n\n\nclass CachedModelChoiceField(NonCopyingSetQuerysetMixin, forms.ModelChoiceField):\n pass\n\n\nclass CachedModelMultipleChoiceField(\n NonCopyingSetQuerysetMixin, forms.ModelMultipleChoiceField\n):\n pass\n", "path": "weblate/utils/forms.py"}]}
| 2,639 | 122 |
gh_patches_debug_23594
|
rasdani/github-patches
|
git_diff
|
canonical__microk8s-3054
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
latest/edge: server cert generation failure too verbose
```
Restarting cluster-agent to load new server certificate
error: error running snapctl: snap "microk8s" has "service-control" change in progress
Traceback (most recent call last):
  File "/snap/microk8s/3125/scripts/cluster/add_token.py", line 190, in <module>
    subprocess.check_call(["snapctl", "restart", "microk8s.daemon-cluster-agent"])
  File "/snap/microk8s/3125/usr/lib/python3.6/subprocess.py", line 311, in check_call
    raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['snapctl', 'restart', 'microk8s.daemon-cluster-agent']' returned non-zero exit status 1.
```
The action failing is fine, but the call stack being printed is unnecessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/cluster/add_token.py`
Content:
```
1 import json
2 import yaml
3 import os
4 import sys
5 import time
6 import argparse
7 import subprocess
8 import hashlib
9 import ssl
10 import http.client
11
12 from common.utils import is_node_running_dqlite
13
14 try:
15 from secrets import token_hex
16 except ImportError:
17 from os import urandom
18
19 def token_hex(nbytes=None):
20 return urandom(nbytes).hex()
21
22
23 cluster_tokens_file = os.path.expandvars("${SNAP_DATA}/credentials/cluster-tokens.txt")
24 utils_sh_file = os.path.expandvars("${SNAP}/actions/common/utils.sh")
25 token_with_expiry = "{}|{}\n"
26 token_without_expiry = "{}\n"
27
28
29 def add_token_with_expiry(token, file, ttl):
30 """
31 This method will add a token to the token file with or without expiry
32 Expiry time is in seconds.
33
34 Format of the item in the file: <token>|<expiry in seconds>
35
36 :param str token: The token to add to the file
37 :param str file: The file name for which the token will be written to
38 :param ttl: How long the token should last before expiry, represented in seconds.
39 """
40
41 with open(file, "a+") as fp:
42 if ttl != -1:
43 expiry = int(round(time.time())) + ttl
44 fp.write(token_with_expiry.format(token, expiry))
45 else:
46 fp.write(token_without_expiry.format(token))
47
48
49 def run_util(*args, debug=False):
50 env = os.environ.copy()
51 prog = ["bash", utils_sh_file]
52 prog.extend(args)
53
54 if debug:
55 print("\033[;1;32m+ %s\033[;0;0m" % " ".join(prog))
56
57 result = subprocess.run(
58 prog,
59 stdin=subprocess.PIPE,
60 stdout=subprocess.PIPE,
61 stderr=subprocess.PIPE,
62 env=env,
63 )
64
65 try:
66 result.check_returncode()
67 except subprocess.CalledProcessError:
68 print("Failed to call utility function.")
69 sys.exit(1)
70
71 return result.stdout.decode("utf-8").strip()
72
73
74 def get_network_info():
75 """
76 Obtain machine IP address(es) and cluster agent port.
77 :return: tuple of default IP, all IPs, and cluster agent port
78 """
79 default_ip = run_util("get_default_ip")
80 all_ips = run_util("get_ips").split(" ")
81 port = run_util("cluster_agent_port")
82
83 return (default_ip, all_ips, port)
84
85
86 def print_pretty(token, check):
87 default_ip, all_ips, port = get_network_info()
88
89 print("From the node you wish to join to this cluster, run the following:")
90 print(f"microk8s join {default_ip}:{port}/{token}/{check}\n")
91
92 if is_node_running_dqlite():
93 print(
94 "Use the '--worker' flag to join a node as a worker not running the control plane, eg:"
95 )
96 print(f"microk8s join {default_ip}:{port}/{token}/{check} --worker\n")
97
98 print(
99 "If the node you are adding is not reachable through the default interface you can use one of the following:"
100 )
101 for ip in all_ips:
102 print(f"microk8s join {ip}:{port}/{token}/{check}")
103
104
105 def get_output_dict(token, check):
106 _, all_ips, port = get_network_info()
107 info = {
108 "token": f"{token}/{check}",
109 "urls": [f"{ip}:{port}/{token}/{check}" for ip in all_ips],
110 }
111 return info
112
113
114 def print_json(token, check):
115 info = get_output_dict(token, check)
116 print(json.dumps(info, indent=2))
117
118
119 def print_yaml(token, check):
120 info = get_output_dict(token, check)
121 print(yaml.dump(info, indent=2))
122
123
124 def print_short(token, check):
125 default_ip, all_ips, port = get_network_info()
126
127 print(f"microk8s join {default_ip}:{port}/{token}/{check}")
128 for ip in all_ips:
129 print(f"microk8s join {ip}:{port}/{token}/{check}")
130
131
132 if __name__ == "__main__":
133
134 # initiate the parser with a description
135 parser = argparse.ArgumentParser(
136 description="Produce a connection string for a node to join the cluster.",
137 prog="microk8s add-node",
138 )
139 parser.add_argument(
140 "--token-ttl",
141 "-l",
142 help="Specify how long the token is valid, before it expires. "
143 'Value of "-1" indicates that the token is usable only once '
144 "(i.e. after joining a node, the token becomes invalid)",
145 type=int,
146 default="-1",
147 )
148 parser.add_argument(
149 "--token",
150 "-t",
151 help="Specify the bootstrap token to add, must be 32 characters long. "
152 "Auto generates when empty.",
153 )
154 parser.add_argument(
155 "--format",
156 help="Format the output of the token in pretty, short, token, or token-check",
157 default="pretty",
158 choices={"pretty", "short", "token", "token-check", "json", "yaml"},
159 )
160
161 # read arguments from the command line
162 args = parser.parse_args()
163
164 ttl = args.token_ttl
165
166 if args.token is not None:
167 token = args.token
168 else:
169 token = token_hex(16)
170
171 if len(token) < 32:
172 print("Invalid token size. It must be 32 characters long.")
173 exit(1)
174
175 add_token_with_expiry(token, cluster_tokens_file, ttl)
176 check = run_util("server_cert_check")
177
178 # NOTE(akolaitis): if the server certificate has changed after the cluster-agent
179 # service started, the server hash will be out of date. make sure to restart
180 # the cluster-agent in this case, otherwise the joining nodes will be unable
181 # to verify.
182 context = ssl._create_unverified_context()
183 conn = http.client.HTTPSConnection("127.0.0.1:25000", context=context)
184 conn.connect()
185 der_cert_bin = conn.sock.getpeercert(True)
186 conn.close()
187 peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()
188 if not peer_cert_hash.startswith(check):
189 print("Restarting cluster-agent to load new server certificate")
190 subprocess.check_call(["snapctl", "restart", "microk8s.daemon-cluster-agent"])
191
192 if args.format == "pretty":
193 print_pretty(token, check)
194 elif args.format == "short":
195 print_short(token, check)
196 elif args.format == "token-check":
197 print(f"{token}/{check}")
198 elif args.format == "json":
199 print_json(token, check)
200 elif args.format == "yaml":
201 print_yaml(token, check)
202 else:
203 print(token)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/cluster/add_token.py b/scripts/cluster/add_token.py
--- a/scripts/cluster/add_token.py
+++ b/scripts/cluster/add_token.py
@@ -5,9 +5,6 @@
import time
import argparse
import subprocess
-import hashlib
-import ssl
-import http.client
from common.utils import is_node_running_dqlite
@@ -175,20 +172,6 @@
add_token_with_expiry(token, cluster_tokens_file, ttl)
check = run_util("server_cert_check")
- # NOTE(akolaitis): if the server certificate has changed after the cluster-agent
- # service started, the server hash will be out of date. make sure to restart
- # the cluster-agent in this case, otherwise the joining nodes will be unable
- # to verify.
- context = ssl._create_unverified_context()
- conn = http.client.HTTPSConnection("127.0.0.1:25000", context=context)
- conn.connect()
- der_cert_bin = conn.sock.getpeercert(True)
- conn.close()
- peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()
- if not peer_cert_hash.startswith(check):
- print("Restarting cluster-agent to load new server certificate")
- subprocess.check_call(["snapctl", "restart", "microk8s.daemon-cluster-agent"])
-
if args.format == "pretty":
print_pretty(token, check)
elif args.format == "short":
|
{"golden_diff": "diff --git a/scripts/cluster/add_token.py b/scripts/cluster/add_token.py\n--- a/scripts/cluster/add_token.py\n+++ b/scripts/cluster/add_token.py\n@@ -5,9 +5,6 @@\n import time\n import argparse\n import subprocess\n-import hashlib\n-import ssl\n-import http.client\n \n from common.utils import is_node_running_dqlite\n \n@@ -175,20 +172,6 @@\n add_token_with_expiry(token, cluster_tokens_file, ttl)\n check = run_util(\"server_cert_check\")\n \n- # NOTE(akolaitis): if the server certificate has changed after the cluster-agent\n- # service started, the server hash will be out of date. make sure to restart\n- # the cluster-agent in this case, otherwise the joining nodes will be unable\n- # to verify.\n- context = ssl._create_unverified_context()\n- conn = http.client.HTTPSConnection(\"127.0.0.1:25000\", context=context)\n- conn.connect()\n- der_cert_bin = conn.sock.getpeercert(True)\n- conn.close()\n- peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()\n- if not peer_cert_hash.startswith(check):\n- print(\"Restarting cluster-agent to load new server certificate\")\n- subprocess.check_call([\"snapctl\", \"restart\", \"microk8s.daemon-cluster-agent\"])\n-\n if args.format == \"pretty\":\n print_pretty(token, check)\n elif args.format == \"short\":\n", "issue": "latest/edge: server cert generation failure too verbose\n```\r\nRestarting cluster-agent to load new server certificate\r\n\r\nerror: error running snapctl: snap \"microk8s\" has \"service-control\" change in progress\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/snap/microk8s/3125/scripts/cluster/add_token.py\", line 190, in <module>\r\n\r\n subprocess.check_call([\"snapctl\", \"restart\", \"microk8s.daemon-cluster-agent\"])\r\n\r\n File \"/snap/microk8s/3125/usr/lib/python3.6/subprocess.py\", line 311, in check_call\r\n\r\n raise CalledProcessError(retcode, cmd)\r\n\r\nsubprocess.CalledProcessError: Command '['snapctl', 'restart', 'microk8s.daemon-cluster-agent']' returned non-zero exit status 1.\r\n\r\n```\r\n\r\nThe action failing is fine, but the call stack being printed is un-necessary \n", "before_files": [{"content": "import json\nimport yaml\nimport os\nimport sys\nimport time\nimport argparse\nimport subprocess\nimport hashlib\nimport ssl\nimport http.client\n\nfrom common.utils import is_node_running_dqlite\n\ntry:\n from secrets import token_hex\nexcept ImportError:\n from os import urandom\n\n def token_hex(nbytes=None):\n return urandom(nbytes).hex()\n\n\ncluster_tokens_file = os.path.expandvars(\"${SNAP_DATA}/credentials/cluster-tokens.txt\")\nutils_sh_file = os.path.expandvars(\"${SNAP}/actions/common/utils.sh\")\ntoken_with_expiry = \"{}|{}\\n\"\ntoken_without_expiry = \"{}\\n\"\n\n\ndef add_token_with_expiry(token, file, ttl):\n \"\"\"\n This method will add a token to the token file with or without expiry\n Expiry time is in seconds.\n\n Format of the item in the file: <token>|<expiry in seconds>\n\n :param str token: The token to add to the file\n :param str file: The file name for which the token will be written to\n :param ttl: How long the token should last before expiry, represented in seconds.\n \"\"\"\n\n with open(file, \"a+\") as fp:\n if ttl != -1:\n expiry = int(round(time.time())) + ttl\n fp.write(token_with_expiry.format(token, expiry))\n else:\n fp.write(token_without_expiry.format(token))\n\n\ndef run_util(*args, debug=False):\n env = os.environ.copy()\n prog = [\"bash\", utils_sh_file]\n prog.extend(args)\n\n if debug:\n print(\"\\033[;1;32m+ %s\\033[;0;0m\" % \" 
\".join(prog))\n\n result = subprocess.run(\n prog,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env,\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError:\n print(\"Failed to call utility function.\")\n sys.exit(1)\n\n return result.stdout.decode(\"utf-8\").strip()\n\n\ndef get_network_info():\n \"\"\"\n Obtain machine IP address(es) and cluster agent port.\n :return: tuple of default IP, all IPs, and cluster agent port\n \"\"\"\n default_ip = run_util(\"get_default_ip\")\n all_ips = run_util(\"get_ips\").split(\" \")\n port = run_util(\"cluster_agent_port\")\n\n return (default_ip, all_ips, port)\n\n\ndef print_pretty(token, check):\n default_ip, all_ips, port = get_network_info()\n\n print(\"From the node you wish to join to this cluster, run the following:\")\n print(f\"microk8s join {default_ip}:{port}/{token}/{check}\\n\")\n\n if is_node_running_dqlite():\n print(\n \"Use the '--worker' flag to join a node as a worker not running the control plane, eg:\"\n )\n print(f\"microk8s join {default_ip}:{port}/{token}/{check} --worker\\n\")\n\n print(\n \"If the node you are adding is not reachable through the default interface you can use one of the following:\"\n )\n for ip in all_ips:\n print(f\"microk8s join {ip}:{port}/{token}/{check}\")\n\n\ndef get_output_dict(token, check):\n _, all_ips, port = get_network_info()\n info = {\n \"token\": f\"{token}/{check}\",\n \"urls\": [f\"{ip}:{port}/{token}/{check}\" for ip in all_ips],\n }\n return info\n\n\ndef print_json(token, check):\n info = get_output_dict(token, check)\n print(json.dumps(info, indent=2))\n\n\ndef print_yaml(token, check):\n info = get_output_dict(token, check)\n print(yaml.dump(info, indent=2))\n\n\ndef print_short(token, check):\n default_ip, all_ips, port = get_network_info()\n\n print(f\"microk8s join {default_ip}:{port}/{token}/{check}\")\n for ip in all_ips:\n print(f\"microk8s join {ip}:{port}/{token}/{check}\")\n\n\nif __name__ == \"__main__\":\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description=\"Produce a connection string for a node to join the cluster.\",\n prog=\"microk8s add-node\",\n )\n parser.add_argument(\n \"--token-ttl\",\n \"-l\",\n help=\"Specify how long the token is valid, before it expires. \"\n 'Value of \"-1\" indicates that the token is usable only once '\n \"(i.e. after joining a node, the token becomes invalid)\",\n type=int,\n default=\"-1\",\n )\n parser.add_argument(\n \"--token\",\n \"-t\",\n help=\"Specify the bootstrap token to add, must be 32 characters long. \"\n \"Auto generates when empty.\",\n )\n parser.add_argument(\n \"--format\",\n help=\"Format the output of the token in pretty, short, token, or token-check\",\n default=\"pretty\",\n choices={\"pretty\", \"short\", \"token\", \"token-check\", \"json\", \"yaml\"},\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n ttl = args.token_ttl\n\n if args.token is not None:\n token = args.token\n else:\n token = token_hex(16)\n\n if len(token) < 32:\n print(\"Invalid token size. It must be 32 characters long.\")\n exit(1)\n\n add_token_with_expiry(token, cluster_tokens_file, ttl)\n check = run_util(\"server_cert_check\")\n\n # NOTE(akolaitis): if the server certificate has changed after the cluster-agent\n # service started, the server hash will be out of date. 
make sure to restart\n # the cluster-agent in this case, otherwise the joining nodes will be unable\n # to verify.\n context = ssl._create_unverified_context()\n conn = http.client.HTTPSConnection(\"127.0.0.1:25000\", context=context)\n conn.connect()\n der_cert_bin = conn.sock.getpeercert(True)\n conn.close()\n peer_cert_hash = hashlib.sha256(der_cert_bin).hexdigest()\n if not peer_cert_hash.startswith(check):\n print(\"Restarting cluster-agent to load new server certificate\")\n subprocess.check_call([\"snapctl\", \"restart\", \"microk8s.daemon-cluster-agent\"])\n\n if args.format == \"pretty\":\n print_pretty(token, check)\n elif args.format == \"short\":\n print_short(token, check)\n elif args.format == \"token-check\":\n print(f\"{token}/{check}\")\n elif args.format == \"json\":\n print_json(token, check)\n elif args.format == \"yaml\":\n print_yaml(token, check)\n else:\n print(token)\n", "path": "scripts/cluster/add_token.py"}], "after_files": [{"content": "import json\nimport yaml\nimport os\nimport sys\nimport time\nimport argparse\nimport subprocess\n\nfrom common.utils import is_node_running_dqlite\n\ntry:\n from secrets import token_hex\nexcept ImportError:\n from os import urandom\n\n def token_hex(nbytes=None):\n return urandom(nbytes).hex()\n\n\ncluster_tokens_file = os.path.expandvars(\"${SNAP_DATA}/credentials/cluster-tokens.txt\")\nutils_sh_file = os.path.expandvars(\"${SNAP}/actions/common/utils.sh\")\ntoken_with_expiry = \"{}|{}\\n\"\ntoken_without_expiry = \"{}\\n\"\n\n\ndef add_token_with_expiry(token, file, ttl):\n \"\"\"\n This method will add a token to the token file with or without expiry\n Expiry time is in seconds.\n\n Format of the item in the file: <token>|<expiry in seconds>\n\n :param str token: The token to add to the file\n :param str file: The file name for which the token will be written to\n :param ttl: How long the token should last before expiry, represented in seconds.\n \"\"\"\n\n with open(file, \"a+\") as fp:\n if ttl != -1:\n expiry = int(round(time.time())) + ttl\n fp.write(token_with_expiry.format(token, expiry))\n else:\n fp.write(token_without_expiry.format(token))\n\n\ndef run_util(*args, debug=False):\n env = os.environ.copy()\n prog = [\"bash\", utils_sh_file]\n prog.extend(args)\n\n if debug:\n print(\"\\033[;1;32m+ %s\\033[;0;0m\" % \" \".join(prog))\n\n result = subprocess.run(\n prog,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env,\n )\n\n try:\n result.check_returncode()\n except subprocess.CalledProcessError:\n print(\"Failed to call utility function.\")\n sys.exit(1)\n\n return result.stdout.decode(\"utf-8\").strip()\n\n\ndef get_network_info():\n \"\"\"\n Obtain machine IP address(es) and cluster agent port.\n :return: tuple of default IP, all IPs, and cluster agent port\n \"\"\"\n default_ip = run_util(\"get_default_ip\")\n all_ips = run_util(\"get_ips\").split(\" \")\n port = run_util(\"cluster_agent_port\")\n\n return (default_ip, all_ips, port)\n\n\ndef print_pretty(token, check):\n default_ip, all_ips, port = get_network_info()\n\n print(\"From the node you wish to join to this cluster, run the following:\")\n print(f\"microk8s join {default_ip}:{port}/{token}/{check}\\n\")\n\n if is_node_running_dqlite():\n print(\n \"Use the '--worker' flag to join a node as a worker not running the control plane, eg:\"\n )\n print(f\"microk8s join {default_ip}:{port}/{token}/{check} --worker\\n\")\n\n print(\n \"If the node you are adding is not reachable through the default interface you can use 
one of the following:\"\n )\n for ip in all_ips:\n print(f\"microk8s join {ip}:{port}/{token}/{check}\")\n\n\ndef get_output_dict(token, check):\n _, all_ips, port = get_network_info()\n info = {\n \"token\": f\"{token}/{check}\",\n \"urls\": [f\"{ip}:{port}/{token}/{check}\" for ip in all_ips],\n }\n return info\n\n\ndef print_json(token, check):\n info = get_output_dict(token, check)\n print(json.dumps(info, indent=2))\n\n\ndef print_yaml(token, check):\n info = get_output_dict(token, check)\n print(yaml.dump(info, indent=2))\n\n\ndef print_short(token, check):\n default_ip, all_ips, port = get_network_info()\n\n print(f\"microk8s join {default_ip}:{port}/{token}/{check}\")\n for ip in all_ips:\n print(f\"microk8s join {ip}:{port}/{token}/{check}\")\n\n\nif __name__ == \"__main__\":\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description=\"Produce a connection string for a node to join the cluster.\",\n prog=\"microk8s add-node\",\n )\n parser.add_argument(\n \"--token-ttl\",\n \"-l\",\n help=\"Specify how long the token is valid, before it expires. \"\n 'Value of \"-1\" indicates that the token is usable only once '\n \"(i.e. after joining a node, the token becomes invalid)\",\n type=int,\n default=\"-1\",\n )\n parser.add_argument(\n \"--token\",\n \"-t\",\n help=\"Specify the bootstrap token to add, must be 32 characters long. \"\n \"Auto generates when empty.\",\n )\n parser.add_argument(\n \"--format\",\n help=\"Format the output of the token in pretty, short, token, or token-check\",\n default=\"pretty\",\n choices={\"pretty\", \"short\", \"token\", \"token-check\", \"json\", \"yaml\"},\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n ttl = args.token_ttl\n\n if args.token is not None:\n token = args.token\n else:\n token = token_hex(16)\n\n if len(token) < 32:\n print(\"Invalid token size. It must be 32 characters long.\")\n exit(1)\n\n add_token_with_expiry(token, cluster_tokens_file, ttl)\n check = run_util(\"server_cert_check\")\n\n if args.format == \"pretty\":\n print_pretty(token, check)\n elif args.format == \"short\":\n print_short(token, check)\n elif args.format == \"token-check\":\n print(f\"{token}/{check}\")\n elif args.format == \"json\":\n print_json(token, check)\n elif args.format == \"yaml\":\n print_yaml(token, check)\n else:\n print(token)\n", "path": "scripts/cluster/add_token.py"}]}
| 2,470 | 333 |
gh_patches_debug_7280
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-4428
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FAQ audience confusion
This FAQ item is unfortunately causing some local support problems for our institution:
https://github.com/jupyterhub/jupyterhub/blob/38ba275367936c9a5a081a7b5acf326134705a0a/docs/source/faq/faq.md?plain=1#L3
As I understand, this FAQ item is written for a sophisticated end-user who is setting up their own JupyterHub on their own servers. They have sysadmin access to the filesystems to configure it so that users on their system can easily share notebooks using these instructions, e.g. with [JupyterLab "copy shareable link"](https://jupyterlab.readthedocs.io/en/stable/user/files.html).
However, these instructions cause support problems downstream when our faculty assume they can use them to let their students share links to their notebooks. Of course, this depends on how the institution's JupyterHub is configured on the backend.
If you could clarify this FAQ item for these audiences, I think that would help reduce frustration for general end users who search Google for how to share a notebook link and stumble onto this entry. I think what most of them are searching for is [RTC (Real Time Collaboration) link sharing](https://jupyterlab.readthedocs.io/en/stable/user/rtc.html), which (as of March 2023) is not yet ready for prime-time and depends on the JupyterHub server environment being configured for it, which most are not. Sometimes what they are trying to do can be accomplished with the [nbgitpuller link generator](https://hub.jupyter.org/nbgitpuller/link), but I think most people are looking for RTC.
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for Sphinx to build our documentation to HTML.
2 #
3 # Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html
4 #
5 import contextlib
6 import datetime
7 import io
8 import os
9 import subprocess
10
11 from docutils import nodes
12 from sphinx.directives.other import SphinxDirective
13
14 import jupyterhub
15 from jupyterhub.app import JupyterHub
16
17 # -- Project information -----------------------------------------------------
18 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
19 #
20 project = "JupyterHub"
21 author = "Project Jupyter Contributors"
22 copyright = f"{datetime.date.today().year}, {author}"
23
24
25 # -- General Sphinx configuration --------------------------------------------
26 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
27 #
28 extensions = [
29 "sphinx.ext.autodoc",
30 "sphinx.ext.intersphinx",
31 "sphinx.ext.napoleon",
32 "autodoc_traits",
33 "sphinx_copybutton",
34 "sphinx-jsonschema",
35 "sphinxext.opengraph",
36 "sphinxext.rediraffe",
37 "jupyterhub_sphinx_theme",
38 "myst_parser",
39 ]
40 root_doc = "index"
41 source_suffix = [".md"]
42 # default_role let's use use `foo` instead of ``foo`` in rST
43 default_role = "literal"
44
45
46 # -- MyST configuration ------------------------------------------------------
47 # ref: https://myst-parser.readthedocs.io/en/latest/configuration.html
48 #
49 myst_heading_anchors = 2
50
51 myst_enable_extensions = [
52 # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html
53 "attrs_inline",
54 "colon_fence",
55 "deflist",
56 "fieldlist",
57 "substitution",
58 ]
59
60 myst_substitutions = {
61 # date example: Dev 07, 2022
62 "date": datetime.date.today().strftime("%b %d, %Y").title(),
63 "version": jupyterhub.__version__,
64 }
65
66
67 # -- Custom directives to generate documentation -----------------------------
68 # ref: https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html
69 #
70 # We define custom directives to help us generate documentation using Python on
71 # demand when referenced from our documentation files.
72 #
73
74 # Create a temp instance of JupyterHub for use by two separate directive classes
75 # to get the output from using the "--generate-config" and "--help-all" CLI
76 # flags respectively.
77 #
78 jupyterhub_app = JupyterHub()
79
80
81 class ConfigDirective(SphinxDirective):
82 """Generate the configuration file output for use in the documentation."""
83
84 has_content = False
85 required_arguments = 0
86 optional_arguments = 0
87 final_argument_whitespace = False
88 option_spec = {}
89
90 def run(self):
91 # The generated configuration file for this version
92 generated_config = jupyterhub_app.generate_config_file()
93 # post-process output
94 home_dir = os.environ["HOME"]
95 generated_config = generated_config.replace(home_dir, "$HOME", 1)
96 par = nodes.literal_block(text=generated_config)
97 return [par]
98
99
100 class HelpAllDirective(SphinxDirective):
101 """Print the output of jupyterhub help --all for use in the documentation."""
102
103 has_content = False
104 required_arguments = 0
105 optional_arguments = 0
106 final_argument_whitespace = False
107 option_spec = {}
108
109 def run(self):
110 # The output of the help command for this version
111 buffer = io.StringIO()
112 with contextlib.redirect_stdout(buffer):
113 jupyterhub_app.print_help("--help-all")
114 all_help = buffer.getvalue()
115 # post-process output
116 home_dir = os.environ["HOME"]
117 all_help = all_help.replace(home_dir, "$HOME", 1)
118 par = nodes.literal_block(text=all_help)
119 return [par]
120
121
122 def setup(app):
123 app.add_css_file("custom.css")
124 app.add_directive("jupyterhub-generate-config", ConfigDirective)
125 app.add_directive("jupyterhub-help-all", HelpAllDirective)
126
127
128 # -- Read The Docs -----------------------------------------------------------
129 #
130 # Since RTD runs sphinx-build directly without running "make html", we run the
131 # pre-requisite steps for "make html" from here if needed.
132 #
133 if os.environ.get("READTHEDOCS"):
134 docs = os.path.dirname(os.path.dirname(__file__))
135 subprocess.check_call(["make", "metrics", "scopes"], cwd=docs)
136
137
138 # -- Spell checking ----------------------------------------------------------
139 # ref: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#configuration-options
140 #
141 # The "sphinxcontrib.spelling" extension is optionally enabled if its available.
142 #
143 try:
144 import sphinxcontrib.spelling # noqa
145 except ImportError:
146 pass
147 else:
148 extensions.append("sphinxcontrib.spelling")
149 spelling_word_list_filename = "spelling_wordlist.txt"
150
151
152 # -- Options for HTML output -------------------------------------------------
153 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
154 #
155 html_logo = "_static/images/logo/logo.png"
156 html_favicon = "_static/images/logo/favicon.ico"
157 html_static_path = ["_static"]
158
159 html_theme = "jupyterhub_sphinx_theme"
160 html_theme_options = {
161 "icon_links": [
162 {
163 "name": "GitHub",
164 "url": "https://github.com/jupyterhub/jupyterhub",
165 "icon": "fa-brands fa-github",
166 },
167 ],
168 "use_edit_page_button": True,
169 "navbar_align": "left",
170 }
171 html_context = {
172 "github_user": "jupyterhub",
173 "github_repo": "jupyterhub",
174 "github_version": "main",
175 "doc_path": "docs/source",
176 }
177
178
179 # -- Options for linkcheck builder -------------------------------------------
180 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
181 #
182 linkcheck_ignore = [
183 r"(.*)github\.com(.*)#", # javascript based anchors
184 r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case
185 r"https://github.com/[^/]*$", # too many github usernames / searches in changelog
186 "https://github.com/jupyterhub/jupyterhub/pull/", # too many PRs in changelog
187 "https://github.com/jupyterhub/jupyterhub/compare/", # too many comparisons in changelog
188 r"https?://(localhost|127.0.0.1).*", # ignore localhost references in auto-links
189 r".*/rest-api.html#.*", # ignore javascript-resolved internal rest-api links
190 r"https://jupyter.chameleoncloud.org", # FIXME: ignore (presumably) short-term SSL issue
191 ]
192 linkcheck_anchors_ignore = [
193 "/#!",
194 "/#%21",
195 ]
196
197 # -- Intersphinx -------------------------------------------------------------
198 # ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration
199 #
200 intersphinx_mapping = {
201 "python": ("https://docs.python.org/3/", None),
202 "tornado": ("https://www.tornadoweb.org/en/stable/", None),
203 "jupyter-server": ("https://jupyter-server.readthedocs.io/en/stable/", None),
204 }
205
206 # -- Options for the opengraph extension -------------------------------------
207 # ref: https://github.com/wpilibsuite/sphinxext-opengraph#options
208 #
209 # ogp_site_url is set automatically by RTD
210 ogp_image = "_static/logo.png"
211 ogp_use_first_image = True
212
213
214 # -- Options for the rediraffe extension -------------------------------------
215 # ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme
216 #
217 # This extension helps us relocate content without breaking links. If a
218 # document is moved internally, a redirect link should be configured as below to
219 # help us not break links.
220 #
221 # The workflow for adding redirects can be as follows:
222 # 1. Change "rediraffe_branch" below to point to the commit/ branch you
223 # want to base off the changes.
224 # 2. Option 1: run "make rediraffecheckdiff"
225 # a. Analyze the output of this command.
226 # b. Manually add the redirect entries to the "redirects.txt" file.
227 # Option 2: run "make rediraffewritediff"
228 # a. rediraffe will then automatically add the obvious redirects to redirects.txt.
229 # b. Analyze the output of the command for broken links.
230 # c. Check the "redirects.txt" file for any files that were moved/ renamed but are not listed.
231 # d. Manually add the redirects that have been mised by the automatic builder to "redirects.txt".
232 # Option 3: Do not use the commands above and, instead, do everything manually - by taking
233 # note of the files you have moved or renamed and adding them to the "redirects.txt" file.
234 #
235 # If you are basing changes off another branch/ commit, always change back
236 # rediraffe_branch to main before pushing your changes upstream.
237 #
238 rediraffe_branch = os.environ.get("REDIRAFFE_BRANCH", "main")
239 rediraffe_redirects = "redirects.txt"
240
241 # allow 80% match for autogenerated redirects
242 rediraffe_auto_redirect_perc = 80
243
244 # rediraffe_redirects = {
245 # "old-file": "new-folder/new-file-name",
246 # }
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -201,6 +201,7 @@
"python": ("https://docs.python.org/3/", None),
"tornado": ("https://www.tornadoweb.org/en/stable/", None),
"jupyter-server": ("https://jupyter-server.readthedocs.io/en/stable/", None),
+ "nbgitpuller": ("https://nbgitpuller.readthedocs.io/en/latest", None),
}
# -- Options for the opengraph extension -------------------------------------
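Editorial note on the one-line patch above: registering nbgitpuller under the `nbgitpuller` key in `intersphinx_mapping` lets the reworded FAQ cross-reference nbgitpuller's documentation by inventory prefix instead of hard-coding URLs. A hedged sketch of how such an entry is typically exercised — the concrete reference targets depend on what nbgitpuller's `objects.inv` actually exposes and are not defined by this patch:

```python
# docs/source/conf.py (fragment): the mapping key becomes the cross-reference prefix,
# e.g. {doc}`nbgitpuller:...` or {ref}`nbgitpuller:...` in MyST pages such as faq.md.
intersphinx_mapping = {
    "nbgitpuller": ("https://nbgitpuller.readthedocs.io/en/latest", None),
}

# To list the documents/labels the inventory provides before writing references, run:
#   python -m sphinx.ext.intersphinx https://nbgitpuller.readthedocs.io/en/latest/objects.inv
```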
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -201,6 +201,7 @@\n \"python\": (\"https://docs.python.org/3/\", None),\n \"tornado\": (\"https://www.tornadoweb.org/en/stable/\", None),\n \"jupyter-server\": (\"https://jupyter-server.readthedocs.io/en/stable/\", None),\n+ \"nbgitpuller\": (\"https://nbgitpuller.readthedocs.io/en/latest\", None),\n }\n \n # -- Options for the opengraph extension -------------------------------------\n", "issue": "FAQ audience confusion\nThis FAQ item is unfortunately causing some local support problems for our institution:\r\n\r\nhttps://github.com/jupyterhub/jupyterhub/blob/38ba275367936c9a5a081a7b5acf326134705a0a/docs/source/faq/faq.md?plain=1#L3\r\n\r\nAs I understand, this FAQ item is written for a sophisticated end-user who is setting up their own jupterhub on their own servers. They have sysadmin access to the filesystems to configure it so that users on their system can easily share notebooks using these instructions, e.g. with [Jupyterlab \"copy shareable link\"](https://jupyterlab.readthedocs.io/en/stable/user/files.html).\r\n\r\nHowever, these instructions cause support problems downstream when our faculty assume they can use these instructions to allow their students to share links to their notebooks. Of course, this depends on how the institution's jupyterhub is configured on the backend.\r\n\r\nIf you could clarify this FAQ item for these audiences, I think that would help reduce frustration for general end users who search google for how to share a notebook link and stumble onto this entry. I think what most of them are searching for is [RTC (Real Time Collaboration) link sharing](https://jupyterlab.readthedocs.io/en/stable/user/rtc.html) which (as of March 2023) is not yet ready for prime-time and depends on the jupyterhub server environment configured for it, which most are not. 
Sometimes what they are trying to do can be accomplished with [nbgitpuller link generator](https://hub.jupyter.org/nbgitpuller/link), but I think most people are looking for RTC.\r\n\r\nThanks!\n", "before_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\nimport contextlib\nimport datetime\nimport io\nimport os\nimport subprocess\n\nfrom docutils import nodes\nfrom sphinx.directives.other import SphinxDirective\n\nimport jupyterhub\nfrom jupyterhub.app import JupyterHub\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n#\nproject = \"JupyterHub\"\nauthor = \"Project Jupyter Contributors\"\ncopyright = f\"{datetime.date.today().year}, {author}\"\n\n\n# -- General Sphinx configuration --------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n#\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"autodoc_traits\",\n \"sphinx_copybutton\",\n \"sphinx-jsonschema\",\n \"sphinxext.opengraph\",\n \"sphinxext.rediraffe\",\n \"jupyterhub_sphinx_theme\",\n \"myst_parser\",\n]\nroot_doc = \"index\"\nsource_suffix = [\".md\"]\n# default_role let's use use `foo` instead of ``foo`` in rST\ndefault_role = \"literal\"\n\n\n# -- MyST configuration ------------------------------------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/configuration.html\n#\nmyst_heading_anchors = 2\n\nmyst_enable_extensions = [\n # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html\n \"attrs_inline\",\n \"colon_fence\",\n \"deflist\",\n \"fieldlist\",\n \"substitution\",\n]\n\nmyst_substitutions = {\n # date example: Dev 07, 2022\n \"date\": datetime.date.today().strftime(\"%b %d, %Y\").title(),\n \"version\": jupyterhub.__version__,\n}\n\n\n# -- Custom directives to generate documentation -----------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html\n#\n# We define custom directives to help us generate documentation using Python on\n# demand when referenced from our documentation files.\n#\n\n# Create a temp instance of JupyterHub for use by two separate directive classes\n# to get the output from using the \"--generate-config\" and \"--help-all\" CLI\n# flags respectively.\n#\njupyterhub_app = JupyterHub()\n\n\nclass ConfigDirective(SphinxDirective):\n \"\"\"Generate the configuration file output for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The generated configuration file for this version\n generated_config = jupyterhub_app.generate_config_file()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n generated_config = generated_config.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=generated_config)\n return [par]\n\n\nclass HelpAllDirective(SphinxDirective):\n \"\"\"Print the output of jupyterhub help --all for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The output of the help command for this version\n buffer = io.StringIO()\n 
with contextlib.redirect_stdout(buffer):\n jupyterhub_app.print_help(\"--help-all\")\n all_help = buffer.getvalue()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n all_help = all_help.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=all_help)\n return [par]\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_directive(\"jupyterhub-generate-config\", ConfigDirective)\n app.add_directive(\"jupyterhub-help-all\", HelpAllDirective)\n\n\n# -- Read The Docs -----------------------------------------------------------\n#\n# Since RTD runs sphinx-build directly without running \"make html\", we run the\n# pre-requisite steps for \"make html\" from here if needed.\n#\nif os.environ.get(\"READTHEDOCS\"):\n docs = os.path.dirname(os.path.dirname(__file__))\n subprocess.check_call([\"make\", \"metrics\", \"scopes\"], cwd=docs)\n\n\n# -- Spell checking ----------------------------------------------------------\n# ref: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#configuration-options\n#\n# The \"sphinxcontrib.spelling\" extension is optionally enabled if its available.\n#\ntry:\n import sphinxcontrib.spelling # noqa\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\nspelling_word_list_filename = \"spelling_wordlist.txt\"\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n#\nhtml_logo = \"_static/images/logo/logo.png\"\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_static_path = [\"_static\"]\n\nhtml_theme = \"jupyterhub_sphinx_theme\"\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/jupyterhub/jupyterhub\",\n \"icon\": \"fa-brands fa-github\",\n },\n ],\n \"use_edit_page_button\": True,\n \"navbar_align\": \"left\",\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"jupyterhub\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\n#\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n \"https://github.com/jupyterhub/jupyterhub/pull/\", # too many PRs in changelog\n \"https://github.com/jupyterhub/jupyterhub/compare/\", # too many comparisons in changelog\n r\"https?://(localhost|127.0.0.1).*\", # ignore localhost references in auto-links\n r\".*/rest-api.html#.*\", # ignore javascript-resolved internal rest-api links\n r\"https://jupyter.chameleoncloud.org\", # FIXME: ignore (presumably) short-term SSL issue\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n# -- Intersphinx -------------------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration\n#\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"tornado\": (\"https://www.tornadoweb.org/en/stable/\", None),\n \"jupyter-server\": (\"https://jupyter-server.readthedocs.io/en/stable/\", None),\n}\n\n# -- Options for the opengraph extension -------------------------------------\n# ref: 
https://github.com/wpilibsuite/sphinxext-opengraph#options\n#\n# ogp_site_url is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n\n# -- Options for the rediraffe extension -------------------------------------\n# ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme\n#\n# This extension helps us relocate content without breaking links. If a\n# document is moved internally, a redirect link should be configured as below to\n# help us not break links.\n#\n# The workflow for adding redirects can be as follows:\n# 1. Change \"rediraffe_branch\" below to point to the commit/ branch you\n# want to base off the changes.\n# 2. Option 1: run \"make rediraffecheckdiff\"\n# a. Analyze the output of this command.\n# b. Manually add the redirect entries to the \"redirects.txt\" file.\n# Option 2: run \"make rediraffewritediff\"\n# a. rediraffe will then automatically add the obvious redirects to redirects.txt.\n# b. Analyze the output of the command for broken links.\n# c. Check the \"redirects.txt\" file for any files that were moved/ renamed but are not listed.\n# d. Manually add the redirects that have been mised by the automatic builder to \"redirects.txt\".\n# Option 3: Do not use the commands above and, instead, do everything manually - by taking\n# note of the files you have moved or renamed and adding them to the \"redirects.txt\" file.\n#\n# If you are basing changes off another branch/ commit, always change back\n# rediraffe_branch to main before pushing your changes upstream.\n#\nrediraffe_branch = os.environ.get(\"REDIRAFFE_BRANCH\", \"main\")\nrediraffe_redirects = \"redirects.txt\"\n\n# allow 80% match for autogenerated redirects\nrediraffe_auto_redirect_perc = 80\n\n# rediraffe_redirects = {\n# \"old-file\": \"new-folder/new-file-name\",\n# }\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\nimport contextlib\nimport datetime\nimport io\nimport os\nimport subprocess\n\nfrom docutils import nodes\nfrom sphinx.directives.other import SphinxDirective\n\nimport jupyterhub\nfrom jupyterhub.app import JupyterHub\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n#\nproject = \"JupyterHub\"\nauthor = \"Project Jupyter Contributors\"\ncopyright = f\"{datetime.date.today().year}, {author}\"\n\n\n# -- General Sphinx configuration --------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n#\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"autodoc_traits\",\n \"sphinx_copybutton\",\n \"sphinx-jsonschema\",\n \"sphinxext.opengraph\",\n \"sphinxext.rediraffe\",\n \"jupyterhub_sphinx_theme\",\n \"myst_parser\",\n]\nroot_doc = \"index\"\nsource_suffix = [\".md\"]\n# default_role let's use use `foo` instead of ``foo`` in rST\ndefault_role = \"literal\"\n\n\n# -- MyST configuration ------------------------------------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/configuration.html\n#\nmyst_heading_anchors = 2\n\nmyst_enable_extensions = [\n # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html\n \"attrs_inline\",\n \"colon_fence\",\n \"deflist\",\n \"fieldlist\",\n 
\"substitution\",\n]\n\nmyst_substitutions = {\n # date example: Dev 07, 2022\n \"date\": datetime.date.today().strftime(\"%b %d, %Y\").title(),\n \"version\": jupyterhub.__version__,\n}\n\n\n# -- Custom directives to generate documentation -----------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html\n#\n# We define custom directives to help us generate documentation using Python on\n# demand when referenced from our documentation files.\n#\n\n# Create a temp instance of JupyterHub for use by two separate directive classes\n# to get the output from using the \"--generate-config\" and \"--help-all\" CLI\n# flags respectively.\n#\njupyterhub_app = JupyterHub()\n\n\nclass ConfigDirective(SphinxDirective):\n \"\"\"Generate the configuration file output for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The generated configuration file for this version\n generated_config = jupyterhub_app.generate_config_file()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n generated_config = generated_config.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=generated_config)\n return [par]\n\n\nclass HelpAllDirective(SphinxDirective):\n \"\"\"Print the output of jupyterhub help --all for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The output of the help command for this version\n buffer = io.StringIO()\n with contextlib.redirect_stdout(buffer):\n jupyterhub_app.print_help(\"--help-all\")\n all_help = buffer.getvalue()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n all_help = all_help.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=all_help)\n return [par]\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_directive(\"jupyterhub-generate-config\", ConfigDirective)\n app.add_directive(\"jupyterhub-help-all\", HelpAllDirective)\n\n\n# -- Read The Docs -----------------------------------------------------------\n#\n# Since RTD runs sphinx-build directly without running \"make html\", we run the\n# pre-requisite steps for \"make html\" from here if needed.\n#\nif os.environ.get(\"READTHEDOCS\"):\n docs = os.path.dirname(os.path.dirname(__file__))\n subprocess.check_call([\"make\", \"metrics\", \"scopes\"], cwd=docs)\n\n\n# -- Spell checking ----------------------------------------------------------\n# ref: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#configuration-options\n#\n# The \"sphinxcontrib.spelling\" extension is optionally enabled if its available.\n#\ntry:\n import sphinxcontrib.spelling # noqa\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\nspelling_word_list_filename = \"spelling_wordlist.txt\"\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n#\nhtml_logo = \"_static/images/logo/logo.png\"\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_static_path = [\"_static\"]\n\nhtml_theme = \"jupyterhub_sphinx_theme\"\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/jupyterhub/jupyterhub\",\n \"icon\": \"fa-brands fa-github\",\n },\n ],\n \"use_edit_page_button\": 
True,\n \"navbar_align\": \"left\",\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"jupyterhub\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\n#\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n \"https://github.com/jupyterhub/jupyterhub/pull/\", # too many PRs in changelog\n \"https://github.com/jupyterhub/jupyterhub/compare/\", # too many comparisons in changelog\n r\"https?://(localhost|127.0.0.1).*\", # ignore localhost references in auto-links\n r\".*/rest-api.html#.*\", # ignore javascript-resolved internal rest-api links\n r\"https://jupyter.chameleoncloud.org\", # FIXME: ignore (presumably) short-term SSL issue\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n# -- Intersphinx -------------------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration\n#\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"tornado\": (\"https://www.tornadoweb.org/en/stable/\", None),\n \"jupyter-server\": (\"https://jupyter-server.readthedocs.io/en/stable/\", None),\n \"nbgitpuller\": (\"https://nbgitpuller.readthedocs.io/en/latest\", None),\n}\n\n# -- Options for the opengraph extension -------------------------------------\n# ref: https://github.com/wpilibsuite/sphinxext-opengraph#options\n#\n# ogp_site_url is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n\n# -- Options for the rediraffe extension -------------------------------------\n# ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme\n#\n# This extension helps us relocate content without breaking links. If a\n# document is moved internally, a redirect link should be configured as below to\n# help us not break links.\n#\n# The workflow for adding redirects can be as follows:\n# 1. Change \"rediraffe_branch\" below to point to the commit/ branch you\n# want to base off the changes.\n# 2. Option 1: run \"make rediraffecheckdiff\"\n# a. Analyze the output of this command.\n# b. Manually add the redirect entries to the \"redirects.txt\" file.\n# Option 2: run \"make rediraffewritediff\"\n# a. rediraffe will then automatically add the obvious redirects to redirects.txt.\n# b. Analyze the output of the command for broken links.\n# c. Check the \"redirects.txt\" file for any files that were moved/ renamed but are not listed.\n# d. Manually add the redirects that have been mised by the automatic builder to \"redirects.txt\".\n# Option 3: Do not use the commands above and, instead, do everything manually - by taking\n# note of the files you have moved or renamed and adding them to the \"redirects.txt\" file.\n#\n# If you are basing changes off another branch/ commit, always change back\n# rediraffe_branch to main before pushing your changes upstream.\n#\nrediraffe_branch = \"main\"\nrediraffe_redirects = \"redirects.txt\"\n# rediraffe_redirects = {\n# \"old-file\": \"new-folder/new-file-name\",\n# }\n", "path": "docs/source/conf.py"}]}
| 3,328 | 138 |
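The `HelpAllDirective` in the conf.py above relies on a small, reusable pattern: capturing a callable's console output with `contextlib.redirect_stdout` so it can be embedded in the rendered docs. Below is a minimal, self-contained sketch of that pattern; the helper name and the commented example call are illustrative, not part of the dataset row.

```python
# Capture everything a callable prints to stdout and return it as a string.
# This mirrors what HelpAllDirective does with jupyterhub_app.print_help("--help-all").
import contextlib
import io


def capture_stdout(fn, *args, **kwargs):
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        fn(*args, **kwargs)
    return buffer.getvalue()


# Example (assumes a JupyterHub() instance as in the conf.py above):
# help_text = capture_stdout(jupyterhub_app.print_help, "--help-all")
```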
gh_patches_debug_29733
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4879
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to Evaluate Final Result from condition
**Describe the issue**
CKV_GCP_43: "Ensure KMS encryption keys are rotated within a period of 90 days"
**Examples**
Check: CKV_GCP_43: "Ensure KMS encryption keys are rotated within a period of 90 days"
FAILED for resource: module.kms.google_kms_crypto_key.key
File: /main.tf:11-29
Calling File: /example/production/main.tf:1-6
Guide: https://docs.bridgecrew.io/docs/bc_gcp_general_4
11 | resource "google_kms_crypto_key" "key" {
12 | count = var.prevent_destroy ? length(var.keys) : 0
13 | name = var.keys[count.index]
14 | key_ring = google_kms_key_ring.key_ring.id
15 | rotation_period = contains(["ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"], var.purpose) ? null : var.key_rotation_period
16 | #rotation_period = var.key_rotation_period
17 | purpose = var.purpose
18 |
19 | lifecycle {
20 | prevent_destroy = true
21 | }
22 |
23 | version_template {
24 | algorithm = var.key_algorithm
25 | protection_level = var.key_protection_level
26 | }
27 |
28 | labels = var.labels
29 | }
Checkov should only raise an error for ASYMMETRIC key creation, not for KMS keys with the ENCRYPT_DECRYPT purpose. Even after setting the purpose to ENCRYPT_DECRYPT and the key_rotation_period variable to 90 days (7776000s), the check is failing.
**Version (please complete the following information):**
- Checkov Version 2.3.156
**Additional context**
`contains(["ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"], var.purpose) ? null : var.key_rotation_period`
The above line should be evaluated and marked as passed for GCP KMS, since ASYMMETRIC keys do not support automatic rotation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py`
Content:
```
1 from typing import Dict, List, Any
2
3 from checkov.common.util.type_forcers import force_int
4
5 from checkov.common.models.enums import CheckResult, CheckCategories
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8 # rotation_period time unit is seconds
9 ONE_DAY = 24 * 60 * 60
10 NINETY_DAYS = 90 * ONE_DAY
11
12
13 class GoogleKMSKeyRotationPeriod(BaseResourceCheck):
14 def __init__(self) -> None:
15 name = "Ensure KMS encryption keys are rotated within a period of 90 days"
16 id = "CKV_GCP_43"
17 supported_resources = ["google_kms_crypto_key"]
18 categories = [CheckCategories.GENERAL_SECURITY]
19 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
20
21 def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
22 self.evaluated_keys = ["rotation_period"]
23 rotation = conf.get("rotation_period")
24 if rotation and rotation[0]:
25 time = force_int(rotation[0][:-1])
26 if time and ONE_DAY <= time <= NINETY_DAYS:
27 return CheckResult.PASSED
28 return CheckResult.FAILED
29
30
31 check = GoogleKMSKeyRotationPeriod()
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py b/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py
--- a/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py
+++ b/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py
@@ -5,6 +5,7 @@
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+ASYMMETRIC_KEYS = {"ASYMMETRIC_DECRYPT", "ASYMMETRIC_SIGN"}
# rotation_period time unit is seconds
ONE_DAY = 24 * 60 * 60
NINETY_DAYS = 90 * ONE_DAY
@@ -14,11 +15,17 @@
def __init__(self) -> None:
name = "Ensure KMS encryption keys are rotated within a period of 90 days"
id = "CKV_GCP_43"
- supported_resources = ["google_kms_crypto_key"]
- categories = [CheckCategories.GENERAL_SECURITY]
+ supported_resources = ("google_kms_crypto_key",)
+ categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ purpose = conf.get("purpose")
+ if purpose and isinstance(purpose, list) and purpose[0] in ASYMMETRIC_KEYS:
+ # https://cloud.google.com/kms/docs/key-rotation#asymmetric
+ # automatic key rotation is not supported for asymmetric keys
+ return CheckResult.UNKNOWN
+
self.evaluated_keys = ["rotation_period"]
rotation = conf.get("rotation_period")
if rotation and rotation[0]:
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py b/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py\n--- a/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py\n+++ b/checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py\n@@ -5,6 +5,7 @@\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n+ASYMMETRIC_KEYS = {\"ASYMMETRIC_DECRYPT\", \"ASYMMETRIC_SIGN\"}\n # rotation_period time unit is seconds\n ONE_DAY = 24 * 60 * 60\n NINETY_DAYS = 90 * ONE_DAY\n@@ -14,11 +15,17 @@\n def __init__(self) -> None:\n name = \"Ensure KMS encryption keys are rotated within a period of 90 days\"\n id = \"CKV_GCP_43\"\n- supported_resources = [\"google_kms_crypto_key\"]\n- categories = [CheckCategories.GENERAL_SECURITY]\n+ supported_resources = (\"google_kms_crypto_key\",)\n+ categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ purpose = conf.get(\"purpose\")\n+ if purpose and isinstance(purpose, list) and purpose[0] in ASYMMETRIC_KEYS:\n+ # https://cloud.google.com/kms/docs/key-rotation#asymmetric\n+ # automatic key rotation is not supported for asymmetric keys\n+ return CheckResult.UNKNOWN\n+\n self.evaluated_keys = [\"rotation_period\"]\n rotation = conf.get(\"rotation_period\")\n if rotation and rotation[0]:\n", "issue": "Unable to Evaluate Final Result from condition \n**Describe the issue**\r\nCKV_GCP_43: \"Ensure KMS encryption keys are rotated within a period of 90 days\"\r\n\r\n**Examples**\r\nCheck: CKV_GCP_43: \"Ensure KMS encryption keys are rotated within a period of 90 days\"\r\n\tFAILED for resource: module.kms.google_kms_crypto_key.key\r\n\tFile: /main.tf:11-29\r\n\tCalling File: /example/production/main.tf:1-6\r\n\tGuide: https://docs.bridgecrew.io/docs/bc_gcp_general_4\r\n\r\n\t\t11 | resource \"google_kms_crypto_key\" \"key\" {\r\n\t\t12 | count = var.prevent_destroy ? length(var.keys) : 0\r\n\t\t13 | name = var.keys[count.index]\r\n\t\t14 | key_ring = google_kms_key_ring.key_ring.id\r\n\t\t15 | rotation_period = contains([\"ASYMMETRIC_SIGN\", \"ASYMMETRIC_DECRYPT\"], var.purpose) ? null : var.key_rotation_period\r\n\t\t16 | #rotation_period = var.key_rotation_period\r\n\t\t17 | purpose = var.purpose\r\n\t\t18 |\r\n\t\t19 | lifecycle {\r\n\t\t20 | prevent_destroy = true\r\n\t\t21 | }\r\n\t\t22 |\r\n\t\t23 | version_template {\r\n\t\t24 | algorithm = var.key_algorithm\r\n\t\t25 | protection_level = var.key_protection_level\r\n\t\t26 | }\r\n\t\t27 |\r\n\t\t28 | labels = var.labels\r\n\t\t29 | }\r\n\r\nCheckov should providing error only in ASYMMETRIC key creation not the ENCRYPT_DCRYPT purpose for KMS key. Even after setting the purpose to ENCRYPT_DCRYPT and key_rotation_period variable to 90 days(7776000s), check is failing.\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.156\r\n\r\n**Additional context**\r\n`contains([\"ASYMMETRIC_SIGN\", \"ASYMMETRIC_DECRYPT\"], var.purpose) ? 
null : var.key_rotation_period`\r\nAbove line should be evaluated and marked as passed for GCP KMS as ASYMMETRIC key is not supporting Automatic rotation.\r\n\n", "before_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.util.type_forcers import force_int\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n# rotation_period time unit is seconds\nONE_DAY = 24 * 60 * 60\nNINETY_DAYS = 90 * ONE_DAY\n\n\nclass GoogleKMSKeyRotationPeriod(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure KMS encryption keys are rotated within a period of 90 days\"\n id = \"CKV_GCP_43\"\n supported_resources = [\"google_kms_crypto_key\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rotation_period\"]\n rotation = conf.get(\"rotation_period\")\n if rotation and rotation[0]:\n time = force_int(rotation[0][:-1])\n if time and ONE_DAY <= time <= NINETY_DAYS:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = GoogleKMSKeyRotationPeriod()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py"}], "after_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.util.type_forcers import force_int\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nASYMMETRIC_KEYS = {\"ASYMMETRIC_DECRYPT\", \"ASYMMETRIC_SIGN\"}\n# rotation_period time unit is seconds\nONE_DAY = 24 * 60 * 60\nNINETY_DAYS = 90 * ONE_DAY\n\n\nclass GoogleKMSKeyRotationPeriod(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure KMS encryption keys are rotated within a period of 90 days\"\n id = \"CKV_GCP_43\"\n supported_resources = (\"google_kms_crypto_key\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n purpose = conf.get(\"purpose\")\n if purpose and isinstance(purpose, list) and purpose[0] in ASYMMETRIC_KEYS:\n # https://cloud.google.com/kms/docs/key-rotation#asymmetric\n # automatic key rotation is not supported for asymmetric keys\n return CheckResult.UNKNOWN\n\n self.evaluated_keys = [\"rotation_period\"]\n rotation = conf.get(\"rotation_period\")\n if rotation and rotation[0]:\n time = force_int(rotation[0][:-1])\n if time and ONE_DAY <= time <= NINETY_DAYS:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = GoogleKMSKeyRotationPeriod()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleKMSRotationPeriod.py"}]}
| 1,121 | 417 |
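To make the effect of the patch above concrete, here is a minimal sketch that exercises the updated `CKV_GCP_43` check directly. The `conf` dictionaries imitate what Checkov's Terraform parser would hand to `scan_resource_conf`; the attribute values are illustrative assumptions, not taken from the dataset row.

```python
# Sketch: the patched check skips asymmetric keys (UNKNOWN) and still
# validates the rotation period for symmetric ENCRYPT_DECRYPT keys.
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.gcp.GoogleKMSRotationPeriod import check

symmetric_conf = {"purpose": ["ENCRYPT_DECRYPT"], "rotation_period": ["7776000s"]}  # 90 days
asymmetric_conf = {"purpose": ["ASYMMETRIC_SIGN"]}  # GCP KMS cannot auto-rotate these

assert check.scan_resource_conf(symmetric_conf) == CheckResult.PASSED
assert check.scan_resource_conf(asymmetric_conf) == CheckResult.UNKNOWN
```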
gh_patches_debug_50437
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-5470
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build List Screen Performance Issue
While working on #5464, I discovered a performance issue on the [build list screen](http://readthedocs.org/projects/requests/builds/).
There appear to be a couple of duplicated queries in the build list screen. One gets the project for the build (probably from `Build.get_absolute_url`) and the other gets the version (probably from `build_list_detailed.html`). This results in O(n) SQL queries, where n is the number of builds displayed. It should be possible to fetch the project and version for each build using `select_related`.
<img width="1155" alt="Screen Shot 2019-03-15 at 11 00 13 AM" src="https://user-images.githubusercontent.com/185043/54452149-a0d76e80-4711-11e9-82f4-763418863f94.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/builds/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Views for builds app."""
4
5 import logging
6 import textwrap
7
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required
10 from django.http import (
11 HttpResponseForbidden,
12 HttpResponsePermanentRedirect,
13 HttpResponseRedirect,
14 )
15 from django.shortcuts import get_object_or_404
16 from django.urls import reverse
17 from django.utils.decorators import method_decorator
18 from django.views.generic import DetailView, ListView
19 from requests.utils import quote
20 from urllib.parse import urlparse
21
22 from readthedocs.doc_builder.exceptions import BuildEnvironmentError
23 from readthedocs.builds.models import Build, Version
24 from readthedocs.core.permissions import AdminPermission
25 from readthedocs.core.utils import trigger_build
26 from readthedocs.projects.models import Project
27
28
29 log = logging.getLogger(__name__)
30
31
32 class BuildBase:
33 model = Build
34
35 def get_queryset(self):
36 self.project_slug = self.kwargs.get('project_slug', None)
37 self.project = get_object_or_404(
38 Project.objects.protected(self.request.user),
39 slug=self.project_slug,
40 )
41 queryset = Build.objects.public(
42 user=self.request.user,
43 project=self.project,
44 )
45
46 return queryset
47
48
49 class BuildTriggerMixin:
50
51 @method_decorator(login_required)
52 def post(self, request, project_slug):
53 project = get_object_or_404(Project, slug=project_slug)
54
55 if not AdminPermission.is_admin(request.user, project):
56 return HttpResponseForbidden()
57
58 version_slug = request.POST.get('version_slug')
59 version = get_object_or_404(
60 Version,
61 project=project,
62 slug=version_slug,
63 )
64
65 update_docs_task, build = trigger_build(
66 project=project,
67 version=version,
68 )
69 if (update_docs_task, build) == (None, None):
70 # Build was skipped
71 messages.add_message(
72 request,
73 messages.WARNING,
74 "This project is currently disabled and can't trigger new builds.",
75 )
76 return HttpResponseRedirect(
77 reverse('builds_project_list', args=[project.slug]),
78 )
79
80 return HttpResponseRedirect(
81 reverse('builds_detail', args=[project.slug, build.pk]),
82 )
83
84
85 class BuildList(BuildBase, BuildTriggerMixin, ListView):
86
87 def get_context_data(self, **kwargs):
88 context = super().get_context_data(**kwargs)
89
90 active_builds = self.get_queryset().exclude(
91 state='finished',
92 ).values('id')
93
94 context['project'] = self.project
95 context['active_builds'] = active_builds
96 context['versions'] = Version.objects.public(
97 user=self.request.user,
98 project=self.project,
99 )
100 context['build_qs'] = self.get_queryset()
101
102 return context
103
104
105 class BuildDetail(BuildBase, DetailView):
106 pk_url_kwarg = 'build_pk'
107
108 def get_context_data(self, **kwargs):
109 context = super().get_context_data(**kwargs)
110 context['project'] = self.project
111
112 build = self.get_object()
113 if build.error != BuildEnvironmentError.GENERIC_WITH_BUILD_ID.format(build_id=build.pk):
114 # Do not suggest to open an issue if the error is not generic
115 return context
116
117 scheme = (
118 'https://github.com/rtfd/readthedocs.org/issues/new'
119 '?title={title}{build_id}'
120 '&body={body}'
121 )
122
123 # TODO: we could use ``.github/ISSUE_TEMPLATE.md`` here, but we would
124 # need to add some variables to it which could impact in the UX when
125 # filling an issue from the web
126 body = """
127 ## Details:
128
129 * Project URL: https://readthedocs.org/projects/{project_slug}/
130 * Build URL(if applicable): https://readthedocs.org{build_path}
131 * Read the Docs username(if applicable): {username}
132
133 ## Expected Result
134
135 *A description of what you wanted to happen*
136
137 ## Actual Result
138
139 *A description of what actually happened*""".format(
140 project_slug=self.project,
141 build_path=self.request.path,
142 username=self.request.user,
143 )
144
145 scheme_dict = {
146 'title': quote('Build error with build id #'),
147 'build_id': context['build'].id,
148 'body': quote(textwrap.dedent(body)),
149 }
150
151 issue_url = scheme.format(**scheme_dict)
152 issue_url = urlparse(issue_url).geturl()
153 context['issue_url'] = issue_url
154 return context
155
156
157 # Old build view redirects
158
159
160 def builds_redirect_list(request, project_slug): # pylint: disable=unused-argument
161 return HttpResponsePermanentRedirect(
162 reverse('builds_project_list', args=[project_slug]),
163 )
164
165
166 def builds_redirect_detail(request, project_slug, pk): # pylint: disable=unused-argument
167 return HttpResponsePermanentRedirect(
168 reverse('builds_detail', args=[project_slug, pk]),
169 )
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/builds/views.py b/readthedocs/builds/views.py
--- a/readthedocs/builds/views.py
+++ b/readthedocs/builds/views.py
@@ -41,7 +41,7 @@
queryset = Build.objects.public(
user=self.request.user,
project=self.project,
- )
+ ).select_related('project', 'version')
return queryset
|
{"golden_diff": "diff --git a/readthedocs/builds/views.py b/readthedocs/builds/views.py\n--- a/readthedocs/builds/views.py\n+++ b/readthedocs/builds/views.py\n@@ -41,7 +41,7 @@\n queryset = Build.objects.public(\n user=self.request.user,\n project=self.project,\n- )\n+ ).select_related('project', 'version')\n \n return queryset\n", "issue": "Build List Screen Performance Issue\nWhile working on #5464, I discovered a performance issue on the [build list screen](http://readthedocs.org/projects/requests/builds/).\r\n\r\nThere appears to be a couple duplicated queries in the build list screen. One is to get the project (probably from `Build.get_absolute_url`) for the build and the other gets the version (probably from `build_list_detailed.html`). This results in O(n) SQL queries where n is the number of builds displayed. It should be possible to get the project and version for each build using `select_related`.\r\n\r\n<img width=\"1155\" alt=\"Screen Shot 2019-03-15 at 11 00 13 AM\" src=\"https://user-images.githubusercontent.com/185043/54452149-a0d76e80-4711-11e9-82f4-763418863f94.png\">\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Views for builds app.\"\"\"\n\nimport logging\nimport textwrap\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import (\n HttpResponseForbidden,\n HttpResponsePermanentRedirect,\n HttpResponseRedirect,\n)\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import DetailView, ListView\nfrom requests.utils import quote\nfrom urllib.parse import urlparse\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\nfrom readthedocs.builds.models import Build, Version\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.utils import trigger_build\nfrom readthedocs.projects.models import Project\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildBase:\n model = Build\n\n def get_queryset(self):\n self.project_slug = self.kwargs.get('project_slug', None)\n self.project = get_object_or_404(\n Project.objects.protected(self.request.user),\n slug=self.project_slug,\n )\n queryset = Build.objects.public(\n user=self.request.user,\n project=self.project,\n )\n\n return queryset\n\n\nclass BuildTriggerMixin:\n\n @method_decorator(login_required)\n def post(self, request, project_slug):\n project = get_object_or_404(Project, slug=project_slug)\n\n if not AdminPermission.is_admin(request.user, project):\n return HttpResponseForbidden()\n\n version_slug = request.POST.get('version_slug')\n version = get_object_or_404(\n Version,\n project=project,\n slug=version_slug,\n )\n\n update_docs_task, build = trigger_build(\n project=project,\n version=version,\n )\n if (update_docs_task, build) == (None, None):\n # Build was skipped\n messages.add_message(\n request,\n messages.WARNING,\n \"This project is currently disabled and can't trigger new builds.\",\n )\n return HttpResponseRedirect(\n reverse('builds_project_list', args=[project.slug]),\n )\n\n return HttpResponseRedirect(\n reverse('builds_detail', args=[project.slug, build.pk]),\n )\n\n\nclass BuildList(BuildBase, BuildTriggerMixin, ListView):\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n active_builds = self.get_queryset().exclude(\n state='finished',\n ).values('id')\n\n context['project'] = self.project\n context['active_builds'] 
= active_builds\n context['versions'] = Version.objects.public(\n user=self.request.user,\n project=self.project,\n )\n context['build_qs'] = self.get_queryset()\n\n return context\n\n\nclass BuildDetail(BuildBase, DetailView):\n pk_url_kwarg = 'build_pk'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n\n build = self.get_object()\n if build.error != BuildEnvironmentError.GENERIC_WITH_BUILD_ID.format(build_id=build.pk):\n # Do not suggest to open an issue if the error is not generic\n return context\n\n scheme = (\n 'https://github.com/rtfd/readthedocs.org/issues/new'\n '?title={title}{build_id}'\n '&body={body}'\n )\n\n # TODO: we could use ``.github/ISSUE_TEMPLATE.md`` here, but we would\n # need to add some variables to it which could impact in the UX when\n # filling an issue from the web\n body = \"\"\"\n ## Details:\n\n * Project URL: https://readthedocs.org/projects/{project_slug}/\n * Build URL(if applicable): https://readthedocs.org{build_path}\n * Read the Docs username(if applicable): {username}\n\n ## Expected Result\n\n *A description of what you wanted to happen*\n\n ## Actual Result\n\n *A description of what actually happened*\"\"\".format(\n project_slug=self.project,\n build_path=self.request.path,\n username=self.request.user,\n )\n\n scheme_dict = {\n 'title': quote('Build error with build id #'),\n 'build_id': context['build'].id,\n 'body': quote(textwrap.dedent(body)),\n }\n\n issue_url = scheme.format(**scheme_dict)\n issue_url = urlparse(issue_url).geturl()\n context['issue_url'] = issue_url\n return context\n\n\n# Old build view redirects\n\n\ndef builds_redirect_list(request, project_slug): # pylint: disable=unused-argument\n return HttpResponsePermanentRedirect(\n reverse('builds_project_list', args=[project_slug]),\n )\n\n\ndef builds_redirect_detail(request, project_slug, pk): # pylint: disable=unused-argument\n return HttpResponsePermanentRedirect(\n reverse('builds_detail', args=[project_slug, pk]),\n )\n", "path": "readthedocs/builds/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Views for builds app.\"\"\"\n\nimport logging\nimport textwrap\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import (\n HttpResponseForbidden,\n HttpResponsePermanentRedirect,\n HttpResponseRedirect,\n)\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import DetailView, ListView\nfrom requests.utils import quote\nfrom urllib.parse import urlparse\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\nfrom readthedocs.builds.models import Build, Version\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.utils import trigger_build\nfrom readthedocs.projects.models import Project\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildBase:\n model = Build\n\n def get_queryset(self):\n self.project_slug = self.kwargs.get('project_slug', None)\n self.project = get_object_or_404(\n Project.objects.protected(self.request.user),\n slug=self.project_slug,\n )\n queryset = Build.objects.public(\n user=self.request.user,\n project=self.project,\n ).select_related('project', 'version')\n\n return queryset\n\n\nclass BuildTriggerMixin:\n\n @method_decorator(login_required)\n def post(self, request, project_slug):\n project = get_object_or_404(Project, 
slug=project_slug)\n\n if not AdminPermission.is_admin(request.user, project):\n return HttpResponseForbidden()\n\n version_slug = request.POST.get('version_slug')\n version = get_object_or_404(\n Version,\n project=project,\n slug=version_slug,\n )\n\n update_docs_task, build = trigger_build(\n project=project,\n version=version,\n )\n if (update_docs_task, build) == (None, None):\n # Build was skipped\n messages.add_message(\n request,\n messages.WARNING,\n \"This project is currently disabled and can't trigger new builds.\",\n )\n return HttpResponseRedirect(\n reverse('builds_project_list', args=[project.slug]),\n )\n\n return HttpResponseRedirect(\n reverse('builds_detail', args=[project.slug, build.pk]),\n )\n\n\nclass BuildList(BuildBase, BuildTriggerMixin, ListView):\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n active_builds = self.get_queryset().exclude(\n state='finished',\n ).values('id')\n\n context['project'] = self.project\n context['active_builds'] = active_builds\n context['versions'] = Version.objects.public(\n user=self.request.user,\n project=self.project,\n )\n context['build_qs'] = self.get_queryset()\n\n return context\n\n\nclass BuildDetail(BuildBase, DetailView):\n pk_url_kwarg = 'build_pk'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['project'] = self.project\n\n build = self.get_object()\n if build.error != BuildEnvironmentError.GENERIC_WITH_BUILD_ID.format(build_id=build.pk):\n # Do not suggest to open an issue if the error is not generic\n return context\n\n scheme = (\n 'https://github.com/rtfd/readthedocs.org/issues/new'\n '?title={title}{build_id}'\n '&body={body}'\n )\n\n # TODO: we could use ``.github/ISSUE_TEMPLATE.md`` here, but we would\n # need to add some variables to it which could impact in the UX when\n # filling an issue from the web\n body = \"\"\"\n ## Details:\n\n * Project URL: https://readthedocs.org/projects/{project_slug}/\n * Build URL(if applicable): https://readthedocs.org{build_path}\n * Read the Docs username(if applicable): {username}\n\n ## Expected Result\n\n *A description of what you wanted to happen*\n\n ## Actual Result\n\n *A description of what actually happened*\"\"\".format(\n project_slug=self.project,\n build_path=self.request.path,\n username=self.request.user,\n )\n\n scheme_dict = {\n 'title': quote('Build error with build id #'),\n 'build_id': context['build'].id,\n 'body': quote(textwrap.dedent(body)),\n }\n\n issue_url = scheme.format(**scheme_dict)\n issue_url = urlparse(issue_url).geturl()\n context['issue_url'] = issue_url\n return context\n\n\n# Old build view redirects\n\n\ndef builds_redirect_list(request, project_slug): # pylint: disable=unused-argument\n return HttpResponsePermanentRedirect(\n reverse('builds_project_list', args=[project_slug]),\n )\n\n\ndef builds_redirect_detail(request, project_slug, pk): # pylint: disable=unused-argument\n return HttpResponsePermanentRedirect(\n reverse('builds_detail', args=[project_slug, pk]),\n )\n", "path": "readthedocs/builds/views.py"}]}
| 1,949 | 89 |
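The golden diff above is the standard Django cure for an N+1 query pattern. A short, hedged illustration of why `select_related` helps follows; the loop below is illustrative (with `request` and `project` assumed to be in scope), not code from the repository.

```python
# Without select_related, touching build.project / build.version inside the loop
# triggers two extra queries per build (O(n) queries for n builds).
builds = Build.objects.public(user=request.user, project=project)
for build in builds:
    print(build.project.slug, build.version.slug)

# With the patched queryset, the related Project and Version rows are joined
# into the initial query, so the loop issues no additional SQL.
builds = Build.objects.public(user=request.user, project=project).select_related(
    "project", "version"
)
for build in builds:
    print(build.project.slug, build.version.slug)
```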
gh_patches_debug_4687
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-258
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce cython compile errors
We get a bunch of compile-time warnings from the cython code. I think we could reduce these.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Two environmental variables influence this script.
4 #
5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
6 # libraries, and data files.
7 #
8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
9 # source or binary distribution. This is essential when creating self-contained
10 # binary wheels.
11
12 import logging
13 import os
14 import pprint
15 import shutil
16 import subprocess
17 import sys
18
19 from setuptools import setup
20 from setuptools.extension import Extension
21
22 logging.basicConfig()
23 log = logging.getLogger()
24
25 # python -W all setup.py ...
26 if 'all' in sys.warnoptions:
27 log.level = logging.DEBUG
28
29 # Parse the version from the fiona module.
30 with open('rasterio/__init__.py') as f:
31 for line in f:
32 if line.find("__version__") >= 0:
33 version = line.split("=")[1].strip()
34 version = version.strip('"')
35 version = version.strip("'")
36 continue
37
38 with open('VERSION.txt', 'w') as f:
39 f.write(version)
40
41 # Use Cython if available.
42 try:
43 from Cython.Build import cythonize
44 except ImportError:
45 cythonize = None
46
47 # By default we'll try to get options via gdal-config. On systems without,
48 # options will need to be set in setup.cfg or on the setup command line.
49 include_dirs = []
50 library_dirs = []
51 libraries = []
52 extra_link_args = []
53
54 try:
55 import numpy
56 include_dirs.append(numpy.get_include())
57 except ImportError:
58 log.critical("Numpy and its headers are required to run setup(). Exiting.")
59 sys.exit(1)
60
61 try:
62 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
63 with open("gdal-config.txt", "w") as gcfg:
64 subprocess.call([gdal_config, "--cflags"], stdout=gcfg)
65 subprocess.call([gdal_config, "--libs"], stdout=gcfg)
66 subprocess.call([gdal_config, "--datadir"], stdout=gcfg)
67 with open("gdal-config.txt", "r") as gcfg:
68 cflags = gcfg.readline().strip()
69 libs = gcfg.readline().strip()
70 datadir = gcfg.readline().strip()
71 for item in cflags.split():
72 if item.startswith("-I"):
73 include_dirs.extend(item[2:].split(":"))
74 for item in libs.split():
75 if item.startswith("-L"):
76 library_dirs.extend(item[2:].split(":"))
77 elif item.startswith("-l"):
78 libraries.append(item[2:])
79 else:
80 # e.g. -framework GDAL
81 extra_link_args.append(item)
82
83 # Conditionally copy the GDAL data. To be used in conjunction with
84 # the bdist_wheel command to make self-contained binary wheels.
85 if os.environ.get('PACKAGE_DATA'):
86 try:
87 shutil.rmtree('rasterio/gdal_data')
88 except OSError:
89 pass
90 shutil.copytree(datadir, 'rasterio/gdal_data')
91
92 except Exception as e:
93 log.warning("Failed to get options via gdal-config: %s", str(e))
94
95 # Conditionally copy PROJ.4 data.
96 if os.environ.get('PACKAGE_DATA'):
97 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
98 if os.path.exists(projdatadir):
99 try:
100 shutil.rmtree('rasterio/proj_data')
101 except OSError:
102 pass
103 shutil.copytree(projdatadir, 'rasterio/proj_data')
104
105 ext_options = dict(
106 include_dirs=include_dirs,
107 library_dirs=library_dirs,
108 libraries=libraries,
109 extra_link_args=extra_link_args)
110
111 log.debug('ext_options:\n%s', pprint.pformat(ext_options))
112
113 # When building from a repo, Cython is required.
114 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
115 log.info("MANIFEST.in found, presume a repo, cythonizing...")
116 if not cythonize:
117 log.critical(
118 "Cython.Build.cythonize not found. "
119 "Cython is required to build from a repo.")
120 sys.exit(1)
121 ext_modules = cythonize([
122 Extension(
123 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
124 Extension(
125 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
126 Extension(
127 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),
128 Extension(
129 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
130 Extension(
131 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),
132 Extension(
133 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
134 Extension(
135 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
136 Extension(
137 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
138 ])
139
140 # If there's no manifest template, as in an sdist, we just specify .c files.
141 else:
142 ext_modules = [
143 Extension(
144 'rasterio._base', ['rasterio/_base.c'], **ext_options),
145 Extension(
146 'rasterio._io', ['rasterio/_io.c'], **ext_options),
147 Extension(
148 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),
149 Extension(
150 'rasterio._features', ['rasterio/_features.c'], **ext_options),
151 Extension(
152 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),
153 Extension(
154 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
155 Extension(
156 'rasterio._err', ['rasterio/_err.c'], **ext_options),
157 Extension(
158 'rasterio._example', ['rasterio/_example.c'], **ext_options),
159 ]
160
161 with open('README.rst') as f:
162 readme = f.read()
163
164 # Runtime requirements.
165 inst_reqs = [
166 'affine>=1.0',
167 'cligj',
168 'Numpy>=1.7' ]
169
170 if sys.version_info < (3, 4):
171 inst_reqs.append('enum34')
172
173 setup_args = dict(
174 name='rasterio',
175 version=version,
176 description="Fast and direct raster I/O for use with Numpy and SciPy",
177 long_description=readme,
178 classifiers=[
179 'Development Status :: 4 - Beta',
180 'Intended Audience :: Developers',
181 'Intended Audience :: Information Technology',
182 'Intended Audience :: Science/Research',
183 'License :: OSI Approved :: BSD License',
184 'Programming Language :: C',
185 'Programming Language :: Python :: 2.6',
186 'Programming Language :: Python :: 2.7',
187 'Programming Language :: Python :: 3.3',
188 'Programming Language :: Python :: 3.4',
189 'Topic :: Multimedia :: Graphics :: Graphics Conversion',
190 'Topic :: Scientific/Engineering :: GIS'],
191 keywords='raster gdal',
192 author='Sean Gillies',
193 author_email='[email protected]',
194 url='https://github.com/mapbox/rasterio',
195 license='BSD',
196 package_dir={'': '.'},
197 packages=['rasterio', 'rasterio.rio'],
198 entry_points='''
199 [console_scripts]
200 rio=rasterio.rio.main:cli
201 ''',
202 include_package_data=True,
203 ext_modules=ext_modules,
204 zip_safe=False,
205 install_requires=inst_reqs)
206
207 if os.environ.get('PACKAGE_DATA'):
208 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
209
210 setup(**setup_args)
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -135,7 +135,7 @@
'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
Extension(
'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
- ])
+ ], quiet=True)
# If there's no manifest template, as in an sdist, we just specify .c files.
else:
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -135,7 +135,7 @@\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n- ])\n+ ], quiet=True)\n \n # If there's no manifest template, as in an sdist, we just specify .c files.\n else:\n", "issue": "Reduce cython compile errors\nWe get a bunch of compile-time warnings from the cython code. I think we could reduce these.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\n# Parse the version from the fiona module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n with open(\"gdal-config.txt\", \"w\") as gcfg:\n subprocess.call([gdal_config, \"--cflags\"], stdout=gcfg)\n subprocess.call([gdal_config, \"--libs\"], stdout=gcfg)\n subprocess.call([gdal_config, \"--datadir\"], stdout=gcfg)\n with open(\"gdal-config.txt\", \"r\") as gcfg:\n cflags = gcfg.readline().strip()\n libs = gcfg.readline().strip()\n datadir = gcfg.readline().strip()\n for item in cflags.split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in libs.split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\n # Conditionally copy the GDAL data. 
To be used in conjunction with\n # the bdist_wheel command to make self-contained binary wheels.\n if os.environ.get('PACKAGE_DATA'):\n try:\n shutil.rmtree('rasterio/gdal_data')\n except OSError:\n pass\n shutil.copytree(datadir, 'rasterio/gdal_data')\n\nexcept Exception as e:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n# Conditionally copy PROJ.4 data.\nif os.environ.get('PACKAGE_DATA'):\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n try:\n shutil.rmtree('rasterio/proj_data')\n except OSError:\n pass\n shutil.copytree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ])\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj',\n 'Numpy>=1.7' ]\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n 
package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:cli\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs)\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\n# Parse the version from the fiona module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n with open(\"gdal-config.txt\", \"w\") as gcfg:\n subprocess.call([gdal_config, \"--cflags\"], stdout=gcfg)\n subprocess.call([gdal_config, \"--libs\"], stdout=gcfg)\n subprocess.call([gdal_config, \"--datadir\"], stdout=gcfg)\n with open(\"gdal-config.txt\", \"r\") as gcfg:\n cflags = gcfg.readline().strip()\n libs = gcfg.readline().strip()\n datadir = gcfg.readline().strip()\n for item in cflags.split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in libs.split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\n # Conditionally copy the GDAL data. 
To be used in conjunction with\n # the bdist_wheel command to make self-contained binary wheels.\n if os.environ.get('PACKAGE_DATA'):\n try:\n shutil.rmtree('rasterio/gdal_data')\n except OSError:\n pass\n shutil.copytree(datadir, 'rasterio/gdal_data')\n\nexcept Exception as e:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n# Conditionally copy PROJ.4 data.\nif os.environ.get('PACKAGE_DATA'):\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n try:\n shutil.rmtree('rasterio/proj_data')\n except OSError:\n pass\n shutil.copytree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj',\n 'Numpy>=1.7' ]\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n 
package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:cli\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs)\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}]}
| 2,529 | 112 |
gh_patches_debug_12541
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1038
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_posterior writes auto instead of point estimate name
**Describe the bug**
`plot_posterior` should write the name of the point estimate; however, it currently writes `auto` if `auto` is passed as the point estimate. This can be seen in the [docs example](https://arviz-devs.github.io/arviz/examples/matplotlib/mpl_plot_posterior.html)
**Expected behavior**
It should write the name of the point estimate used (taken from rcParams).
**Additional context**
ArviZ master
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/posteriorplot.py`
Content:
```
1 """Plot posterior densities."""
2 from typing import Optional
3
4 from ..data import convert_to_dataset
5 from .plot_utils import (
6 xarray_var_iter,
7 _scale_fig_size,
8 default_grid,
9 get_coords,
10 filter_plotters_list,
11 get_plotting_function,
12 )
13 from ..utils import _var_names
14 from ..rcparams import rcParams
15
16
17 def plot_posterior(
18 data,
19 var_names=None,
20 coords=None,
21 figsize=None,
22 textsize=None,
23 credible_interval=None,
24 multimodal=False,
25 round_to: Optional[int] = None,
26 point_estimate="auto",
27 group="posterior",
28 rope=None,
29 ref_val=None,
30 kind="kde",
31 bw=4.5,
32 bins=None,
33 ax=None,
34 backend=None,
35 backend_kwargs=None,
36 show=None,
37 **kwargs
38 ):
39 """Plot Posterior densities in the style of John K. Kruschke's book.
40
41 Parameters
42 ----------
43 data : obj
44 Any object that can be converted to an az.InferenceData object
45 Refer to documentation of az.convert_to_dataset for details
46 var_names : list of variable names
47 Variables to be plotted, two variables are required.
48 coords : mapping, optional
49 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
50 figsize : tuple
51 Figure size. If None it will be defined automatically.
52 textsize: float
53 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
54 on figsize.
55 credible_interval : float, optional
56 Credible intervals. Defaults to 0.94. Use None to hide the credible interval
57 multimodal : bool
58 If true (default) it may compute more than one credible interval if the distribution is
59 multimodal and the modes are well separated.
60 round_to : int, optional
61 Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.
62 point_estimate : Optional[str]
63 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
64 Defaults to 'auto' i.e. it falls back to default set in rcParams.
65 group : str, optional
66 Specifies which InferenceData group should be plotted. Defaults to ‘posterior’.
67 rope: tuple or dictionary of tuples
68 Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its
69 length should match the number of variables.
70 ref_val: float or dictionary of floats
71 display the percentage below and above the values in ref_val. Must be None (default),
72 a constant, a list or a dictionary like see an example below. If a list is provided, its
73 length should match the number of variables.
74 kind: str
75 Type of plot to display (kde or hist) For discrete variables this argument is ignored and
76 a histogram is always used.
77 bw : float
78 Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
79 smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
80 of thumb (the default rule used by SciPy). Only works if `kind == kde`.
81 bins : integer or sequence or 'auto', optional
82 Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works
83 if `kind == hist`. If None (default) it will use `auto` for continuous variables and
84 `range(xmin, xmax + 1)` for discrete variables.
85 ax: axes, optional
86 Matplotlib axes or bokeh figures.
87 backend: str, optional
88 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
89 backend_kwargs: bool, optional
90 These are kwargs specific to the backend being used. For additional documentation
91 check the plotting method of the backend.
92 show : bool, optional
93 Call backend show function.
94 **kwargs
95 Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.
96
97 Returns
98 -------
99 axes : matplotlib axes or bokeh figures
100
101 Examples
102 --------
103 Show a default kernel density plot following style of John Kruschke
104
105 .. plot::
106 :context: close-figs
107
108 >>> import arviz as az
109 >>> data = az.load_arviz_data('centered_eight')
110 >>> az.plot_posterior(data)
111
112 Plot subset variables by specifying variable name exactly
113
114 .. plot::
115 :context: close-figs
116
117 >>> az.plot_posterior(data, var_names=['mu'])
118
119 Plot Region of Practical Equivalence (rope) for all distributions
120
121 .. plot::
122 :context: close-figs
123
124 >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=(-1, 1))
125
126 Plot Region of Practical Equivalence for selected distributions
127
128 .. plot::
129 :context: close-figs
130
131 >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}
132 >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)
133
134
135 Add reference lines
136
137 .. plot::
138 :context: close-figs
139
140 >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)
141
142 Show point estimate of distribution
143
144 .. plot::
145 :context: close-figs
146
147 >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')
148
149 Show reference values using variable names and coordinates
150
151 .. plot::
152 :context: close-figs
153
154 >>> az.plot_posterior(data, ref_val= {"theta": [{"school": "Deerfield", "ref_val": 4},
155 ... {"school": "Choate", "ref_val": 3}]})
156
157 Show reference values using a list
158
159 .. plot::
160 :context: close-figs
161
162 >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])
163
164
165 Plot posterior as a histogram
166
167 .. plot::
168 :context: close-figs
169
170 >>> az.plot_posterior(data, var_names=['mu'], kind='hist')
171
172 Change size of credible interval
173
174 .. plot::
175 :context: close-figs
176
177 >>> az.plot_posterior(data, var_names=['mu'], credible_interval=.75)
178 """
179 data = convert_to_dataset(data, group=group)
180 var_names = _var_names(var_names, data)
181
182 if coords is None:
183 coords = {}
184
185 if credible_interval is None:
186 credible_interval = rcParams["stats.credible_interval"]
187 else:
188 if not 1 >= credible_interval > 0:
189 raise ValueError("The value of credible_interval should be in the interval (0, 1]")
190
191 plotters = filter_plotters_list(
192 list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),
193 "plot_posterior",
194 )
195 length_plotters = len(plotters)
196 rows, cols = default_grid(length_plotters)
197
198 (figsize, ax_labelsize, titlesize, xt_labelsize, _linewidth, _) = _scale_fig_size(
199 figsize, textsize, rows, cols
200 )
201 kwargs.setdefault("linewidth", _linewidth)
202
203 posteriorplot_kwargs = dict(
204 ax=ax,
205 length_plotters=length_plotters,
206 rows=rows,
207 cols=cols,
208 figsize=figsize,
209 plotters=plotters,
210 bw=bw,
211 bins=bins,
212 kind=kind,
213 point_estimate=point_estimate,
214 round_to=round_to,
215 credible_interval=credible_interval,
216 multimodal=multimodal,
217 ref_val=ref_val,
218 rope=rope,
219 ax_labelsize=ax_labelsize,
220 xt_labelsize=xt_labelsize,
221 kwargs=kwargs,
222 titlesize=titlesize,
223 backend_kwargs=backend_kwargs,
224 show=show,
225 )
226
227 if backend == "bokeh":
228
229 posteriorplot_kwargs.pop("xt_labelsize")
230 posteriorplot_kwargs.pop("titlesize")
231
232 # TODO: Add backend kwargs
233 plot = get_plotting_function("plot_posterior", "posteriorplot", backend)
234 ax = plot(**posteriorplot_kwargs)
235 return ax
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py
--- a/arviz/plots/posteriorplot.py
+++ b/arviz/plots/posteriorplot.py
@@ -188,6 +188,11 @@
if not 1 >= credible_interval > 0:
raise ValueError("The value of credible_interval should be in the interval (0, 1]")
+ if point_estimate == "auto":
+ point_estimate = rcParams["plot.point_estimate"]
+ elif point_estimate not in {"mean", "median", "mode", None}:
+ raise ValueError("The value of point_estimate must be either mean, median, mode or None.")
+
plotters = filter_plotters_list(
list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),
"plot_posterior",
|
{"golden_diff": "diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py\n--- a/arviz/plots/posteriorplot.py\n+++ b/arviz/plots/posteriorplot.py\n@@ -188,6 +188,11 @@\n if not 1 >= credible_interval > 0:\n raise ValueError(\"The value of credible_interval should be in the interval (0, 1]\")\n \n+ if point_estimate == \"auto\":\n+ point_estimate = rcParams[\"plot.point_estimate\"]\n+ elif point_estimate not in {\"mean\", \"median\", \"mode\", None}:\n+ raise ValueError(\"The value of point_estimate must be either mean, median, mode or None.\")\n+\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n", "issue": "plot_posterior writes auto instead of point estimate name\n**Describe the bug**\r\n`plot_posterior` should write the name of the point estimate, however it currently writes `auto` if `auto` is passed as point estimate. This can be seen in the [docs example](https://arviz-devs.github.io/arviz/examples/matplotlib/mpl_plot_posterior.html)\r\n\r\n**Expected behavior**\r\nIt should write the name of the point estimate used (taken from rcParams).\r\n\r\n**Additional context**\r\nArviZ master\r\n\n", "before_files": [{"content": "\"\"\"Plot posterior densities.\"\"\"\nfrom typing import Optional\n\nfrom ..data import convert_to_dataset\nfrom .plot_utils import (\n xarray_var_iter,\n _scale_fig_size,\n default_grid,\n get_coords,\n filter_plotters_list,\n get_plotting_function,\n)\nfrom ..utils import _var_names\nfrom ..rcparams import rcParams\n\n\ndef plot_posterior(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n credible_interval=None,\n multimodal=False,\n round_to: Optional[int] = None,\n point_estimate=\"auto\",\n group=\"posterior\",\n rope=None,\n ref_val=None,\n kind=\"kde\",\n bw=4.5,\n bins=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n \"\"\"Plot Posterior densities in the style of John K. Kruschke's book.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, two variables are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n credible_interval : float, optional\n Credible intervals. Defaults to 0.94. Use None to hide the credible interval\n multimodal : bool\n If true (default) it may compute more than one credible interval if the distribution is\n multimodal and the modes are well separated.\n round_to : int, optional\n Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in rcParams.\n group : str, optional\n Specifies which InferenceData group should be plotted. Defaults to \u2018posterior\u2019.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. 
If a list is provided, its\n length should match the number of variables.\n ref_val: float or dictionary of floats\n display the percentage below and above the values in ref_val. Must be None (default),\n a constant, a list or a dictionary like see an example below. If a list is provided, its\n length should match the number of variables.\n kind: str\n Type of plot to display (kde or hist) For discrete variables this argument is ignored and\n a histogram is always used.\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy). Only works if `kind == kde`.\n bins : integer or sequence or 'auto', optional\n Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works\n if `kind == hist`. If None (default) it will use `auto` for continuous variables and\n `range(xmin, xmax + 1)` for discrete variables.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n **kwargs\n Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n Examples\n --------\n Show a default kernel density plot following style of John Kruschke\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_posterior(data)\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'])\n\n Plot Region of Practical Equivalence (rope) for all distributions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=(-1, 1))\n\n Plot Region of Practical Equivalence for selected distributions\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n\n\n Add reference lines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)\n\n Show point estimate of distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')\n\n Show reference values using variable names and coordinates\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val= {\"theta\": [{\"school\": \"Deerfield\", \"ref_val\": 4},\n ... {\"school\": \"Choate\", \"ref_val\": 3}]})\n\n Show reference values using a list\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])\n\n\n Plot posterior as a histogram\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], kind='hist')\n\n Change size of credible interval\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], credible_interval=.75)\n \"\"\"\n data = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, data)\n\n if coords is None:\n coords = {}\n\n if credible_interval is None:\n credible_interval = rcParams[\"stats.credible_interval\"]\n else:\n if not 1 >= credible_interval > 0:\n raise ValueError(\"The value of credible_interval should be in the interval (0, 1]\")\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters)\n\n (figsize, ax_labelsize, titlesize, xt_labelsize, _linewidth, _) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n kwargs.setdefault(\"linewidth\", _linewidth)\n\n posteriorplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n plotters=plotters,\n bw=bw,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n credible_interval=credible_interval,\n multimodal=multimodal,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n kwargs=kwargs,\n titlesize=titlesize,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n posteriorplot_kwargs.pop(\"xt_labelsize\")\n posteriorplot_kwargs.pop(\"titlesize\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_posterior\", \"posteriorplot\", backend)\n ax = plot(**posteriorplot_kwargs)\n return ax\n", "path": "arviz/plots/posteriorplot.py"}], "after_files": [{"content": "\"\"\"Plot posterior densities.\"\"\"\nfrom typing import Optional\n\nfrom ..data import convert_to_dataset\nfrom .plot_utils import (\n xarray_var_iter,\n _scale_fig_size,\n default_grid,\n get_coords,\n filter_plotters_list,\n get_plotting_function,\n)\nfrom ..utils import _var_names\nfrom ..rcparams import rcParams\n\n\ndef plot_posterior(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n credible_interval=None,\n multimodal=False,\n round_to: Optional[int] = None,\n point_estimate=\"auto\",\n group=\"posterior\",\n rope=None,\n ref_val=None,\n kind=\"kde\",\n bw=4.5,\n bins=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n **kwargs\n):\n \"\"\"Plot Posterior densities in the style of John K. Kruschke's book.\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, two variables are required.\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n credible_interval : float, optional\n Credible intervals. Defaults to 0.94. Use None to hide the credible interval\n multimodal : bool\n If true (default) it may compute more than one credible interval if the distribution is\n multimodal and the modes are well separated.\n round_to : int, optional\n Controls formatting of floats. Defaults to 2 or the integer part, whichever is bigger.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. 
it falls back to default set in rcParams.\n group : str, optional\n Specifies which InferenceData group should be plotted. Defaults to \u2018posterior\u2019.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list is provided, its\n length should match the number of variables.\n ref_val: float or dictionary of floats\n display the percentage below and above the values in ref_val. Must be None (default),\n a constant, a list or a dictionary like see an example below. If a list is provided, its\n length should match the number of variables.\n kind: str\n Type of plot to display (kde or hist) For discrete variables this argument is ignored and\n a histogram is always used.\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy). Only works if `kind == kde`.\n bins : integer or sequence or 'auto', optional\n Controls the number of bins, accepts the same keywords `matplotlib.hist()` does. Only works\n if `kind == hist`. If None (default) it will use `auto` for continuous variables and\n `range(xmin, xmax + 1)` for discrete variables.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n **kwargs\n Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n Examples\n --------\n Show a default kernel density plot following style of John Kruschke\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('centered_eight')\n >>> az.plot_posterior(data)\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'])\n\n Plot Region of Practical Equivalence (rope) for all distributions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=(-1, 1))\n\n Plot Region of Practical Equivalence for selected distributions\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'mu': [{'rope': (-2, 2)}], 'theta': [{'school': 'Choate', 'rope': (2, 4)}]}\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], rope=rope)\n\n\n Add reference lines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], ref_val=0)\n\n Show point estimate of distribution\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu', 'theta'], point_estimate='mode')\n\n Show reference values using variable names and coordinates\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val= {\"theta\": [{\"school\": \"Deerfield\", \"ref_val\": 4},\n ... {\"school\": \"Choate\", \"ref_val\": 3}]})\n\n Show reference values using a list\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, ref_val=[1] + [5] * 8 + [1])\n\n\n Plot posterior as a histogram\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], kind='hist')\n\n Change size of credible interval\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_posterior(data, var_names=['mu'], credible_interval=.75)\n \"\"\"\n data = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, data)\n\n if coords is None:\n coords = {}\n\n if credible_interval is None:\n credible_interval = rcParams[\"stats.credible_interval\"]\n else:\n if not 1 >= credible_interval > 0:\n raise ValueError(\"The value of credible_interval should be in the interval (0, 1]\")\n\n if point_estimate == \"auto\":\n point_estimate = rcParams[\"plot.point_estimate\"]\n elif point_estimate not in {\"mean\", \"median\", \"mode\", None}:\n raise ValueError(\"The value of point_estimate must be either mean, median, mode or None.\")\n\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(data, coords), var_names=var_names, combined=True)),\n \"plot_posterior\",\n )\n length_plotters = len(plotters)\n rows, cols = default_grid(length_plotters)\n\n (figsize, ax_labelsize, titlesize, xt_labelsize, _linewidth, _) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n kwargs.setdefault(\"linewidth\", _linewidth)\n\n posteriorplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n plotters=plotters,\n bw=bw,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n credible_interval=credible_interval,\n multimodal=multimodal,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n kwargs=kwargs,\n titlesize=titlesize,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n posteriorplot_kwargs.pop(\"xt_labelsize\")\n posteriorplot_kwargs.pop(\"titlesize\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_posterior\", \"posteriorplot\", backend)\n ax = plot(**posteriorplot_kwargs)\n return ax\n", "path": "arviz/plots/posteriorplot.py"}]}
| 2,825 | 192 |
gh_patches_debug_29450
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4476
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AWS_CKV_7 False Positive on asymmetric key check in Cloudformation
**Describe the issue**
In terraform, the check avoids false positives with an extra check against symmetric keys before checking whether rotation is enabled. This same check hasn't been configured for cloudformation:
```
def scan_resource_conf(self, conf):
# Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.
spec = conf.get('customer_master_key_spec')
if not spec or 'SYMMETRIC_DEFAULT' in spec:
return super().scan_resource_conf(conf)
else:
return CheckResult.PASSED
```
**Examples**
```
RSASigningKey:
Type: 'AWS::KMS::Key'
Properties:
Description: RSA-3072 asymmetric KMS key for signing and verification
KeySpec: RSA_3072
KeyUsage: SIGN_VERIFY
KeyPolicy:
Version: 2012-10-17
Id: key-default-1
Statement:
- Sid: Enable IAM User Permissions
Effect: Allow
Principal:
AWS: 'arn:aws:iam::111122223333:root'
Action: 'kms:*'
Resource: '*'
- Sid: Allow administration of the key
Effect: Allow
Principal:
AWS: 'arn:aws:iam::111122223333:role/Admin'
Action:
- 'kms:Create*'
- 'kms:Describe*'
- 'kms:Enable*'
- 'kms:List*'
- 'kms:Put*'
- 'kms:Update*'
- 'kms:Revoke*'
- 'kms:Disable*'
- 'kms:Get*'
- 'kms:Delete*'
- 'kms:ScheduleKeyDeletion'
- 'kms:CancelKeyDeletion'
Resource: '*'
- Sid: Allow use of the key
Effect: Allow
Principal:
AWS: 'arn:aws:iam::111122223333:role/Developer'
Action:
- 'kms:Sign'
- 'kms:Verify'
- 'kms:DescribeKey'
Resource: '*'
```
**Version (please complete the following information):**
- Checkov Version [e.g. 2.3.0]
**Additional context**
This blocks checkov from working for asymmetric keys in CFN.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/cloudformation/checks/resource/aws/KMSRotation.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class KMSRotation(BaseResourceValueCheck):
6 def __init__(self) -> None:
7 name = "Ensure rotation for customer created CMKs is enabled"
8 id = "CKV_AWS_7"
9 supported_resources = ("AWS::KMS::Key",)
10 categories = (CheckCategories.ENCRYPTION,)
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self) -> str:
14 return "Properties/EnableKeyRotation"
15
16
17 check = KMSRotation()
18
```
Path: `checkov/terraform/checks/resource/aws/KMSRotation.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories, CheckResult
3
4
5 class KMSRotation(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure rotation for customer created CMKs is enabled"
8 id = "CKV_AWS_7"
9 supported_resources = ['aws_kms_key']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return "enable_key_rotation"
15
16 def scan_resource_conf(self, conf):
17 # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.
18 spec = conf.get('customer_master_key_spec')
19 if not spec or 'SYMMETRIC_DEFAULT' in spec:
20 return super().scan_resource_conf(conf)
21 else:
22 return CheckResult.PASSED
23
24
25 check = KMSRotation()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/cloudformation/checks/resource/aws/KMSRotation.py b/checkov/cloudformation/checks/resource/aws/KMSRotation.py
--- a/checkov/cloudformation/checks/resource/aws/KMSRotation.py
+++ b/checkov/cloudformation/checks/resource/aws/KMSRotation.py
@@ -1,4 +1,4 @@
-from checkov.common.models.enums import CheckCategories
+from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
@@ -13,5 +13,15 @@
def get_inspected_key(self) -> str:
return "Properties/EnableKeyRotation"
+ def scan_resource_conf(self, conf):
+ # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.
+ properties = conf.get("Properties")
+ if properties and isinstance(properties, dict):
+ spec = properties.get("KeySpec")
+ if spec and isinstance(spec, str):
+ if 'SYMMETRIC_DEFAULT' not in spec and 'HMAC' not in spec:
+ return CheckResult.UNKNOWN
+ return super().scan_resource_conf(conf)
+
check = KMSRotation()
diff --git a/checkov/terraform/checks/resource/aws/KMSRotation.py b/checkov/terraform/checks/resource/aws/KMSRotation.py
--- a/checkov/terraform/checks/resource/aws/KMSRotation.py
+++ b/checkov/terraform/checks/resource/aws/KMSRotation.py
@@ -16,10 +16,10 @@
def scan_resource_conf(self, conf):
# Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.
spec = conf.get('customer_master_key_spec')
- if not spec or 'SYMMETRIC_DEFAULT' in spec:
+ if not spec or 'SYMMETRIC_DEFAULT' in spec or 'HMAC' in spec:
return super().scan_resource_conf(conf)
else:
- return CheckResult.PASSED
+ return CheckResult.UNKNOWN
check = KMSRotation()
|
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/KMSRotation.py b/checkov/cloudformation/checks/resource/aws/KMSRotation.py\n--- a/checkov/cloudformation/checks/resource/aws/KMSRotation.py\n+++ b/checkov/cloudformation/checks/resource/aws/KMSRotation.py\n@@ -1,4 +1,4 @@\n-from checkov.common.models.enums import CheckCategories\n+from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n \n \n@@ -13,5 +13,15 @@\n def get_inspected_key(self) -> str:\n return \"Properties/EnableKeyRotation\"\n \n+ def scan_resource_conf(self, conf):\n+ # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.\n+ properties = conf.get(\"Properties\")\n+ if properties and isinstance(properties, dict):\n+ spec = properties.get(\"KeySpec\")\n+ if spec and isinstance(spec, str):\n+ if 'SYMMETRIC_DEFAULT' not in spec and 'HMAC' not in spec:\n+ return CheckResult.UNKNOWN\n+ return super().scan_resource_conf(conf)\n+\n \n check = KMSRotation()\ndiff --git a/checkov/terraform/checks/resource/aws/KMSRotation.py b/checkov/terraform/checks/resource/aws/KMSRotation.py\n--- a/checkov/terraform/checks/resource/aws/KMSRotation.py\n+++ b/checkov/terraform/checks/resource/aws/KMSRotation.py\n@@ -16,10 +16,10 @@\n def scan_resource_conf(self, conf):\n # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.\n spec = conf.get('customer_master_key_spec')\n- if not spec or 'SYMMETRIC_DEFAULT' in spec:\n+ if not spec or 'SYMMETRIC_DEFAULT' in spec or 'HMAC' in spec:\n return super().scan_resource_conf(conf)\n else:\n- return CheckResult.PASSED\n+ return CheckResult.UNKNOWN\n \n \n check = KMSRotation()\n", "issue": "AWS_CKV_7 False Positive on assymetric key check in Cloudformation\n**Describe the issue**\r\nIn terraform, the check avoids false positives with an extra check against symmetric keys before checking whether rotation is enabled. This same check hasn't been configured for cloudformation:\r\n\r\n```\r\ndef scan_resource_conf(self, conf):\r\n # Only symmetric keys support auto rotation. 
The attribute is optional and defaults to symmetric.\r\n spec = conf.get('customer_master_key_spec')\r\n if not spec or 'SYMMETRIC_DEFAULT' in spec:\r\n return super().scan_resource_conf(conf)\r\n else:\r\n return CheckResult.PASSED\r\n```\r\n\r\n**Examples**\r\n\r\n```\r\nRSASigningKey:\r\n Type: 'AWS::KMS::Key'\r\n Properties:\r\n Description: RSA-3072 asymmetric KMS key for signing and verification\r\n KeySpec: RSA_3072\r\n KeyUsage: SIGN_VERIFY\r\n KeyPolicy:\r\n Version: 2012-10-17\r\n Id: key-default-1\r\n Statement:\r\n - Sid: Enable IAM User Permissions\r\n Effect: Allow\r\n Principal:\r\n AWS: 'arn:aws:iam::111122223333:root'\r\n Action: 'kms:*'\r\n Resource: '*'\r\n - Sid: Allow administration of the key\r\n Effect: Allow\r\n Principal:\r\n AWS: 'arn:aws:iam::111122223333:role/Admin'\r\n Action:\r\n - 'kms:Create*'\r\n - 'kms:Describe*'\r\n - 'kms:Enable*'\r\n - 'kms:List*'\r\n - 'kms:Put*'\r\n - 'kms:Update*'\r\n - 'kms:Revoke*'\r\n - 'kms:Disable*'\r\n - 'kms:Get*'\r\n - 'kms:Delete*'\r\n - 'kms:ScheduleKeyDeletion'\r\n - 'kms:CancelKeyDeletion'\r\n Resource: '*'\r\n - Sid: Allow use of the key\r\n Effect: Allow\r\n Principal:\r\n AWS: 'arn:aws:iam::111122223333:role/Developer'\r\n Action:\r\n - 'kms:Sign'\r\n - 'kms:Verify'\r\n - 'kms:DescribeKey'\r\n Resource: '*'\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version [e.g. 2.3.0]\r\n\r\n**Additional context**\r\n\r\nThis blocks checkov working for assymetric keys in CFN.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass KMSRotation(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure rotation for customer created CMKs is enabled\"\n id = \"CKV_AWS_7\"\n supported_resources = (\"AWS::KMS::Key\",)\n categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"Properties/EnableKeyRotation\"\n\n\ncheck = KMSRotation()\n", "path": "checkov/cloudformation/checks/resource/aws/KMSRotation.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass KMSRotation(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure rotation for customer created CMKs is enabled\"\n id = \"CKV_AWS_7\"\n supported_resources = ['aws_kms_key']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"enable_key_rotation\"\n\n def scan_resource_conf(self, conf):\n # Only symmetric keys support auto rotation. 
The attribute is optional and defaults to symmetric.\n spec = conf.get('customer_master_key_spec')\n if not spec or 'SYMMETRIC_DEFAULT' in spec:\n return super().scan_resource_conf(conf)\n else:\n return CheckResult.PASSED\n\n\ncheck = KMSRotation()\n", "path": "checkov/terraform/checks/resource/aws/KMSRotation.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass KMSRotation(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure rotation for customer created CMKs is enabled\"\n id = \"CKV_AWS_7\"\n supported_resources = (\"AWS::KMS::Key\",)\n categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"Properties/EnableKeyRotation\"\n\n def scan_resource_conf(self, conf):\n # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.\n properties = conf.get(\"Properties\")\n if properties and isinstance(properties, dict):\n spec = properties.get(\"KeySpec\")\n if spec and isinstance(spec, str):\n if 'SYMMETRIC_DEFAULT' not in spec and 'HMAC' not in spec:\n return CheckResult.UNKNOWN\n return super().scan_resource_conf(conf)\n\n\ncheck = KMSRotation()\n", "path": "checkov/cloudformation/checks/resource/aws/KMSRotation.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass KMSRotation(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure rotation for customer created CMKs is enabled\"\n id = \"CKV_AWS_7\"\n supported_resources = ['aws_kms_key']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"enable_key_rotation\"\n\n def scan_resource_conf(self, conf):\n # Only symmetric keys support auto rotation. The attribute is optional and defaults to symmetric.\n spec = conf.get('customer_master_key_spec')\n if not spec or 'SYMMETRIC_DEFAULT' in spec or 'HMAC' in spec:\n return super().scan_resource_conf(conf)\n else:\n return CheckResult.UNKNOWN\n\n\ncheck = KMSRotation()\n", "path": "checkov/terraform/checks/resource/aws/KMSRotation.py"}]}
| 1,291 | 447 |
gh_patches_debug_1522
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1736
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CA: email issue
State: CA
(via contact)
I’d like to call your attention to an error on your email address listings for State Senators in California. Your database lists an address in the form of [email protected], whereas the correct email is [email protected].
It does appear that Senate staffers are [email protected].
We’ve been using your service, and our advocates’ emails are bouncing back.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ca/legislators.py`
Content:
```
1 import re
2 import collections
3 import unicodedata
4 from operator import methodcaller
5
6 import lxml.html
7
8 from billy.scrape.legislators import LegislatorScraper, Legislator
9
10
11 def parse_address(s, split=re.compile(r'[;,]\s{,3}').split):
12 '''
13 Extract address fields from text.
14 '''
15 # If the address isn't formatted correctly, skip for now.
16 if ';' not in s:
17 return []
18
19 fields = 'city state_zip phone'.split()
20 vals = split(s)
21 res = []
22 while True:
23 try:
24 _field = fields.pop()
25 _value = vals.pop()
26 except IndexError:
27 break
28 else:
29 if _value.strip():
30 res.append((_field, _value))
31 if vals:
32 res.append(('street', ', '.join(vals)))
33 return res
34
35
36 class CALegislatorScraper(LegislatorScraper):
37
38 jurisdiction = 'ca'
39
40 urls = {'upper': 'http://senate.ca.gov/senators',
41 'lower': 'http://assembly.ca.gov/assemblymembers'}
42
43 def scrape(self, chamber, term):
44
45 url = self.urls[chamber]
46 html = self.get(url).text
47 doc = lxml.html.fromstring(html)
48
49 if chamber == 'lower':
50 rows = doc.xpath('//table/tbody/tr')
51 parse = self.parse_assembly
52 else:
53 rows = doc.xpath('//div[contains(@class, "views-row")]')
54 parse = self.parse_senate
55
56 for tr in rows:
57 legislator = parse(tr, term, chamber)
58 if legislator is None:
59 continue
60 if 'Vacant' in legislator['full_name']:
61 continue
62
63 legislator.add_source(url)
64 legislator['full_name'] = legislator['full_name'].strip()
65 self.save_legislator(legislator)
66
67 def parse_senate(self, div, term, chamber):
68 name = div.xpath('.//h3/text()')[0]
69 if name.endswith(' (R)'):
70 party = 'Republican'
71 elif name.endswith(' (D)'):
72 party = 'Democratic'
73 else:
74 self.warning('skipping ' + name)
75 return None
76 name = name.split(' (')[0]
77
78 district = div.xpath(
79 './/div[contains(@class, "senator-district")]/div/text()'
80 )[0].strip().lstrip('0')
81 photo_url = div.xpath('.//img/@src')[0]
82 url = div.xpath('.//a/@href')[0]
83
84 leg = Legislator(term, chamber, full_name=name, party=party, district=district,
85 photo_url=photo_url, url=url)
86
87 # CA senators have working emails, but they're not putting them on
88 # their public pages anymore
89 email = self._construct_email(chamber, name)
90
91 for addr in div.xpath('.//div[contains(@class, "views-field-field-senator-capitol-office")]//p'):
92 addr, phone = addr.text_content().split('; ')
93 leg.add_office(
94 'capitol', 'Senate Office',
95 address=addr.strip(), phone=phone.strip(), email=email)
96
97 for addr in div.xpath('.//div[contains(@class, "views-field-field-senator-district-office")]//p'):
98 for addr in addr.text_content().strip().splitlines():
99 try:
100 addr, phone = addr.strip().replace(u'\xa0', ' ').split('; ')
101 leg.add_office(
102 'district', 'District Office',
103 address=addr.strip(), phone=phone.strip())
104 except ValueError:
105 addr = addr.strip().replace(u'\xa0', ' ')
106 leg.add_office('district', 'District Office', address=addr)
107
108 return leg
109
110 def parse_assembly(self, tr, term, chamber):
111 '''
112 Given a tr element, get specific data from it.
113 '''
114
115 strip = methodcaller('strip')
116
117 xpath = 'td[contains(@class, "views-field-field-%s-%s")]%s'
118
119 xp = {
120 'url': [('lname-sort', '/a[not(contains(text(), "edit"))]/@href')],
121 'district': [('district', '/text()')],
122 'party': [('party', '/text()')],
123 'full_name': [('office-information', '/a[not(contains(text(), "edit"))]/text()')],
124 'address': [('office-information', '/h3/following-sibling::text()'),
125 ('office-information', '/p/text()')]
126 }
127
128 titles = {'upper': 'senator', 'lower': 'member'}
129
130 funcs = {
131 'full_name': lambda s: re.sub( # "Assembly" is misspelled once
132 r'Contact Assembl?y Member', '', s).strip(),
133 'address': parse_address,
134 }
135
136 rubberstamp = lambda _: _
137 tr_xpath = tr.xpath
138 res = collections.defaultdict(list)
139 for k, xpath_info in xp.items():
140 for vals in xpath_info:
141 f = funcs.get(k, rubberstamp)
142 vals = (titles[chamber],) + vals
143 vals = map(f, map(strip, tr_xpath(xpath % vals)))
144 res[k].extend(vals)
145
146 # Photo.
147 try:
148 res['photo_url'] = tr_xpath('td/p/img/@src')[0]
149 except IndexError:
150 pass
151
152 # Remove junk from assembly member names.
153 junk = 'Contact Assembly Member '
154
155 try:
156 res['full_name'] = res['full_name'].pop().replace(junk, '')
157 except IndexError:
158 return
159
160 # Addresses.
161 addresses = res['address']
162 try:
163 addresses = map(dict, filter(None, addresses))
164 except ValueError:
165 # Sometimes legislators only have one address, in which
166 # case this awful hack is helpful.
167 addresses = map(dict, filter(None, [addresses]))
168
169 for address in addresses[:]:
170
171 # Toss results that don't have required keys.
172 if not set(['street', 'city', 'state_zip']) < set(address):
173 if address in addresses:
174 addresses.remove(address)
175
176 # Re-key the addresses
177 offices = []
178 if addresses:
179 # Mariko Yamada's addresses wouldn't parse correctly as of
180 # 3/23/2013, so here we're forced to test whether any
181 # addresses were even found.
182 addresses[0].update(type='capitol', name='Capitol Office')
183 offices.append(addresses[0])
184
185 # CA reps have working emails, but they're not putting them on
186 # their public pages anymore
187 offices[0]['email'] = \
188 self._construct_email(chamber, res['full_name'])
189
190 for office in addresses[1:]:
191 office.update(type='district', name='District Office')
192 offices.append(office)
193
194 for office in offices:
195 street = office['street']
196 state_zip = re.sub(r'\s+', ' ', office['state_zip'])
197 street = '%s\n%s, %s' % (street, office['city'], state_zip)
198 office['address'] = street
199 office['fax'] = None
200 if 'email' not in office:
201 office['email'] = None
202
203 del office['street'], office['city'], office['state_zip']
204
205 res['offices'] = offices
206 del res['address']
207
208 # Normalize party.
209 for party in res['party'][:]:
210 if party:
211 if party == 'Democrat':
212 party = 'Democratic'
213 res['party'] = party
214 break
215 else:
216 res['party'] = None
217
218 # Mariko Yamada also didn't have a url that lxml would parse
219 # as of 3/22/2013.
220 if res['url']:
221 res['url'] = res['url'].pop()
222 else:
223 del res['url']
224
225 # strip leading zero
226 res['district'] = str(int(res['district'].pop()))
227
228 # Add a source for the url.
229 leg = Legislator(term, chamber, **res)
230 leg.update(**res)
231
232 return leg
233
234 def _construct_email(self, chamber, full_name):
235 last_name = re.split(r'\s+', full_name)[-1].lower()
236
237 # translate accents to non-accented versions for use in an
238 # email and drop apostrophes
239 last_name = ''.join(c for c in
240 unicodedata.normalize('NFD', unicode(last_name))
241 if unicodedata.category(c) != 'Mn')
242 last_name = last_name.replace("'", "")
243
244 if chamber == 'lower':
245 return 'assemblymember.' + last_name + '@assembly.ca.gov'
246 else:
247 return 'senator.' + last_name + '@sen.ca.gov'
248
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/ca/legislators.py b/openstates/ca/legislators.py
--- a/openstates/ca/legislators.py
+++ b/openstates/ca/legislators.py
@@ -244,5 +244,4 @@
if chamber == 'lower':
return 'assemblymember.' + last_name + '@assembly.ca.gov'
else:
- return 'senator.' + last_name + '@sen.ca.gov'
-
+ return 'senator.' + last_name + '@senator.ca.gov'
|
{"golden_diff": "diff --git a/openstates/ca/legislators.py b/openstates/ca/legislators.py\n--- a/openstates/ca/legislators.py\n+++ b/openstates/ca/legislators.py\n@@ -244,5 +244,4 @@\n if chamber == 'lower':\n return 'assemblymember.' + last_name + '@assembly.ca.gov'\n else:\n- return 'senator.' + last_name + '@sen.ca.gov'\n- \n+ return 'senator.' + last_name + '@senator.ca.gov'\n", "issue": "CA: email issue\nState: CA\r\n\r\n(via contact)\r\nI\u2019d like to call your attention to an error on your email address listings for State Senators in California. Your database lists an address in the form of [email protected], whereas the correct email is [email protected].\r\n \r\nIt does appear that Senate staffers are [email protected].\r\n \r\nWe\u2019ve been using your service, and our advocates\u2019 emails are bouncing back.\r\n\r\n\n", "before_files": [{"content": "import re\nimport collections\nimport unicodedata\nfrom operator import methodcaller\n\nimport lxml.html\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\n\n\ndef parse_address(s, split=re.compile(r'[;,]\\s{,3}').split):\n '''\n Extract address fields from text.\n '''\n # If the address isn't formatted correctly, skip for now.\n if ';' not in s:\n return []\n\n fields = 'city state_zip phone'.split()\n vals = split(s)\n res = []\n while True:\n try:\n _field = fields.pop()\n _value = vals.pop()\n except IndexError:\n break\n else:\n if _value.strip():\n res.append((_field, _value))\n if vals:\n res.append(('street', ', '.join(vals)))\n return res\n\n\nclass CALegislatorScraper(LegislatorScraper):\n\n jurisdiction = 'ca'\n\n urls = {'upper': 'http://senate.ca.gov/senators',\n 'lower': 'http://assembly.ca.gov/assemblymembers'}\n\n def scrape(self, chamber, term):\n\n url = self.urls[chamber]\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n\n if chamber == 'lower':\n rows = doc.xpath('//table/tbody/tr')\n parse = self.parse_assembly\n else:\n rows = doc.xpath('//div[contains(@class, \"views-row\")]')\n parse = self.parse_senate\n\n for tr in rows:\n legislator = parse(tr, term, chamber)\n if legislator is None:\n continue\n if 'Vacant' in legislator['full_name']:\n continue\n\n legislator.add_source(url)\n legislator['full_name'] = legislator['full_name'].strip()\n self.save_legislator(legislator)\n\n def parse_senate(self, div, term, chamber):\n name = div.xpath('.//h3/text()')[0]\n if name.endswith(' (R)'):\n party = 'Republican'\n elif name.endswith(' (D)'):\n party = 'Democratic'\n else:\n self.warning('skipping ' + name)\n return None\n name = name.split(' (')[0]\n\n district = div.xpath(\n './/div[contains(@class, \"senator-district\")]/div/text()'\n )[0].strip().lstrip('0')\n photo_url = div.xpath('.//img/@src')[0]\n url = div.xpath('.//a/@href')[0]\n\n leg = Legislator(term, chamber, full_name=name, party=party, district=district,\n photo_url=photo_url, url=url)\n\n # CA senators have working emails, but they're not putting them on\n # their public pages anymore\n email = self._construct_email(chamber, name)\n\n for addr in div.xpath('.//div[contains(@class, \"views-field-field-senator-capitol-office\")]//p'):\n addr, phone = addr.text_content().split('; ')\n leg.add_office(\n 'capitol', 'Senate Office',\n address=addr.strip(), phone=phone.strip(), email=email)\n\n for addr in div.xpath('.//div[contains(@class, \"views-field-field-senator-district-office\")]//p'):\n for addr in addr.text_content().strip().splitlines():\n try:\n addr, phone = addr.strip().replace(u'\\xa0', ' ').split('; 
')\n leg.add_office(\n 'district', 'District Office',\n address=addr.strip(), phone=phone.strip())\n except ValueError:\n addr = addr.strip().replace(u'\\xa0', ' ')\n leg.add_office('district', 'District Office', address=addr)\n\n return leg\n\n def parse_assembly(self, tr, term, chamber):\n '''\n Given a tr element, get specific data from it.\n '''\n\n strip = methodcaller('strip')\n\n xpath = 'td[contains(@class, \"views-field-field-%s-%s\")]%s'\n\n xp = {\n 'url': [('lname-sort', '/a[not(contains(text(), \"edit\"))]/@href')],\n 'district': [('district', '/text()')],\n 'party': [('party', '/text()')],\n 'full_name': [('office-information', '/a[not(contains(text(), \"edit\"))]/text()')],\n 'address': [('office-information', '/h3/following-sibling::text()'),\n ('office-information', '/p/text()')]\n }\n\n titles = {'upper': 'senator', 'lower': 'member'}\n\n funcs = {\n 'full_name': lambda s: re.sub( # \"Assembly\" is misspelled once\n r'Contact Assembl?y Member', '', s).strip(),\n 'address': parse_address,\n }\n\n rubberstamp = lambda _: _\n tr_xpath = tr.xpath\n res = collections.defaultdict(list)\n for k, xpath_info in xp.items():\n for vals in xpath_info:\n f = funcs.get(k, rubberstamp)\n vals = (titles[chamber],) + vals\n vals = map(f, map(strip, tr_xpath(xpath % vals)))\n res[k].extend(vals)\n\n # Photo.\n try:\n res['photo_url'] = tr_xpath('td/p/img/@src')[0]\n except IndexError:\n pass\n\n # Remove junk from assembly member names.\n junk = 'Contact Assembly Member '\n\n try:\n res['full_name'] = res['full_name'].pop().replace(junk, '')\n except IndexError:\n return\n\n # Addresses.\n addresses = res['address']\n try:\n addresses = map(dict, filter(None, addresses))\n except ValueError:\n # Sometimes legislators only have one address, in which\n # case this awful hack is helpful.\n addresses = map(dict, filter(None, [addresses]))\n\n for address in addresses[:]:\n\n # Toss results that don't have required keys.\n if not set(['street', 'city', 'state_zip']) < set(address):\n if address in addresses:\n addresses.remove(address)\n\n # Re-key the addresses\n offices = []\n if addresses:\n # Mariko Yamada's addresses wouldn't parse correctly as of\n # 3/23/2013, so here we're forced to test whether any\n # addresses were even found.\n addresses[0].update(type='capitol', name='Capitol Office')\n offices.append(addresses[0])\n\n # CA reps have working emails, but they're not putting them on\n # their public pages anymore\n offices[0]['email'] = \\\n self._construct_email(chamber, res['full_name'])\n\n for office in addresses[1:]:\n office.update(type='district', name='District Office')\n offices.append(office)\n\n for office in offices:\n street = office['street']\n state_zip = re.sub(r'\\s+', ' ', office['state_zip'])\n street = '%s\\n%s, %s' % (street, office['city'], state_zip)\n office['address'] = street\n office['fax'] = None\n if 'email' not in office:\n office['email'] = None\n\n del office['street'], office['city'], office['state_zip']\n\n res['offices'] = offices\n del res['address']\n\n # Normalize party.\n for party in res['party'][:]:\n if party:\n if party == 'Democrat':\n party = 'Democratic'\n res['party'] = party\n break\n else:\n res['party'] = None\n\n # Mariko Yamada also didn't have a url that lxml would parse\n # as of 3/22/2013.\n if res['url']:\n res['url'] = res['url'].pop()\n else:\n del res['url']\n\n # strip leading zero\n res['district'] = str(int(res['district'].pop()))\n\n # Add a source for the url.\n leg = Legislator(term, chamber, **res)\n 
leg.update(**res)\n\n return leg\n\n def _construct_email(self, chamber, full_name):\n last_name = re.split(r'\\s+', full_name)[-1].lower()\n\n # translate accents to non-accented versions for use in an\n # email and drop apostrophes\n last_name = ''.join(c for c in\n unicodedata.normalize('NFD', unicode(last_name))\n if unicodedata.category(c) != 'Mn')\n last_name = last_name.replace(\"'\", \"\")\n\n if chamber == 'lower':\n return 'assemblymember.' + last_name + '@assembly.ca.gov'\n else:\n return 'senator.' + last_name + '@sen.ca.gov'\n \n", "path": "openstates/ca/legislators.py"}], "after_files": [{"content": "import re\nimport collections\nimport unicodedata\nfrom operator import methodcaller\n\nimport lxml.html\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\n\n\ndef parse_address(s, split=re.compile(r'[;,]\\s{,3}').split):\n '''\n Extract address fields from text.\n '''\n # If the address isn't formatted correctly, skip for now.\n if ';' not in s:\n return []\n\n fields = 'city state_zip phone'.split()\n vals = split(s)\n res = []\n while True:\n try:\n _field = fields.pop()\n _value = vals.pop()\n except IndexError:\n break\n else:\n if _value.strip():\n res.append((_field, _value))\n if vals:\n res.append(('street', ', '.join(vals)))\n return res\n\n\nclass CALegislatorScraper(LegislatorScraper):\n\n jurisdiction = 'ca'\n\n urls = {'upper': 'http://senate.ca.gov/senators',\n 'lower': 'http://assembly.ca.gov/assemblymembers'}\n\n def scrape(self, chamber, term):\n\n url = self.urls[chamber]\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n\n if chamber == 'lower':\n rows = doc.xpath('//table/tbody/tr')\n parse = self.parse_assembly\n else:\n rows = doc.xpath('//div[contains(@class, \"views-row\")]')\n parse = self.parse_senate\n\n for tr in rows:\n legislator = parse(tr, term, chamber)\n if legislator is None:\n continue\n if 'Vacant' in legislator['full_name']:\n continue\n\n legislator.add_source(url)\n legislator['full_name'] = legislator['full_name'].strip()\n self.save_legislator(legislator)\n\n def parse_senate(self, div, term, chamber):\n name = div.xpath('.//h3/text()')[0]\n if name.endswith(' (R)'):\n party = 'Republican'\n elif name.endswith(' (D)'):\n party = 'Democratic'\n else:\n self.warning('skipping ' + name)\n return None\n name = name.split(' (')[0]\n\n district = div.xpath(\n './/div[contains(@class, \"senator-district\")]/div/text()'\n )[0].strip().lstrip('0')\n photo_url = div.xpath('.//img/@src')[0]\n url = div.xpath('.//a/@href')[0]\n\n leg = Legislator(term, chamber, full_name=name, party=party, district=district,\n photo_url=photo_url, url=url)\n\n # CA senators have working emails, but they're not putting them on\n # their public pages anymore\n email = self._construct_email(chamber, name)\n\n for addr in div.xpath('.//div[contains(@class, \"views-field-field-senator-capitol-office\")]//p'):\n addr, phone = addr.text_content().split('; ')\n leg.add_office(\n 'capitol', 'Senate Office',\n address=addr.strip(), phone=phone.strip(), email=email)\n\n for addr in div.xpath('.//div[contains(@class, \"views-field-field-senator-district-office\")]//p'):\n for addr in addr.text_content().strip().splitlines():\n try:\n addr, phone = addr.strip().replace(u'\\xa0', ' ').split('; ')\n leg.add_office(\n 'district', 'District Office',\n address=addr.strip(), phone=phone.strip())\n except ValueError:\n addr = addr.strip().replace(u'\\xa0', ' ')\n leg.add_office('district', 'District Office', address=addr)\n\n return leg\n\n def 
parse_assembly(self, tr, term, chamber):\n '''\n Given a tr element, get specific data from it.\n '''\n\n strip = methodcaller('strip')\n\n xpath = 'td[contains(@class, \"views-field-field-%s-%s\")]%s'\n\n xp = {\n 'url': [('lname-sort', '/a[not(contains(text(), \"edit\"))]/@href')],\n 'district': [('district', '/text()')],\n 'party': [('party', '/text()')],\n 'full_name': [('office-information', '/a[not(contains(text(), \"edit\"))]/text()')],\n 'address': [('office-information', '/h3/following-sibling::text()'),\n ('office-information', '/p/text()')]\n }\n\n titles = {'upper': 'senator', 'lower': 'member'}\n\n funcs = {\n 'full_name': lambda s: re.sub( # \"Assembly\" is misspelled once\n r'Contact Assembl?y Member', '', s).strip(),\n 'address': parse_address,\n }\n\n rubberstamp = lambda _: _\n tr_xpath = tr.xpath\n res = collections.defaultdict(list)\n for k, xpath_info in xp.items():\n for vals in xpath_info:\n f = funcs.get(k, rubberstamp)\n vals = (titles[chamber],) + vals\n vals = map(f, map(strip, tr_xpath(xpath % vals)))\n res[k].extend(vals)\n\n # Photo.\n try:\n res['photo_url'] = tr_xpath('td/p/img/@src')[0]\n except IndexError:\n pass\n\n # Remove junk from assembly member names.\n junk = 'Contact Assembly Member '\n\n try:\n res['full_name'] = res['full_name'].pop().replace(junk, '')\n except IndexError:\n return\n\n # Addresses.\n addresses = res['address']\n try:\n addresses = map(dict, filter(None, addresses))\n except ValueError:\n # Sometimes legislators only have one address, in which\n # case this awful hack is helpful.\n addresses = map(dict, filter(None, [addresses]))\n\n for address in addresses[:]:\n\n # Toss results that don't have required keys.\n if not set(['street', 'city', 'state_zip']) < set(address):\n if address in addresses:\n addresses.remove(address)\n\n # Re-key the addresses\n offices = []\n if addresses:\n # Mariko Yamada's addresses wouldn't parse correctly as of\n # 3/23/2013, so here we're forced to test whether any\n # addresses were even found.\n addresses[0].update(type='capitol', name='Capitol Office')\n offices.append(addresses[0])\n\n # CA reps have working emails, but they're not putting them on\n # their public pages anymore\n offices[0]['email'] = \\\n self._construct_email(chamber, res['full_name'])\n\n for office in addresses[1:]:\n office.update(type='district', name='District Office')\n offices.append(office)\n\n for office in offices:\n street = office['street']\n state_zip = re.sub(r'\\s+', ' ', office['state_zip'])\n street = '%s\\n%s, %s' % (street, office['city'], state_zip)\n office['address'] = street\n office['fax'] = None\n if 'email' not in office:\n office['email'] = None\n\n del office['street'], office['city'], office['state_zip']\n\n res['offices'] = offices\n del res['address']\n\n # Normalize party.\n for party in res['party'][:]:\n if party:\n if party == 'Democrat':\n party = 'Democratic'\n res['party'] = party\n break\n else:\n res['party'] = None\n\n # Mariko Yamada also didn't have a url that lxml would parse\n # as of 3/22/2013.\n if res['url']:\n res['url'] = res['url'].pop()\n else:\n del res['url']\n\n # strip leading zero\n res['district'] = str(int(res['district'].pop()))\n\n # Add a source for the url.\n leg = Legislator(term, chamber, **res)\n leg.update(**res)\n\n return leg\n\n def _construct_email(self, chamber, full_name):\n last_name = re.split(r'\\s+', full_name)[-1].lower()\n\n # translate accents to non-accented versions for use in an\n # email and drop apostrophes\n last_name = ''.join(c for c in\n 
unicodedata.normalize('NFD', unicode(last_name))\n if unicodedata.category(c) != 'Mn')\n last_name = last_name.replace(\"'\", \"\")\n\n if chamber == 'lower':\n return 'assemblymember.' + last_name + '@assembly.ca.gov'\n else:\n return 'senator.' + last_name + '@senator.ca.gov'\n", "path": "openstates/ca/legislators.py"}]}
| 2,929 | 121 |
gh_patches_debug_24365 | rasdani/github-patches | git_diff | canonical__snapcraft-4370 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remote-build: add control logic when the project is in a git repo
### What needs to get done
This adds control logic to determine whether to execute the new or legacy remote-build code.
If the project is not part of a git repository, then execute the legacy remote-build code. Otherwise, execute the new remote-build code.
This check can be done by using the GitPython wrapper class (#4320).

### Why it needs to get done
This check exists to minimize changes for existing workflows.
--- END ISSUE ---
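For orientation, a minimal sketch of the check the issue describes, assuming GitPython is used directly (the wrapper class from #4320 is not reproduced here; the caller would run the legacy remote-build code when the check returns False):

```python
from pathlib import Path

from git import InvalidGitRepositoryError, Repo  # GitPython


def is_repo(path: Path) -> bool:
    """Return True if `path` is inside a git working tree."""
    try:
        Repo(path, search_parent_directories=True)
        return True
    except InvalidGitRepositoryError:
        return False


def use_new_remote_build() -> bool:
    """Per the issue: projects inside a git repository take the new code path."""
    return is_repo(Path.cwd())
```

In `_run_remote_build()` in the file below, this decision would sit alongside the existing base and strategy checks.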
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snapcraft/commands/remote.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright 2022-2023 Canonical Ltd.
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 """Snapcraft remote build command."""
18
19 import argparse
20 import os
21 import textwrap
22 from enum import Enum
23 from typing import Optional
24
25 from craft_cli import BaseCommand, emit
26 from craft_cli.helptexts import HIDDEN
27 from overrides import overrides
28
29 from snapcraft.errors import MaintenanceBase, SnapcraftError
30 from snapcraft.legacy_cli import run_legacy
31 from snapcraft.parts import yaml_utils
32 from snapcraft.utils import confirm_with_user, humanize_list
33 from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError
34
35 _CONFIRMATION_PROMPT = (
36 "All data sent to remote builders will be publicly available. "
37 "Are you sure you want to continue?"
38 )
39
40
41 _STRATEGY_ENVVAR = "SNAPCRAFT_REMOTE_BUILD_STRATEGY"
42
43
44 class _Strategies(Enum):
45 """Possible values of the build strategy."""
46
47 DISABLE_FALLBACK = "disable-fallback"
48 FORCE_FALLBACK = "force-fallback"
49
50
51 class RemoteBuildCommand(BaseCommand):
52 """Command passthrough for the remote-build command."""
53
54 name = "remote-build"
55 help_msg = "Dispatch a snap for remote build"
56 overview = textwrap.dedent(
57 """
58 Command remote-build sends the current project to be built
59 remotely. After the build is complete, packages for each
60 architecture are retrieved and will be available in the
61 local filesystem.
62
63 If not specified in the snapcraft.yaml file, the list of
64 architectures to build can be set using the --build-on option.
65 If both are specified, an error will occur.
66
67 Interrupted remote builds can be resumed using the --recover
68 option, followed by the build number informed when the remote
69 build was originally dispatched. The current state of the
70 remote build for each architecture can be checked using the
71 --status option."""
72 )
73
74 @overrides
75 def fill_parser(self, parser: argparse.ArgumentParser) -> None:
76 parser.add_argument(
77 "--recover", action="store_true", help="recover an interrupted build"
78 )
79 parser.add_argument(
80 "--status", action="store_true", help="display remote build status"
81 )
82 parser_target = parser.add_mutually_exclusive_group()
83 parser_target.add_argument(
84 "--build-on",
85 metavar="arch",
86 nargs="+",
87 help=HIDDEN,
88 )
89 parser_target.add_argument(
90 "--build-for",
91 metavar="arch",
92 nargs="+",
93 help="architecture to build for",
94 )
95 parser.add_argument(
96 "--build-id", metavar="build-id", help="specific build id to retrieve"
97 )
98 parser.add_argument(
99 "--launchpad-accept-public-upload",
100 action="store_true",
101 help="acknowledge that uploaded code will be publicly available.",
102 )
103
104 def _get_build_strategy(self) -> Optional[_Strategies]:
105 """Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.
106
107 :returns: The strategy or None.
108
109 :raises SnapcraftError: If the variable is set to an invalid value.
110 """
111 strategy = os.getenv(_STRATEGY_ENVVAR)
112
113 if not strategy:
114 return None
115
116 try:
117 return _Strategies(strategy)
118 except ValueError as err:
119 valid_strategies = humanize_list(
120 (strategy.value for strategy in _Strategies), "and"
121 )
122 raise SnapcraftError(
123 f"Unknown value {strategy!r} in environment variable "
124 f"{_STRATEGY_ENVVAR!r}. Valid values are {valid_strategies}."
125 ) from err
126
127 def _get_effective_base(self) -> str:
128 """Get a valid effective base from the project's snapcraft.yaml.
129
130 :returns: The project's effective base.
131
132 :raises SnapcraftError: If the base is unknown or missing or if the
133 snapcraft.yaml cannot be loaded.
134 :raises MaintenanceBase: If the base is not supported
135 """
136 snapcraft_yaml = yaml_utils.get_snap_project().project_file
137
138 with open(snapcraft_yaml, encoding="utf-8") as file:
139 base = yaml_utils.get_base(file)
140
141 if base is None:
142 raise SnapcraftError(
143 f"Could not determine base from {str(snapcraft_yaml)!r}."
144 )
145
146 emit.debug(f"Got base {base!r} from {str(snapcraft_yaml)!r}.")
147
148 if base in yaml_utils.ESM_BASES:
149 raise MaintenanceBase(base)
150
151 if base not in yaml_utils.BASES:
152 raise SnapcraftError(f"Unknown base {base!r} in {str(snapcraft_yaml)!r}.")
153
154 return base
155
156 def _run_remote_build(self, base: str) -> None:
157 # bases newer than core22 must use the new remote-build
158 if base in yaml_utils.CURRENT_BASES - {"core22"}:
159 emit.debug(
160 "Using fallback remote-build because new remote-build is not available."
161 )
162 # TODO: use new remote-build code (#4323)
163 run_legacy()
164 return
165
166 strategy = self._get_build_strategy()
167
168 if strategy == _Strategies.DISABLE_FALLBACK:
169 emit.debug(
170 f"Environment variable {_STRATEGY_ENVVAR!r} is "
171 f"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback "
172 "remote-build because new remote-build is not available."
173 )
174 run_legacy()
175 return
176
177 if strategy == _Strategies.FORCE_FALLBACK:
178 emit.debug(
179 "Running fallback remote-build because environment variable "
180 f"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}."
181 )
182 run_legacy()
183 return
184
185 emit.debug("Running fallback remote-build.")
186 run_legacy()
187
188 @overrides
189 def run(self, parsed_args) -> None:
190 if os.getenv("SUDO_USER") and os.geteuid() == 0:
191 emit.message(
192 "Running with 'sudo' may cause permission errors and is discouraged."
193 )
194
195 emit.message(
196 "snapcraft remote-build is experimental and is subject to change "
197 "- use with caution."
198 )
199
200 if parsed_args.build_on:
201 emit.message("Use --build-for instead of --build-on")
202 parsed_args.build_for = parsed_args.build_on
203
204 if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(
205 _CONFIRMATION_PROMPT
206 ):
207 raise AcceptPublicUploadError()
208
209 base = self._get_effective_base()
210 self._run_remote_build(base)
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snapcraft/commands/remote.py b/snapcraft/commands/remote.py
--- a/snapcraft/commands/remote.py
+++ b/snapcraft/commands/remote.py
@@ -20,6 +20,7 @@
import os
import textwrap
from enum import Enum
+from pathlib import Path
from typing import Optional
from craft_cli import BaseCommand, emit
@@ -29,6 +30,7 @@
from snapcraft.errors import MaintenanceBase, SnapcraftError
from snapcraft.legacy_cli import run_legacy
from snapcraft.parts import yaml_utils
+from snapcraft.remote import is_repo
from snapcraft.utils import confirm_with_user, humanize_list
from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError
@@ -182,6 +184,14 @@
run_legacy()
return
+ if is_repo(Path().absolute()):
+ emit.debug(
+ "Project is in a git repository but running fallback remote-build "
+ "because new remote-build is not available."
+ )
+ run_legacy()
+ return
+
emit.debug("Running fallback remote-build.")
run_legacy()
|
{"golden_diff": "diff --git a/snapcraft/commands/remote.py b/snapcraft/commands/remote.py\n--- a/snapcraft/commands/remote.py\n+++ b/snapcraft/commands/remote.py\n@@ -20,6 +20,7 @@\n import os\n import textwrap\n from enum import Enum\n+from pathlib import Path\n from typing import Optional\n \n from craft_cli import BaseCommand, emit\n@@ -29,6 +30,7 @@\n from snapcraft.errors import MaintenanceBase, SnapcraftError\n from snapcraft.legacy_cli import run_legacy\n from snapcraft.parts import yaml_utils\n+from snapcraft.remote import is_repo\n from snapcraft.utils import confirm_with_user, humanize_list\n from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n \n@@ -182,6 +184,14 @@\n run_legacy()\n return\n \n+ if is_repo(Path().absolute()):\n+ emit.debug(\n+ \"Project is in a git repository but running fallback remote-build \"\n+ \"because new remote-build is not available.\"\n+ )\n+ run_legacy()\n+ return\n+\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n", "issue": "remote-build: add control logic when the project is in a git repo\n### What needs to get done\n\nThis adds control logic to determine whether to execute the new or legacy remote-build code.\r\n\r\nIf the project is not part of a git repository, then execute the legacy remote-build code. Otherwise, execute the new remote-build code.\r\n\r\nThis check can be done by using the GitPython wrapper class (#4320).\r\n\r\n\n\n### Why it needs to get done\n\nThis check exists to minimize changes for existing workflows.\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft remote build command.\"\"\"\n\nimport argparse\nimport os\nimport textwrap\nfrom enum import Enum\nfrom typing import Optional\n\nfrom craft_cli import BaseCommand, emit\nfrom craft_cli.helptexts import HIDDEN\nfrom overrides import overrides\n\nfrom snapcraft.errors import MaintenanceBase, SnapcraftError\nfrom snapcraft.legacy_cli import run_legacy\nfrom snapcraft.parts import yaml_utils\nfrom snapcraft.utils import confirm_with_user, humanize_list\nfrom snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n\n_CONFIRMATION_PROMPT = (\n \"All data sent to remote builders will be publicly available. \"\n \"Are you sure you want to continue?\"\n)\n\n\n_STRATEGY_ENVVAR = \"SNAPCRAFT_REMOTE_BUILD_STRATEGY\"\n\n\nclass _Strategies(Enum):\n \"\"\"Possible values of the build strategy.\"\"\"\n\n DISABLE_FALLBACK = \"disable-fallback\"\n FORCE_FALLBACK = \"force-fallback\"\n\n\nclass RemoteBuildCommand(BaseCommand):\n \"\"\"Command passthrough for the remote-build command.\"\"\"\n\n name = \"remote-build\"\n help_msg = \"Dispatch a snap for remote build\"\n overview = textwrap.dedent(\n \"\"\"\n Command remote-build sends the current project to be built\n remotely. 
After the build is complete, packages for each\n architecture are retrieved and will be available in the\n local filesystem.\n\n If not specified in the snapcraft.yaml file, the list of\n architectures to build can be set using the --build-on option.\n If both are specified, an error will occur.\n\n Interrupted remote builds can be resumed using the --recover\n option, followed by the build number informed when the remote\n build was originally dispatched. The current state of the\n remote build for each architecture can be checked using the\n --status option.\"\"\"\n )\n\n @overrides\n def fill_parser(self, parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--recover\", action=\"store_true\", help=\"recover an interrupted build\"\n )\n parser.add_argument(\n \"--status\", action=\"store_true\", help=\"display remote build status\"\n )\n parser_target = parser.add_mutually_exclusive_group()\n parser_target.add_argument(\n \"--build-on\",\n metavar=\"arch\",\n nargs=\"+\",\n help=HIDDEN,\n )\n parser_target.add_argument(\n \"--build-for\",\n metavar=\"arch\",\n nargs=\"+\",\n help=\"architecture to build for\",\n )\n parser.add_argument(\n \"--build-id\", metavar=\"build-id\", help=\"specific build id to retrieve\"\n )\n parser.add_argument(\n \"--launchpad-accept-public-upload\",\n action=\"store_true\",\n help=\"acknowledge that uploaded code will be publicly available.\",\n )\n\n def _get_build_strategy(self) -> Optional[_Strategies]:\n \"\"\"Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.\n\n :returns: The strategy or None.\n\n :raises SnapcraftError: If the variable is set to an invalid value.\n \"\"\"\n strategy = os.getenv(_STRATEGY_ENVVAR)\n\n if not strategy:\n return None\n\n try:\n return _Strategies(strategy)\n except ValueError as err:\n valid_strategies = humanize_list(\n (strategy.value for strategy in _Strategies), \"and\"\n )\n raise SnapcraftError(\n f\"Unknown value {strategy!r} in environment variable \"\n f\"{_STRATEGY_ENVVAR!r}. 
Valid values are {valid_strategies}.\"\n ) from err\n\n def _get_effective_base(self) -> str:\n \"\"\"Get a valid effective base from the project's snapcraft.yaml.\n\n :returns: The project's effective base.\n\n :raises SnapcraftError: If the base is unknown or missing or if the\n snapcraft.yaml cannot be loaded.\n :raises MaintenanceBase: If the base is not supported\n \"\"\"\n snapcraft_yaml = yaml_utils.get_snap_project().project_file\n\n with open(snapcraft_yaml, encoding=\"utf-8\") as file:\n base = yaml_utils.get_base(file)\n\n if base is None:\n raise SnapcraftError(\n f\"Could not determine base from {str(snapcraft_yaml)!r}.\"\n )\n\n emit.debug(f\"Got base {base!r} from {str(snapcraft_yaml)!r}.\")\n\n if base in yaml_utils.ESM_BASES:\n raise MaintenanceBase(base)\n\n if base not in yaml_utils.BASES:\n raise SnapcraftError(f\"Unknown base {base!r} in {str(snapcraft_yaml)!r}.\")\n\n return base\n\n def _run_remote_build(self, base: str) -> None:\n # bases newer than core22 must use the new remote-build\n if base in yaml_utils.CURRENT_BASES - {\"core22\"}:\n emit.debug(\n \"Using fallback remote-build because new remote-build is not available.\"\n )\n # TODO: use new remote-build code (#4323)\n run_legacy()\n return\n\n strategy = self._get_build_strategy()\n\n if strategy == _Strategies.DISABLE_FALLBACK:\n emit.debug(\n f\"Environment variable {_STRATEGY_ENVVAR!r} is \"\n f\"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback \"\n \"remote-build because new remote-build is not available.\"\n )\n run_legacy()\n return\n\n if strategy == _Strategies.FORCE_FALLBACK:\n emit.debug(\n \"Running fallback remote-build because environment variable \"\n f\"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}.\"\n )\n run_legacy()\n return\n\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n\n @overrides\n def run(self, parsed_args) -> None:\n if os.getenv(\"SUDO_USER\") and os.geteuid() == 0:\n emit.message(\n \"Running with 'sudo' may cause permission errors and is discouraged.\"\n )\n\n emit.message(\n \"snapcraft remote-build is experimental and is subject to change \"\n \"- use with caution.\"\n )\n\n if parsed_args.build_on:\n emit.message(\"Use --build-for instead of --build-on\")\n parsed_args.build_for = parsed_args.build_on\n\n if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(\n _CONFIRMATION_PROMPT\n ):\n raise AcceptPublicUploadError()\n\n base = self._get_effective_base()\n self._run_remote_build(base)\n", "path": "snapcraft/commands/remote.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft remote build command.\"\"\"\n\nimport argparse\nimport os\nimport textwrap\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom craft_cli import BaseCommand, emit\nfrom craft_cli.helptexts import HIDDEN\nfrom overrides import overrides\n\nfrom snapcraft.errors import MaintenanceBase, SnapcraftError\nfrom snapcraft.legacy_cli import run_legacy\nfrom snapcraft.parts import yaml_utils\nfrom snapcraft.remote import is_repo\nfrom snapcraft.utils import confirm_with_user, humanize_list\nfrom snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n\n_CONFIRMATION_PROMPT = (\n \"All data sent to remote builders will be publicly available. \"\n \"Are you sure you want to continue?\"\n)\n\n\n_STRATEGY_ENVVAR = \"SNAPCRAFT_REMOTE_BUILD_STRATEGY\"\n\n\nclass _Strategies(Enum):\n \"\"\"Possible values of the build strategy.\"\"\"\n\n DISABLE_FALLBACK = \"disable-fallback\"\n FORCE_FALLBACK = \"force-fallback\"\n\n\nclass RemoteBuildCommand(BaseCommand):\n \"\"\"Command passthrough for the remote-build command.\"\"\"\n\n name = \"remote-build\"\n help_msg = \"Dispatch a snap for remote build\"\n overview = textwrap.dedent(\n \"\"\"\n Command remote-build sends the current project to be built\n remotely. After the build is complete, packages for each\n architecture are retrieved and will be available in the\n local filesystem.\n\n If not specified in the snapcraft.yaml file, the list of\n architectures to build can be set using the --build-on option.\n If both are specified, an error will occur.\n\n Interrupted remote builds can be resumed using the --recover\n option, followed by the build number informed when the remote\n build was originally dispatched. The current state of the\n remote build for each architecture can be checked using the\n --status option.\"\"\"\n )\n\n @overrides\n def fill_parser(self, parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--recover\", action=\"store_true\", help=\"recover an interrupted build\"\n )\n parser.add_argument(\n \"--status\", action=\"store_true\", help=\"display remote build status\"\n )\n parser_target = parser.add_mutually_exclusive_group()\n parser_target.add_argument(\n \"--build-on\",\n metavar=\"arch\",\n nargs=\"+\",\n help=HIDDEN,\n )\n parser_target.add_argument(\n \"--build-for\",\n metavar=\"arch\",\n nargs=\"+\",\n help=\"architecture to build for\",\n )\n parser.add_argument(\n \"--build-id\", metavar=\"build-id\", help=\"specific build id to retrieve\"\n )\n parser.add_argument(\n \"--launchpad-accept-public-upload\",\n action=\"store_true\",\n help=\"acknowledge that uploaded code will be publicly available.\",\n )\n\n def _get_build_strategy(self) -> Optional[_Strategies]:\n \"\"\"Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.\n\n :returns: The strategy or None.\n\n :raises SnapcraftError: If the variable is set to an invalid value.\n \"\"\"\n strategy = os.getenv(_STRATEGY_ENVVAR)\n\n if not strategy:\n return None\n\n try:\n return _Strategies(strategy)\n except ValueError as err:\n valid_strategies = humanize_list(\n (strategy.value for strategy in _Strategies), \"and\"\n )\n raise SnapcraftError(\n f\"Unknown value {strategy!r} in environment variable \"\n f\"{_STRATEGY_ENVVAR!r}. 
Valid values are {valid_strategies}.\"\n ) from err\n\n def _get_effective_base(self) -> str:\n \"\"\"Get a valid effective base from the project's snapcraft.yaml.\n\n :returns: The project's effective base.\n\n :raises SnapcraftError: If the base is unknown or missing or if the\n snapcraft.yaml cannot be loaded.\n :raises MaintenanceBase: If the base is not supported\n \"\"\"\n snapcraft_yaml = yaml_utils.get_snap_project().project_file\n\n with open(snapcraft_yaml, encoding=\"utf-8\") as file:\n base = yaml_utils.get_base(file)\n\n if base is None:\n raise SnapcraftError(\n f\"Could not determine base from {str(snapcraft_yaml)!r}.\"\n )\n\n emit.debug(f\"Got base {base!r} from {str(snapcraft_yaml)!r}.\")\n\n if base in yaml_utils.ESM_BASES:\n raise MaintenanceBase(base)\n\n if base not in yaml_utils.BASES:\n raise SnapcraftError(f\"Unknown base {base!r} in {str(snapcraft_yaml)!r}.\")\n\n return base\n\n def _run_remote_build(self, base: str) -> None:\n # bases newer than core22 must use the new remote-build\n if base in yaml_utils.CURRENT_BASES - {\"core22\"}:\n emit.debug(\n \"Using fallback remote-build because new remote-build is not available.\"\n )\n # TODO: use new remote-build code (#4323)\n run_legacy()\n return\n\n strategy = self._get_build_strategy()\n\n if strategy == _Strategies.DISABLE_FALLBACK:\n emit.debug(\n f\"Environment variable {_STRATEGY_ENVVAR!r} is \"\n f\"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback \"\n \"remote-build because new remote-build is not available.\"\n )\n run_legacy()\n return\n\n if strategy == _Strategies.FORCE_FALLBACK:\n emit.debug(\n \"Running fallback remote-build because environment variable \"\n f\"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}.\"\n )\n run_legacy()\n return\n\n if is_repo(Path().absolute()):\n emit.debug(\n \"Project is in a git repository but running fallback remote-build \"\n \"because new remote-build is not available.\"\n )\n run_legacy()\n return\n\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n\n @overrides\n def run(self, parsed_args) -> None:\n if os.getenv(\"SUDO_USER\") and os.geteuid() == 0:\n emit.message(\n \"Running with 'sudo' may cause permission errors and is discouraged.\"\n )\n\n emit.message(\n \"snapcraft remote-build is experimental and is subject to change \"\n \"- use with caution.\"\n )\n\n if parsed_args.build_on:\n emit.message(\"Use --build-for instead of --build-on\")\n parsed_args.build_for = parsed_args.build_on\n\n if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(\n _CONFIRMATION_PROMPT\n ):\n raise AcceptPublicUploadError()\n\n base = self._get_effective_base()\n self._run_remote_build(base)\n", "path": "snapcraft/commands/remote.py"}]}
| 2,524 | 254 |
gh_patches_debug_3608 | rasdani/github-patches | git_diff | bokeh__bokeh-5620 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correctly handle data values <= 0 on a log scale
This is a continuation from issue #5389, partially addressed by PR #5477. There persists an issue where negative data is not handled correctly. All data <= 0 should be discarded before generating the plot.
As is, if `values = np.linspace(-0.1, 0.9)`, a JS error complains that it "could not set initial ranges", probably because `log(n)` for `n<=0` is not defined.
--- END ISSUE ---
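A minimal sketch of the filtering the issue asks for, assuming NumPy arrays (the variable names are illustrative, not taken from the example file below):

```python
import numpy as np
from bokeh.plotting import figure, show

values = np.linspace(-0.1, 0.9)
x = np.arange(len(values))

keep = values > 0  # drop points whose value has no defined logarithm
p = figure(y_axis_type="log")
p.circle(x[keep], values[keep], size=8)
show(p)
```

With only positive values left, the initial log-scale ranges can be computed instead of failing in the browser.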
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py`
Content:
```
1 from bokeh.plotting import figure, output_file, show
2
3 x = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
4 y = [10**xx for xx in x]
5
6 output_file("log.html")
7
8 # create a new plot with a log axis type
9 p = figure(plot_width=400, plot_height=400,
10 y_axis_type="log", y_range=(10**-1, 10**4))
11
12 p.line(x, y, line_width=2)
13 p.circle(x, y, fill_color="white", size=8)
14
15 show(p)
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py b/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py
--- a/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py
+++ b/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py
@@ -6,8 +6,7 @@
output_file("log.html")
# create a new plot with a log axis type
-p = figure(plot_width=400, plot_height=400,
- y_axis_type="log", y_range=(10**-1, 10**4))
+p = figure(plot_width=400, plot_height=400, y_axis_type="log")
p.line(x, y, line_width=2)
p.circle(x, y, fill_color="white", size=8)
|
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py b/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py\n--- a/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py\n+++ b/sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py\n@@ -6,8 +6,7 @@\n output_file(\"log.html\")\n \n # create a new plot with a log axis type\n-p = figure(plot_width=400, plot_height=400,\n- y_axis_type=\"log\", y_range=(10**-1, 10**4))\n+p = figure(plot_width=400, plot_height=400, y_axis_type=\"log\")\n \n p.line(x, y, line_width=2)\n p.circle(x, y, fill_color=\"white\", size=8)\n", "issue": "Correctly handle data values <= 0 on a log scale\nThis is a continuation from issue #5389, partially adressed by PR #5477. There persists an issue where negative data is not handled correctly. All data <= 0 should be discarded before generating the plot.\r\n\r\nAs is, if `values = np.linspace(-0.1, 0.9), a JS error complains that it \"could not set initial ranges\", probably because `log(n)` for `n<=0` is not defined.\n", "before_files": [{"content": "from bokeh.plotting import figure, output_file, show\n\nx = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\ny = [10**xx for xx in x]\n\noutput_file(\"log.html\")\n\n# create a new plot with a log axis type\np = figure(plot_width=400, plot_height=400,\n y_axis_type=\"log\", y_range=(10**-1, 10**4))\n\np.line(x, y, line_width=2)\np.circle(x, y, fill_color=\"white\", size=8)\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py"}], "after_files": [{"content": "from bokeh.plotting import figure, output_file, show\n\nx = [0.1, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\ny = [10**xx for xx in x]\n\noutput_file(\"log.html\")\n\n# create a new plot with a log axis type\np = figure(plot_width=400, plot_height=400, y_axis_type=\"log\")\n\np.line(x, y, line_width=2)\np.circle(x, y, fill_color=\"white\", size=8)\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/examples/plotting_log_scale_axis.py"}]}
| 554 | 186 |
gh_patches_debug_17576 | rasdani/github-patches | git_diff | nipy__nipype-2429 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mrtrix3 `dwi2response` - bad algorithm argument position
### Summary
The Mrtrix3 `dwi2response` CL wrapper generates the following runtime error:
```shell
dwi2response:
mrinfo: [ERROR] no diffusion encoding information found in image "<DWI_FILE>"
dwi2response: [ERROR] Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option
```
It turns out that the command generated by `nipype` does not respect (my version of) the Mrtrix3 CL format.
### Actual behavior
Generated command (not runnable):
```shell
dwi2response -fslgrad <BVEC_FILE> <BVAL_FILE> -mask <MASK_FILE> tournier <WM_FILE>
```
### Expected behavior
Runnable command:
```shell
dwi2response tournier -fslgrad <BVEC_FILE> <BVAL_FILE> -mask <MASK_FILE> <WM_FILE>
```
### Environment
- `MRtrix 3.0_RC2-117-gf098f097 dwi2response bin version: 3.0_RC2-117-gf098f097`
- `Python 2.7.12`
- `nipype v1.0.0`
### Quick and dirty solution
I'm really not sure how clean it is, but it worked for me; in the `ResponseSDInputSpec` class, I changed `position=-6` to `position=1` in the `algorithm` traits.
--- END ISSUE ---
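For reference, a sketch of the change the reporter describes, applied to the input spec from the file shown below (only the `position` value differs):

```python
from nipype.interfaces.base import traits
from nipype.interfaces.mrtrix3.base import MRTrix3BaseInputSpec


class ResponseSDInputSpec(MRTrix3BaseInputSpec):
    algorithm = traits.Enum(
        'msmt_5tt',
        'dhollander',
        'tournier',
        'tax',
        argstr='%s',
        position=1,  # was position=-6; renders the algorithm right after 'dwi2response'
        mandatory=True,
        desc='response estimation algorithm (multi-tissue)')
    # ... remaining fields unchanged from nipype/interfaces/mrtrix3/preprocess.py ...
```

This yields `dwi2response tournier -fslgrad ...`, matching the expected command above.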
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/interfaces/mrtrix3/preprocess.py`
Content:
```
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 et:
3 # -*- coding: utf-8 -*-
4 """
5 Change directory to provide relative paths for doctests
6 >>> import os
7 >>> filepath = os.path.dirname(os.path.realpath(__file__ ))
8 >>> datadir = os.path.realpath(os.path.join(filepath,
9 ... '../../testing/data'))
10 >>> os.chdir(datadir)
11
12 """
13 from __future__ import (print_function, division, unicode_literals,
14 absolute_import)
15
16 import os.path as op
17
18 from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
19 File, isdefined, Undefined)
20 from .base import MRTrix3BaseInputSpec, MRTrix3Base
21
22
23 class ResponseSDInputSpec(MRTrix3BaseInputSpec):
24 algorithm = traits.Enum(
25 'msmt_5tt',
26 'dhollander',
27 'tournier',
28 'tax',
29 argstr='%s',
30 position=-6,
31 mandatory=True,
32 desc='response estimation algorithm (multi-tissue)')
33 in_file = File(
34 exists=True,
35 argstr='%s',
36 position=-5,
37 mandatory=True,
38 desc='input DWI image')
39 mtt_file = File(argstr='%s', position=-4, desc='input 5tt image')
40 wm_file = File(
41 'wm.txt',
42 argstr='%s',
43 position=-3,
44 usedefault=True,
45 desc='output WM response text file')
46 gm_file = File(
47 argstr='%s', position=-2, desc='output GM response text file')
48 csf_file = File(
49 argstr='%s', position=-1, desc='output CSF response text file')
50 in_mask = File(
51 exists=True, argstr='-mask %s', desc='provide initial mask image')
52 max_sh = traits.Int(
53 8,
54 argstr='-lmax %d',
55 desc='maximum harmonic degree of response function')
56
57
58 class ResponseSDOutputSpec(TraitedSpec):
59 wm_file = File(argstr='%s', desc='output WM response text file')
60 gm_file = File(argstr='%s', desc='output GM response text file')
61 csf_file = File(argstr='%s', desc='output CSF response text file')
62
63
64 class ResponseSD(MRTrix3Base):
65 """
66 Estimate response function(s) for spherical deconvolution using the specified algorithm.
67
68 Example
69 -------
70
71 >>> import nipype.interfaces.mrtrix3 as mrt
72 >>> resp = mrt.ResponseSD()
73 >>> resp.inputs.in_file = 'dwi.mif'
74 >>> resp.inputs.algorithm = 'tournier'
75 >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')
76 >>> resp.cmdline # doctest: +ELLIPSIS
77 'dwi2response -fslgrad bvecs bvals tournier dwi.mif wm.txt'
78 >>> resp.run() # doctest: +SKIP
79 """
80
81 _cmd = 'dwi2response'
82 input_spec = ResponseSDInputSpec
83 output_spec = ResponseSDOutputSpec
84
85 def _list_outputs(self):
86 outputs = self.output_spec().get()
87 outputs['wm_file'] = op.abspath(self.inputs.wm_file)
88 if self.inputs.gm_file != Undefined:
89 outputs['gm_file'] = op.abspath(self.inputs.gm_file)
90 if self.inputs.csf_file != Undefined:
91 outputs['csf_file'] = op.abspath(self.inputs.csf_file)
92 return outputs
93
94
95 class ACTPrepareFSLInputSpec(CommandLineInputSpec):
96 in_file = File(
97 exists=True,
98 argstr='%s',
99 mandatory=True,
100 position=-2,
101 desc='input anatomical image')
102
103 out_file = File(
104 'act_5tt.mif',
105 argstr='%s',
106 mandatory=True,
107 position=-1,
108 usedefault=True,
109 desc='output file after processing')
110
111
112 class ACTPrepareFSLOutputSpec(TraitedSpec):
113 out_file = File(exists=True, desc='the output response file')
114
115
116 class ACTPrepareFSL(CommandLine):
117 """
118 Generate anatomical information necessary for Anatomically
119 Constrained Tractography (ACT).
120
121 Example
122 -------
123
124 >>> import nipype.interfaces.mrtrix3 as mrt
125 >>> prep = mrt.ACTPrepareFSL()
126 >>> prep.inputs.in_file = 'T1.nii.gz'
127 >>> prep.cmdline # doctest: +ELLIPSIS
128 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'
129 >>> prep.run() # doctest: +SKIP
130 """
131
132 _cmd = 'act_anat_prepare_fsl'
133 input_spec = ACTPrepareFSLInputSpec
134 output_spec = ACTPrepareFSLOutputSpec
135
136 def _list_outputs(self):
137 outputs = self.output_spec().get()
138 outputs['out_file'] = op.abspath(self.inputs.out_file)
139 return outputs
140
141
142 class ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):
143 in_file = File(
144 exists=True,
145 argstr='%s',
146 mandatory=True,
147 position=-4,
148 desc='input anatomical image')
149 in_t1w = File(
150 exists=True,
151 argstr='%s',
152 mandatory=True,
153 position=-3,
154 desc='input T1 image')
155 in_config = File(
156 exists=True,
157 argstr='%s',
158 position=-2,
159 desc='connectome configuration file')
160
161 out_file = File(
162 'aparc+first.mif',
163 argstr='%s',
164 mandatory=True,
165 position=-1,
166 usedefault=True,
167 desc='output file after processing')
168
169
170 class ReplaceFSwithFIRSTOutputSpec(TraitedSpec):
171 out_file = File(exists=True, desc='the output response file')
172
173
174 class ReplaceFSwithFIRST(CommandLine):
175 """
176 Replace deep gray matter structures segmented with FSL FIRST in a
177 FreeSurfer parcellation.
178
179 Example
180 -------
181
182 >>> import nipype.interfaces.mrtrix3 as mrt
183 >>> prep = mrt.ReplaceFSwithFIRST()
184 >>> prep.inputs.in_file = 'aparc+aseg.nii'
185 >>> prep.inputs.in_t1w = 'T1.nii.gz'
186 >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'
187 >>> prep.cmdline # doctest: +ELLIPSIS
188 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \
189 mrtrix3_labelconfig.txt aparc+first.mif'
190 >>> prep.run() # doctest: +SKIP
191 """
192
193 _cmd = 'fs_parc_replace_sgm_first'
194 input_spec = ReplaceFSwithFIRSTInputSpec
195 output_spec = ReplaceFSwithFIRSTOutputSpec
196
197 def _list_outputs(self):
198 outputs = self.output_spec().get()
199 outputs['out_file'] = op.abspath(self.inputs.out_file)
200 return outputs
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py
--- a/nipype/interfaces/mrtrix3/preprocess.py
+++ b/nipype/interfaces/mrtrix3/preprocess.py
@@ -27,7 +27,7 @@
'tournier',
'tax',
argstr='%s',
- position=-6,
+ position=1,
mandatory=True,
desc='response estimation algorithm (multi-tissue)')
in_file = File(
@@ -74,7 +74,7 @@
>>> resp.inputs.algorithm = 'tournier'
>>> resp.inputs.grad_fsl = ('bvecs', 'bvals')
>>> resp.cmdline # doctest: +ELLIPSIS
- 'dwi2response -fslgrad bvecs bvals tournier dwi.mif wm.txt'
+ 'dwi2response tournier -fslgrad bvecs bvals dwi.mif wm.txt'
>>> resp.run() # doctest: +SKIP
"""
|
{"golden_diff": "diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py\n--- a/nipype/interfaces/mrtrix3/preprocess.py\n+++ b/nipype/interfaces/mrtrix3/preprocess.py\n@@ -27,7 +27,7 @@\n 'tournier',\n 'tax',\n argstr='%s',\n- position=-6,\n+ position=1,\n mandatory=True,\n desc='response estimation algorithm (multi-tissue)')\n in_file = File(\n@@ -74,7 +74,7 @@\n >>> resp.inputs.algorithm = 'tournier'\n >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')\n >>> resp.cmdline # doctest: +ELLIPSIS\n- 'dwi2response -fslgrad bvecs bvals tournier dwi.mif wm.txt'\n+ 'dwi2response tournier -fslgrad bvecs bvals dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n \"\"\"\n", "issue": "Mrtrix3 `dwi2response` - bad algorithm argument position\n### Summary\r\n\r\nTh Mrtrix3 `dwi2response` CL wrapper generates the following runtime error:\r\n```shell\r\ndwi2response:\r\nmrinfo: [ERROR] no diffusion encoding information found in image \"<DWI_FILE>\"\r\ndwi2response: [ERROR] Script requires diffusion gradient table: either in image header, or using -grad / -fslgrad option\r\n```\r\nIt turns out that the command generated by `nipype` does not respect (my version of) the Mrtrix3 CL format.\r\n\r\n### Actual behavior\r\n\r\nGenerated command (not runnable):\r\n```shell\r\ndwi2response -fslgrad <BVEC_FILE> <BVAL_FILE> -mask <MASK_FILE> tournier <WM_FILE>\r\n```\r\n\r\n### Expected behavior\r\n\r\nRunnable command:\r\n```shell\r\ndwi2response tournier -fslgrad <BVEC_FILE> <BVAL_FILE> -mask <MASK_FILE> <WM_FILE>\r\n```\r\n\r\n### Environment\r\n\r\n- `MRtrix 3.0_RC2-117-gf098f097 dwi2response bin version: 3.0_RC2-117-gf098f097`\r\n- `Python 2.7.12`\r\n- `nipype v1.0.0`\r\n \r\n\r\n### Quick and dirty solution\r\n\r\nI'm really not sure how clean it is, but it worked for me; in the `ResponseSDInputSpec` class, I changed `position=-6` to `position=1` in the `algorithm` traits.\r\n\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\n\"\"\"\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = os.path.dirname(os.path.realpath(__file__ ))\n >>> datadir = os.path.realpath(os.path.join(filepath,\n ... 
'../../testing/data'))\n >>> os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\n\nimport os.path as op\n\nfrom ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,\n File, isdefined, Undefined)\nfrom .base import MRTrix3BaseInputSpec, MRTrix3Base\n\n\nclass ResponseSDInputSpec(MRTrix3BaseInputSpec):\n algorithm = traits.Enum(\n 'msmt_5tt',\n 'dhollander',\n 'tournier',\n 'tax',\n argstr='%s',\n position=-6,\n mandatory=True,\n desc='response estimation algorithm (multi-tissue)')\n in_file = File(\n exists=True,\n argstr='%s',\n position=-5,\n mandatory=True,\n desc='input DWI image')\n mtt_file = File(argstr='%s', position=-4, desc='input 5tt image')\n wm_file = File(\n 'wm.txt',\n argstr='%s',\n position=-3,\n usedefault=True,\n desc='output WM response text file')\n gm_file = File(\n argstr='%s', position=-2, desc='output GM response text file')\n csf_file = File(\n argstr='%s', position=-1, desc='output CSF response text file')\n in_mask = File(\n exists=True, argstr='-mask %s', desc='provide initial mask image')\n max_sh = traits.Int(\n 8,\n argstr='-lmax %d',\n desc='maximum harmonic degree of response function')\n\n\nclass ResponseSDOutputSpec(TraitedSpec):\n wm_file = File(argstr='%s', desc='output WM response text file')\n gm_file = File(argstr='%s', desc='output GM response text file')\n csf_file = File(argstr='%s', desc='output CSF response text file')\n\n\nclass ResponseSD(MRTrix3Base):\n \"\"\"\n Estimate response function(s) for spherical deconvolution using the specified algorithm.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> resp = mrt.ResponseSD()\n >>> resp.inputs.in_file = 'dwi.mif'\n >>> resp.inputs.algorithm = 'tournier'\n >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')\n >>> resp.cmdline # doctest: +ELLIPSIS\n 'dwi2response -fslgrad bvecs bvals tournier dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'dwi2response'\n input_spec = ResponseSDInputSpec\n output_spec = ResponseSDOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['wm_file'] = op.abspath(self.inputs.wm_file)\n if self.inputs.gm_file != Undefined:\n outputs['gm_file'] = op.abspath(self.inputs.gm_file)\n if self.inputs.csf_file != Undefined:\n outputs['csf_file'] = op.abspath(self.inputs.csf_file)\n return outputs\n\n\nclass ACTPrepareFSLInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-2,\n desc='input anatomical image')\n\n out_file = File(\n 'act_5tt.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ACTPrepareFSLOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ACTPrepareFSL(CommandLine):\n \"\"\"\n Generate anatomical information necessary for Anatomically\n Constrained Tractography (ACT).\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ACTPrepareFSL()\n >>> prep.inputs.in_file = 'T1.nii.gz'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'act_anat_prepare_fsl'\n input_spec = ACTPrepareFSLInputSpec\n output_spec = ACTPrepareFSLOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n\n\nclass 
ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-4,\n desc='input anatomical image')\n in_t1w = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-3,\n desc='input T1 image')\n in_config = File(\n exists=True,\n argstr='%s',\n position=-2,\n desc='connectome configuration file')\n\n out_file = File(\n 'aparc+first.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ReplaceFSwithFIRSTOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ReplaceFSwithFIRST(CommandLine):\n \"\"\"\n Replace deep gray matter structures segmented with FSL FIRST in a\n FreeSurfer parcellation.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ReplaceFSwithFIRST()\n >>> prep.inputs.in_file = 'aparc+aseg.nii'\n >>> prep.inputs.in_t1w = 'T1.nii.gz'\n >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \\\nmrtrix3_labelconfig.txt aparc+first.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'fs_parc_replace_sgm_first'\n input_spec = ReplaceFSwithFIRSTInputSpec\n output_spec = ReplaceFSwithFIRSTOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n", "path": "nipype/interfaces/mrtrix3/preprocess.py"}], "after_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# -*- coding: utf-8 -*-\n\"\"\"\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = os.path.dirname(os.path.realpath(__file__ ))\n >>> datadir = os.path.realpath(os.path.join(filepath,\n ... 
'../../testing/data'))\n >>> os.chdir(datadir)\n\n\"\"\"\nfrom __future__ import (print_function, division, unicode_literals,\n absolute_import)\n\nimport os.path as op\n\nfrom ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,\n File, isdefined, Undefined)\nfrom .base import MRTrix3BaseInputSpec, MRTrix3Base\n\n\nclass ResponseSDInputSpec(MRTrix3BaseInputSpec):\n algorithm = traits.Enum(\n 'msmt_5tt',\n 'dhollander',\n 'tournier',\n 'tax',\n argstr='%s',\n position=1,\n mandatory=True,\n desc='response estimation algorithm (multi-tissue)')\n in_file = File(\n exists=True,\n argstr='%s',\n position=-5,\n mandatory=True,\n desc='input DWI image')\n mtt_file = File(argstr='%s', position=-4, desc='input 5tt image')\n wm_file = File(\n 'wm.txt',\n argstr='%s',\n position=-3,\n usedefault=True,\n desc='output WM response text file')\n gm_file = File(\n argstr='%s', position=-2, desc='output GM response text file')\n csf_file = File(\n argstr='%s', position=-1, desc='output CSF response text file')\n in_mask = File(\n exists=True, argstr='-mask %s', desc='provide initial mask image')\n max_sh = traits.Int(\n 8,\n argstr='-lmax %d',\n desc='maximum harmonic degree of response function')\n\n\nclass ResponseSDOutputSpec(TraitedSpec):\n wm_file = File(argstr='%s', desc='output WM response text file')\n gm_file = File(argstr='%s', desc='output GM response text file')\n csf_file = File(argstr='%s', desc='output CSF response text file')\n\n\nclass ResponseSD(MRTrix3Base):\n \"\"\"\n Estimate response function(s) for spherical deconvolution using the specified algorithm.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> resp = mrt.ResponseSD()\n >>> resp.inputs.in_file = 'dwi.mif'\n >>> resp.inputs.algorithm = 'tournier'\n >>> resp.inputs.grad_fsl = ('bvecs', 'bvals')\n >>> resp.cmdline # doctest: +ELLIPSIS\n 'dwi2response tournier -fslgrad bvecs bvals dwi.mif wm.txt'\n >>> resp.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'dwi2response'\n input_spec = ResponseSDInputSpec\n output_spec = ResponseSDOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['wm_file'] = op.abspath(self.inputs.wm_file)\n if self.inputs.gm_file != Undefined:\n outputs['gm_file'] = op.abspath(self.inputs.gm_file)\n if self.inputs.csf_file != Undefined:\n outputs['csf_file'] = op.abspath(self.inputs.csf_file)\n return outputs\n\n\nclass ACTPrepareFSLInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-2,\n desc='input anatomical image')\n\n out_file = File(\n 'act_5tt.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ACTPrepareFSLOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ACTPrepareFSL(CommandLine):\n \"\"\"\n Generate anatomical information necessary for Anatomically\n Constrained Tractography (ACT).\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ACTPrepareFSL()\n >>> prep.inputs.in_file = 'T1.nii.gz'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'act_anat_prepare_fsl'\n input_spec = ACTPrepareFSLInputSpec\n output_spec = ACTPrepareFSLOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n\n\nclass 
ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-4,\n desc='input anatomical image')\n in_t1w = File(\n exists=True,\n argstr='%s',\n mandatory=True,\n position=-3,\n desc='input T1 image')\n in_config = File(\n exists=True,\n argstr='%s',\n position=-2,\n desc='connectome configuration file')\n\n out_file = File(\n 'aparc+first.mif',\n argstr='%s',\n mandatory=True,\n position=-1,\n usedefault=True,\n desc='output file after processing')\n\n\nclass ReplaceFSwithFIRSTOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc='the output response file')\n\n\nclass ReplaceFSwithFIRST(CommandLine):\n \"\"\"\n Replace deep gray matter structures segmented with FSL FIRST in a\n FreeSurfer parcellation.\n\n Example\n -------\n\n >>> import nipype.interfaces.mrtrix3 as mrt\n >>> prep = mrt.ReplaceFSwithFIRST()\n >>> prep.inputs.in_file = 'aparc+aseg.nii'\n >>> prep.inputs.in_t1w = 'T1.nii.gz'\n >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt'\n >>> prep.cmdline # doctest: +ELLIPSIS\n 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \\\nmrtrix3_labelconfig.txt aparc+first.mif'\n >>> prep.run() # doctest: +SKIP\n \"\"\"\n\n _cmd = 'fs_parc_replace_sgm_first'\n input_spec = ReplaceFSwithFIRSTInputSpec\n output_spec = ReplaceFSwithFIRSTOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_file'] = op.abspath(self.inputs.out_file)\n return outputs\n", "path": "nipype/interfaces/mrtrix3/preprocess.py"}]}
| 2,641 | 240 |
gh_patches_debug_20688 | rasdani/github-patches | git_diff | uccser__cs-unplugged-887 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove admin application
--- END ISSUE ---
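The issue is only a title, but for a Django project this normally means dropping `django.contrib.admin` from the installed apps (plus any admin URL route, which is not shown in this record). A sketch of the app list from the settings file below with the admin entry removed:

```python
DJANGO_APPS = [
    # Default Django apps:
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.postgres",

    # Useful template tags
    "django.contrib.humanize",
]
```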
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Base Django settings for CS Unplugged project.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/dev/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/dev/ref/settings/
10 """
11
12 import environ
13 import os.path
14
15 # Add custom languages not provided by Django
16 import django.conf.locale
17 from django.conf import global_settings
18 from django.utils.translation import ugettext_lazy as _
19
20 # cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/
21 ROOT_DIR = environ.Path(__file__) - 3
22
23 # Load operating system environment variables and then prepare to use them
24 env = environ.Env()
25
26 # APP CONFIGURATION
27 # ----------------------------------------------------------------------------
28 DJANGO_APPS = [
29 # Default Django apps:
30 "django.contrib.auth",
31 "django.contrib.contenttypes",
32 "django.contrib.sessions",
33 "django.contrib.messages",
34 "django.contrib.staticfiles",
35 "django.contrib.postgres",
36
37 # Useful template tags
38 "django.contrib.humanize",
39
40 # Admin
41 "django.contrib.admin",
42 ]
43
44 THIRD_PARTY_APPS = [
45 "django_bootstrap_breadcrumbs",
46 "haystack",
47 "widget_tweaks",
48 "modeltranslation",
49 "bidiutils",
50 ]
51
52 # Apps specific for this project go here.
53 LOCAL_APPS = [
54 "general.apps.GeneralConfig",
55 "topics.apps.TopicsConfig",
56 "resources.apps.ResourcesConfig",
57 "search.apps.SearchConfig",
58 ]
59
60 # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
61 INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS
62
63 # MIDDLEWARE CONFIGURATION
64 # ----------------------------------------------------------------------------
65 MIDDLEWARE = [
66 "django.middleware.security.SecurityMiddleware",
67 "django.contrib.sessions.middleware.SessionMiddleware",
68 "django.middleware.locale.LocaleMiddleware",
69 "django.middleware.common.CommonMiddleware",
70 "django.middleware.csrf.CsrfViewMiddleware",
71 "django.contrib.auth.middleware.AuthenticationMiddleware",
72 "django.contrib.messages.middleware.MessageMiddleware",
73 "django.middleware.clickjacking.XFrameOptionsMiddleware",
74 ]
75
76 # DEBUG
77 # ----------------------------------------------------------------------------
78 # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
79 DEBUG = env.bool("DJANGO_DEBUG", False)
80
81 # FIXTURE CONFIGURATION
82 # ----------------------------------------------------------------------------
83 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
84 FIXTURE_DIRS = (
85 str(ROOT_DIR.path("fixtures")),
86 )
87
88 # EMAIL CONFIGURATION
89 # -----------------------------------------------------------------------------
90 # EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND",
91 # default="django.core.mail.backends.smtp.EmailBackend")
92
93 # MANAGER CONFIGURATION
94 # ----------------------------------------------------------------------------
95 # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
96 # ADMINS = [
97 # ("University of Canterbury Computer Science Research Group",
98 # "[email protected]"),
99 # ]
100
101 # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
102 # MANAGERS = ADMINS
103
104 # GENERAL CONFIGURATION
105 # ----------------------------------------------------------------------------
106 # Local time zone for this installation. Choices can be found here:
107 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
108 # although not all choices may be available on all operating systems.
109 # In a Windows environment this must be set to your system time zone.
110 TIME_ZONE = "UTC"
111
112 # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
113 LANGUAGE_CODE = "en"
114
115 INCONTEXT_L10N_PSEUDOLANGUAGE = "xx-lr"
116 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = "yy-rl"
117 INCONTEXT_L10N_PSEUDOLANGUAGES = (
118 INCONTEXT_L10N_PSEUDOLANGUAGE,
119 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI
120 )
121
122 LANGUAGES = (
123 ("en", "English"),
124 )
125
126 if env.bool("INCLUDE_INCONTEXT_L10N", False):
127 EXTRA_LANGUAGES = [
128 (INCONTEXT_L10N_PSEUDOLANGUAGE, "Translation mode"),
129 (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, "Translation mode (Bi-directional)"),
130 ]
131
132 EXTRA_LANG_INFO = {
133 INCONTEXT_L10N_PSEUDOLANGUAGE: {
134 'bidi': False,
135 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,
136 'name': "Translation mode",
137 'name_local': _("Translation mode"),
138 },
139 INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {
140 'bidi': True,
141 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,
142 'name': "Translation mode (Bi-directional)",
143 'name_local': _("Translation mode (Bi-directional)"),
144 }
145 }
146
147 django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)
148 # Add new languages to the list of all django languages
149 global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES
150 global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +
151 [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])
152 # Add new languages to the list of languages used for this project
153 LANGUAGES += tuple(EXTRA_LANGUAGES)
154 LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI
155
156
157 # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
158 SITE_ID = 1
159
160 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
161 USE_I18N = True
162
163 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
164 USE_L10N = True
165
166 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
167 USE_TZ = True
168
169 # See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
170 LOCALE_PATHS = ["locale"]
171
172 # TEMPLATE CONFIGURATION
173 # ----------------------------------------------------------------------------
174 # See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
175 TEMPLATES = [
176 {
177 # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
178 "BACKEND": "django.template.backends.django.DjangoTemplates",
179 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
180 "DIRS": [
181 str(ROOT_DIR.path("templates")),
182 ],
183 "OPTIONS": {
184 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
185 "debug": DEBUG,
186 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
187 # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
188 "loaders": [
189 "django.template.loaders.filesystem.Loader",
190 "django.template.loaders.app_directories.Loader",
191 ],
192 # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
193 "context_processors": [
194 "django.template.context_processors.debug",
195 "django.template.context_processors.request",
196 "django.contrib.auth.context_processors.auth",
197 "django.template.context_processors.i18n",
198 "django.template.context_processors.media",
199 "django.template.context_processors.static",
200 "django.template.context_processors.tz",
201 "django.contrib.messages.context_processors.messages",
202 "config.context_processors.version_number.version_number",
203 "config.context_processors.deployed.deployed",
204 "bidiutils.context_processors.bidi",
205 ],
206 "libraries": {
207 "render_html_field": "config.templatetags.render_html_field",
208 "translate_url": "config.templatetags.translate_url",
209 "query_replace": "config.templatetags.query_replace",
210 },
211 },
212 },
213 ]
214
215 # STATIC FILE CONFIGURATION
216 # ------------------------------------------------------------------------------
217 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
218 STATIC_ROOT = os.path.join(str(ROOT_DIR.path("staticfiles")), "")
219
220 # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
221 BUILD_ROOT = os.path.join(str(ROOT_DIR.path("build")), "")
222 STATIC_URL = "/staticfiles/"
223
224 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
225 STATICFILES_DIRS = [
226 BUILD_ROOT,
227 ]
228
229 # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
230 STATICFILES_FINDERS = [
231 "django.contrib.staticfiles.finders.FileSystemFinder",
232 "django.contrib.staticfiles.finders.AppDirectoriesFinder",
233 ]
234
235 # MEDIA CONFIGURATION
236 # ------------------------------------------------------------------------------
237 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
238 MEDIA_ROOT = str(ROOT_DIR("media"))
239
240 # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
241 MEDIA_URL = "/media/"
242
243 # URL Configuration
244 # ------------------------------------------------------------------------------
245 ROOT_URLCONF = "config.urls"
246
247 # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
248 WSGI_APPLICATION = "config.wsgi.application"
249
250 # PASSWORD VALIDATION
251 # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
252 # ------------------------------------------------------------------------------
253
254 AUTH_PASSWORD_VALIDATORS = [
255 {
256 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
257 },
258 {
259 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
260 },
261 {
262 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
263 },
264 {
265 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
266 },
267 ]
268
269 # SEARCH CONFIGURATION
270 # ------------------------------------------------------------------------------
271 # See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html
272 HAYSTACK_CONNECTIONS = {
273 "default": {
274 "ENGINE": "haystack.backends.whoosh_backend.WhooshEngine",
275 "PATH": str(ROOT_DIR.path("whoosh_index")),
276 },
277 }
278 HAYSTACK_SEARCH_RESULTS_PER_PAGE = 10
279
280 # OTHER SETTINGS
281 # ------------------------------------------------------------------------------
282 DJANGO_PRODUCTION = env.bool("DJANGO_PRODUCTION")
283 TOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("topics")), "content")
284 RESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path("resources")), "content")
285 RESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path("staticfiles")), "resources")
286 RESOURCE_GENERATORS_PACKAGE = "resources.generators"
287 RESOURCE_COPY_AMOUNT = 20
288 SCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path("temp"))
289 CUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path("utils")), "custom_converter_templates", "")
290 MODELTRANSLATION_CUSTOM_FIELDS = ("JSONField",)
291
```
Path: `csunplugged/config/urls.py`
Content:
```
1 """URL configuration for the Django system.
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/dev/topics/http/urls/
5 """
6
7 from django.conf import settings
8 from django.conf.urls import include, url
9 from django.conf.urls.i18n import i18n_patterns
10 from django.contrib import admin
11 from general import views
12
13 urlpatterns = i18n_patterns(
14 url(r"", include("general.urls", namespace="general")),
15 url(r"^topics/", include("topics.urls", namespace="topics")),
16 url(r"^resources/", include("resources.urls", namespace="resources")),
17 url(r"^admin/", include(admin.site.urls)),
18 url(r"^search/", include("search.urls", namespace="search")),
19 )
20
21 urlpatterns += [
22 url(r"", include("classic.urls")),
23 url(r"^_ah/health", views.health_check),
24 ]
25
26 if settings.DEBUG: # pragma: no cover
27 import debug_toolbar
28 urlpatterns += [
29 url(r"^__debug__/", include(debug_toolbar.urls)),
30 ]
31 urlpatterns += i18n_patterns(
32 url(r"^__dev__/", include("dev.urls", namespace="dev")),
33 )
34 # These patterns allows these error pages to be debugged during development.
35 from django.views import defaults
36 urlpatterns += [
37 url(r'^400/$', defaults.bad_request, kwargs={'exception': Exception("Bad request")}),
38 url(r'^403/$', defaults.permission_denied, kwargs={'exception': Exception("Permissin denied")}),
39 url(r'^404/$', defaults.page_not_found, kwargs={'exception': Exception("Page not found")}),
40 url(r'^500/$', defaults.server_error),
41 ]
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py
--- a/csunplugged/config/settings/base.py
+++ b/csunplugged/config/settings/base.py
@@ -36,9 +36,6 @@
# Useful template tags
"django.contrib.humanize",
-
- # Admin
- "django.contrib.admin",
]
THIRD_PARTY_APPS = [
diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py
--- a/csunplugged/config/urls.py
+++ b/csunplugged/config/urls.py
@@ -7,14 +7,12 @@
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
-from django.contrib import admin
from general import views
urlpatterns = i18n_patterns(
url(r"", include("general.urls", namespace="general")),
url(r"^topics/", include("topics.urls", namespace="topics")),
url(r"^resources/", include("resources.urls", namespace="resources")),
- url(r"^admin/", include(admin.site.urls)),
url(r"^search/", include("search.urls", namespace="search")),
)
|
{"golden_diff": "diff --git a/csunplugged/config/settings/base.py b/csunplugged/config/settings/base.py\n--- a/csunplugged/config/settings/base.py\n+++ b/csunplugged/config/settings/base.py\n@@ -36,9 +36,6 @@\n \n # Useful template tags\n \"django.contrib.humanize\",\n-\n- # Admin\n- \"django.contrib.admin\",\n ]\n \n THIRD_PARTY_APPS = [\ndiff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py\n--- a/csunplugged/config/urls.py\n+++ b/csunplugged/config/urls.py\n@@ -7,14 +7,12 @@\n from django.conf import settings\n from django.conf.urls import include, url\n from django.conf.urls.i18n import i18n_patterns\n-from django.contrib import admin\n from general import views\n \n urlpatterns = i18n_patterns(\n url(r\"\", include(\"general.urls\", namespace=\"general\")),\n url(r\"^topics/\", include(\"topics.urls\", namespace=\"topics\")),\n url(r\"^resources/\", include(\"resources.urls\", namespace=\"resources\")),\n- url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^search/\", include(\"search.urls\", namespace=\"search\")),\n )\n", "issue": "Remove admin application\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nBase Django settings for CS Unplugged project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\nimport os.path\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nfrom django.conf import global_settings\nfrom django.utils.translation import ugettext_lazy as _\n\n# cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/\nROOT_DIR = environ.Path(__file__) - 3\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# APP CONFIGURATION\n# ----------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.postgres\",\n\n # Useful template tags\n \"django.contrib.humanize\",\n\n # Admin\n \"django.contrib.admin\",\n]\n\nTHIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"haystack\",\n \"widget_tweaks\",\n \"modeltranslation\",\n \"bidiutils\",\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n \"general.apps.GeneralConfig\",\n \"topics.apps.TopicsConfig\",\n \"resources.apps.ResourcesConfig\",\n \"search.apps.SearchConfig\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ----------------------------------------------------------------------------\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# DEBUG\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n\n# FIXTURE 
CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(ROOT_DIR.path(\"fixtures\")),\n)\n\n# EMAIL CONFIGURATION\n# -----------------------------------------------------------------------------\n# EMAIL_BACKEND = env(\"DJANGO_EMAIL_BACKEND\",\n# default=\"django.core.mail.backends.smtp.EmailBackend\")\n\n# MANAGER CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# ADMINS = [\n# (\"University of Canterbury Computer Science Research Group\",\n# \"[email protected]\"),\n# ]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\n# MANAGERS = ADMINS\n\n# GENERAL CONFIGURATION\n# ----------------------------------------------------------------------------\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en\"\n\nINCONTEXT_L10N_PSEUDOLANGUAGE = \"xx-lr\"\nINCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = \"yy-rl\"\nINCONTEXT_L10N_PSEUDOLANGUAGES = (\n INCONTEXT_L10N_PSEUDOLANGUAGE,\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI\n)\n\nLANGUAGES = (\n (\"en\", \"English\"),\n)\n\nif env.bool(\"INCLUDE_INCONTEXT_L10N\", False):\n EXTRA_LANGUAGES = [\n (INCONTEXT_L10N_PSEUDOLANGUAGE, \"Translation mode\"),\n (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, \"Translation mode (Bi-directional)\"),\n ]\n\n EXTRA_LANG_INFO = {\n INCONTEXT_L10N_PSEUDOLANGUAGE: {\n 'bidi': False,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,\n 'name': \"Translation mode\",\n 'name_local': _(\"Translation mode\"),\n },\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {\n 'bidi': True,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,\n 'name': \"Translation mode (Bi-directional)\",\n 'name_local': _(\"Translation mode (Bi-directional)\"),\n }\n }\n\n django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +\n [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI\n\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [\"locale\"]\n\n# TEMPLATE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n 
\"DIRS\": [\n str(ROOT_DIR.path(\"templates\")),\n ],\n \"OPTIONS\": {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n \"debug\": DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n \"bidiutils.context_processors.bidi\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n \"translate_url\": \"config.templatetags.translate_url\",\n \"query_replace\": \"config.templatetags.query_replace\",\n },\n },\n },\n]\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"\")\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nBUILD_ROOT = os.path.join(str(ROOT_DIR.path(\"build\")), \"\")\nSTATIC_URL = \"/staticfiles/\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n BUILD_ROOT,\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR(\"media\"))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = \"config.urls\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# SEARCH CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n 
\"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": str(ROOT_DIR.path(\"whoosh_index\")),\n },\n}\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 10\n\n# OTHER SETTINGS\n# ------------------------------------------------------------------------------\nDJANGO_PRODUCTION = env.bool(\"DJANGO_PRODUCTION\")\nTOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"topics\")), \"content\")\nRESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"resources\")), \"content\")\nRESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"resources\")\nRESOURCE_GENERATORS_PACKAGE = \"resources.generators\"\nRESOURCE_COPY_AMOUNT = 20\nSCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path(\"temp\"))\nCUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path(\"utils\")), \"custom_converter_templates\", \"\")\nMODELTRANSLATION_CUSTOM_FIELDS = (\"JSONField\",)\n", "path": "csunplugged/config/settings/base.py"}, {"content": "\"\"\"URL configuration for the Django system.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\n\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom general import views\n\nurlpatterns = i18n_patterns(\n url(r\"\", include(\"general.urls\", namespace=\"general\")),\n url(r\"^topics/\", include(\"topics.urls\", namespace=\"topics\")),\n url(r\"^resources/\", include(\"resources.urls\", namespace=\"resources\")),\n url(r\"^admin/\", include(admin.site.urls)),\n url(r\"^search/\", include(\"search.urls\", namespace=\"search\")),\n)\n\nurlpatterns += [\n url(r\"\", include(\"classic.urls\")),\n url(r\"^_ah/health\", views.health_check),\n]\n\nif settings.DEBUG: # pragma: no cover\n import debug_toolbar\n urlpatterns += [\n url(r\"^__debug__/\", include(debug_toolbar.urls)),\n ]\n urlpatterns += i18n_patterns(\n url(r\"^__dev__/\", include(\"dev.urls\", namespace=\"dev\")),\n )\n # These patterns allows these error pages to be debugged during development.\n from django.views import defaults\n urlpatterns += [\n url(r'^400/$', defaults.bad_request, kwargs={'exception': Exception(\"Bad request\")}),\n url(r'^403/$', defaults.permission_denied, kwargs={'exception': Exception(\"Permissin denied\")}),\n url(r'^404/$', defaults.page_not_found, kwargs={'exception': Exception(\"Page not found\")}),\n url(r'^500/$', defaults.server_error),\n ]\n", "path": "csunplugged/config/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nBase Django settings for CS Unplugged project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\nimport os.path\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nfrom django.conf import global_settings\nfrom django.utils.translation import ugettext_lazy as _\n\n# cs-unplugged/csunplugged/config/settings/base.py - 3 = csunplugged/\nROOT_DIR = environ.Path(__file__) - 3\n\n# Load operating system environment variables and then prepare to use them\nenv = environ.Env()\n\n# APP CONFIGURATION\n# ----------------------------------------------------------------------------\nDJANGO_APPS = [\n # Default Django apps:\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n 
\"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.postgres\",\n\n # Useful template tags\n \"django.contrib.humanize\",\n]\n\nTHIRD_PARTY_APPS = [\n \"django_bootstrap_breadcrumbs\",\n \"haystack\",\n \"widget_tweaks\",\n \"modeltranslation\",\n \"bidiutils\",\n]\n\n# Apps specific for this project go here.\nLOCAL_APPS = [\n \"general.apps.GeneralConfig\",\n \"topics.apps.TopicsConfig\",\n \"resources.apps.ResourcesConfig\",\n \"search.apps.SearchConfig\",\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps\nINSTALLED_APPS = DJANGO_APPS + LOCAL_APPS + THIRD_PARTY_APPS\n\n# MIDDLEWARE CONFIGURATION\n# ----------------------------------------------------------------------------\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\n# DEBUG\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = env.bool(\"DJANGO_DEBUG\", False)\n\n# FIXTURE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS\nFIXTURE_DIRS = (\n str(ROOT_DIR.path(\"fixtures\")),\n)\n\n# EMAIL CONFIGURATION\n# -----------------------------------------------------------------------------\n# EMAIL_BACKEND = env(\"DJANGO_EMAIL_BACKEND\",\n# default=\"django.core.mail.backends.smtp.EmailBackend\")\n\n# MANAGER CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# ADMINS = [\n# (\"University of Canterbury Computer Science Research Group\",\n# \"[email protected]\"),\n# ]\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers\n# MANAGERS = ADMINS\n\n# GENERAL CONFIGURATION\n# ----------------------------------------------------------------------------\n# Local time zone for this installation. 
Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = \"UTC\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = \"en\"\n\nINCONTEXT_L10N_PSEUDOLANGUAGE = \"xx-lr\"\nINCONTEXT_L10N_PSEUDOLANGUAGE_BIDI = \"yy-rl\"\nINCONTEXT_L10N_PSEUDOLANGUAGES = (\n INCONTEXT_L10N_PSEUDOLANGUAGE,\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI\n)\n\nLANGUAGES = (\n (\"en\", \"English\"),\n)\n\nif env.bool(\"INCLUDE_INCONTEXT_L10N\", False):\n EXTRA_LANGUAGES = [\n (INCONTEXT_L10N_PSEUDOLANGUAGE, \"Translation mode\"),\n (INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI, \"Translation mode (Bi-directional)\"),\n ]\n\n EXTRA_LANG_INFO = {\n INCONTEXT_L10N_PSEUDOLANGUAGE: {\n 'bidi': False,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE,\n 'name': \"Translation mode\",\n 'name_local': _(\"Translation mode\"),\n },\n INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI: {\n 'bidi': True,\n 'code': INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI,\n 'name': \"Translation mode (Bi-directional)\",\n 'name_local': _(\"Translation mode (Bi-directional)\"),\n }\n }\n\n django.conf.locale.LANG_INFO.update(EXTRA_LANG_INFO)\n # Add new languages to the list of all django languages\n global_settings.LANGUAGES = global_settings.LANGUAGES + EXTRA_LANGUAGES\n global_settings.LANGUAGES_BIDI = (global_settings.LANGUAGES_BIDI +\n [INCONTEXT_L10N_PSEUDOLANGUAGE_BIDI.split('-')[0]])\n # Add new languages to the list of languages used for this project\n LANGUAGES += tuple(EXTRA_LANGUAGES)\n LANGUAGES_BIDI = global_settings.LANGUAGES_BIDI\n\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths\nLOCALE_PATHS = [\"locale\"]\n\n# TEMPLATE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n \"DIRS\": [\n str(ROOT_DIR.path(\"templates\")),\n ],\n \"OPTIONS\": {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n \"debug\": DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n \"loaders\": [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n 
\"config.context_processors.version_number.version_number\",\n \"config.context_processors.deployed.deployed\",\n \"bidiutils.context_processors.bidi\",\n ],\n \"libraries\": {\n \"render_html_field\": \"config.templatetags.render_html_field\",\n \"translate_url\": \"config.templatetags.translate_url\",\n \"query_replace\": \"config.templatetags.query_replace\",\n },\n },\n },\n]\n\n# STATIC FILE CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root\nSTATIC_ROOT = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"\")\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nBUILD_ROOT = os.path.join(str(ROOT_DIR.path(\"build\")), \"\")\nSTATIC_URL = \"/staticfiles/\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = [\n BUILD_ROOT,\n]\n\n# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\n# MEDIA CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = str(ROOT_DIR(\"media\"))\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nROOT_URLCONF = \"config.urls\"\n\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# PASSWORD VALIDATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators\n# ------------------------------------------------------------------------------\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# SEARCH CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: http://django-haystack.readthedocs.io/en/v2.6.0/settings.html\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": str(ROOT_DIR.path(\"whoosh_index\")),\n },\n}\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 10\n\n# OTHER SETTINGS\n# ------------------------------------------------------------------------------\nDJANGO_PRODUCTION = env.bool(\"DJANGO_PRODUCTION\")\nTOPICS_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"topics\")), \"content\")\nRESOURCES_CONTENT_BASE_PATH = os.path.join(str(ROOT_DIR.path(\"resources\")), \"content\")\nRESOURCE_GENERATION_LOCATION = os.path.join(str(ROOT_DIR.path(\"staticfiles\")), \"resources\")\nRESOURCE_GENERATORS_PACKAGE = \"resources.generators\"\nRESOURCE_COPY_AMOUNT = 20\nSCRATCH_GENERATION_LOCATION = str(ROOT_DIR.path(\"temp\"))\nCUSTOM_VERTO_TEMPLATES = os.path.join(str(ROOT_DIR.path(\"utils\")), \"custom_converter_templates\", \"\")\nMODELTRANSLATION_CUSTOM_FIELDS = (\"JSONField\",)\n", "path": "csunplugged/config/settings/base.py"}, {"content": "\"\"\"URL 
configuration for the Django system.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\n\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom general import views\n\nurlpatterns = i18n_patterns(\n url(r\"\", include(\"general.urls\", namespace=\"general\")),\n url(r\"^topics/\", include(\"topics.urls\", namespace=\"topics\")),\n url(r\"^resources/\", include(\"resources.urls\", namespace=\"resources\")),\n url(r\"^search/\", include(\"search.urls\", namespace=\"search\")),\n)\n\nurlpatterns += [\n url(r\"\", include(\"classic.urls\")),\n url(r\"^_ah/health\", views.health_check),\n]\n\nif settings.DEBUG: # pragma: no cover\n import debug_toolbar\n urlpatterns += [\n url(r\"^__debug__/\", include(debug_toolbar.urls)),\n ]\n urlpatterns += i18n_patterns(\n url(r\"^__dev__/\", include(\"dev.urls\", namespace=\"dev\")),\n )\n # These patterns allows these error pages to be debugged during development.\n from django.views import defaults\n urlpatterns += [\n url(r'^400/$', defaults.bad_request, kwargs={'exception': Exception(\"Bad request\")}),\n url(r'^403/$', defaults.permission_denied, kwargs={'exception': Exception(\"Permissin denied\")}),\n url(r'^404/$', defaults.page_not_found, kwargs={'exception': Exception(\"Page not found\")}),\n url(r'^500/$', defaults.server_error),\n ]\n", "path": "csunplugged/config/urls.py"}]}
| 3,763 | 268 |
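The cs-unplugged fix above is straightforward to sanity-check: once `django.contrib.admin` is dropped from `INSTALLED_APPS` and the `^admin/` route is removed from `config/urls.py`, the admin index should no longer resolve. The sketch below shows what such a regression test could look like; the test class name and placement are assumptions, not part of the repository.

```python
# Hypothetical regression test for the patch above (class name and location
# are assumptions); run it with Django's test runner inside the project.
from django.conf import settings
from django.test import SimpleTestCase
from django.urls import NoReverseMatch, reverse


class AdminRemovedTest(SimpleTestCase):
    def test_admin_app_not_installed(self):
        # The golden diff removes "django.contrib.admin" from DJANGO_APPS,
        # so it must no longer appear in the combined INSTALLED_APPS list.
        self.assertNotIn("django.contrib.admin", settings.INSTALLED_APPS)

    def test_admin_urls_not_routed(self):
        # With url(r"^admin/", include(admin.site.urls)) gone, the "admin"
        # namespace is never registered and reversing its index must fail.
        with self.assertRaises(NoReverseMatch):
            reverse("admin:index")
```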
gh_patches_debug_8762 | rasdani/github-patches | git_diff | aws__sagemaker-python-sdk-1848 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: AttributeError: 'NoneType' object has no attribute 'module' when applying the v2 upgrade tool
**Describe the bug**
When applying the `sagemaker-upgrade-v2` automated CLI tool, earlier ast modification leads to an `AttributeError` exception.
**To reproduce**
1. Craft a script that will be modified by a SerDe import renamer
1. Apply the V2 upgrade CLI tool:
An `AttributeError` exception is raised.
**Expected behavior**
The V2 upgrade CLI would properly upgrade the script.
**Screenshots or logs**
```
❯ cat v1.py
import sagemaker
from sagemaker.predictor import csv_serializer
csv_serializer.__doc___
❯ sagemaker-upgrade-v2 --in-file v1.py --out-file v2.py
Traceback (most recent call last):
File "~/testvenv/bin/sagemaker-upgrade-v2", line 8, in <module>
sys.exit(main())
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py", line 78, in main
_update_file(args.in_file, args.out_file)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py", line 50, in _update_file
updater_cls(input_path=input_file, output_path=output_file).update()
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py", line 72, in update
output = self._update_ast(self._read_input_file())
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py", line 86, in _update_ast
return ASTTransformer().visit(input_ast)
File "/usr/lib/python3.8/ast.py", line 363, in visit
return visitor(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py", line 136, in visit_Module
self.generic_visit(node)
File "/usr/lib/python3.8/ast.py", line 439, in generic_visit
value = self.visit(value)
File "/usr/lib/python3.8/ast.py", line 363, in visit
return visitor(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py", line 155, in visit_ImportFrom
node = import_checker.check_and_modify_node(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/modifier.py", line 26, in check_and_modify_node
if self.node_should_be_modified(node):
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/image_uris.py", line 115, in node_should_be_modified
return node.module in GET_IMAGE_URI_NAMESPACES and any(
AttributeError: 'NoneType' object has no attribute 'module'
```
**System information**
A description of your system. Please provide:
- **SageMaker Python SDK version**: 2.4.1
- **Framework name (eg. PyTorch) or algorithm (eg. KMeans)**:
- **Framework version**:
- **Python version**:
- **CPU or GPU**:
- **Custom Docker image (Y/N)**:
**Additional context**
The problem comes from [the ordering](https://github.com/aws/sagemaker-python-sdk/blob/v2.4.1/src/sagemaker/cli/compatibility/v2/ast_transformer.py#L59-L60), which the existing, isolated unit tests do not cover. The earlier renamer modifies the ast, and the later renamer cannot handle this situation:
```
59 modifiers.serde.SerdeImportFromPredictorRenamer(),
60 modifiers.image_uris.ImageURIRetrieveImportFromRenamer(),
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py`
Content:
```
1 # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """Classes to modify image uri retrieve methods for Python SDK v2.0 and later."""
14 from __future__ import absolute_import
15
16 import ast
17
18 from sagemaker.cli.compatibility.v2.modifiers import matching
19 from sagemaker.cli.compatibility.v2.modifiers.modifier import Modifier
20
21 GET_IMAGE_URI_NAME = "get_image_uri"
22 GET_IMAGE_URI_NAMESPACES = (
23 "sagemaker",
24 "sagemaker.amazon_estimator",
25 "sagemaker.amazon.amazon_estimator",
26 "amazon_estimator",
27 "amazon.amazon_estimator",
28 )
29
30
31 class ImageURIRetrieveRefactor(Modifier):
32 """A class to refactor *get_image_uri() method."""
33
34 def node_should_be_modified(self, node):
35 """Checks if the ``ast.Call`` node calls a function of interest.
36
37 This looks for the following calls:
38
39 - ``sagemaker.get_image_uri``
40 - ``sagemaker.amazon_estimator.get_image_uri``
41 - ``get_image_uri``
42
43 Args:
44 node (ast.Call): a node that represents a function call. For more,
45 see https://docs.python.org/3/library/ast.html#abstract-grammar.
46
47 Returns:
48 bool: If the ``ast.Call`` instantiates a class of interest.
49 """
50 return matching.matches_name_or_namespaces(
51 node, GET_IMAGE_URI_NAME, GET_IMAGE_URI_NAMESPACES
52 )
53
54 def modify_node(self, node):
55 """Modifies the ``ast.Call`` node to call ``image_uris.retrieve`` instead.
56 And switch the first two parameters from (region, repo) to (framework, region)
57
58 Args:
59 node (ast.Call): a node that represents a *image_uris.retrieve call.
60 """
61 original_args = [None] * 3
62 for kw in node.keywords:
63 if kw.arg == "repo_name":
64 original_args[0] = ast.Str(kw.value.s)
65 elif kw.arg == "repo_region":
66 original_args[1] = ast.Str(kw.value.s)
67 elif kw.arg == "repo_version":
68 original_args[2] = ast.Str(kw.value.s)
69
70 if len(node.args) > 0:
71 original_args[1] = ast.Str(node.args[0].s)
72 if len(node.args) > 1:
73 original_args[0] = ast.Str(node.args[1].s)
74 if len(node.args) > 2:
75 original_args[2] = ast.Str(node.args[2].s)
76
77 args = []
78 for arg in original_args:
79 if arg:
80 args.append(arg)
81
82 func = node.func
83 has_sagemaker = False
84 while hasattr(func, "value"):
85 if hasattr(func.value, "id") and func.value.id == "sagemaker":
86 has_sagemaker = True
87 break
88 func = func.value
89
90 if has_sagemaker:
91 node.func = ast.Attribute(
92 value=ast.Attribute(attr="image_uris", value=ast.Name(id="sagemaker")),
93 attr="retrieve",
94 )
95 else:
96 node.func = ast.Attribute(value=ast.Name(id="image_uris"), attr="retrieve")
97 node.args = args
98 node.keywords = []
99 return node
100
101
102 class ImageURIRetrieveImportFromRenamer(Modifier):
103 """A class to update import statements of ``get_image_uri``."""
104
105 def node_should_be_modified(self, node):
106 """Checks if the import statement imports ``get_image_uri`` from the correct module.
107
108 Args:
109 node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.
110 For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.
111
112 Returns:
113 bool: If the import statement imports ``get_image_uri`` from the correct module.
114 """
115 return node.module in GET_IMAGE_URI_NAMESPACES and any(
116 name.name == GET_IMAGE_URI_NAME for name in node.names
117 )
118
119 def modify_node(self, node):
120 """Changes the ``ast.ImportFrom`` node's name from ``get_image_uri`` to ``image_uris``.
121
122 Args:
123 node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.
124 For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.
125
126 Returns:
127 ast.AST: the original node, which has been potentially modified.
128 """
129 for name in node.names:
130 if name.name == GET_IMAGE_URI_NAME:
131 name.name = "image_uris"
132 if node.module in GET_IMAGE_URI_NAMESPACES:
133 node.module = "sagemaker"
134 return node
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py b/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py
--- a/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py
+++ b/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py
@@ -112,8 +112,10 @@
Returns:
bool: If the import statement imports ``get_image_uri`` from the correct module.
"""
- return node.module in GET_IMAGE_URI_NAMESPACES and any(
- name.name == GET_IMAGE_URI_NAME for name in node.names
+ return (
+ node is not None
+ and node.module in GET_IMAGE_URI_NAMESPACES
+ and any(name.name == GET_IMAGE_URI_NAME for name in node.names)
)
def modify_node(self, node):
|
{"golden_diff": "diff --git a/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py b/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py\n--- a/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py\n+++ b/src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py\n@@ -112,8 +112,10 @@\n Returns:\n bool: If the import statement imports ``get_image_uri`` from the correct module.\n \"\"\"\n- return node.module in GET_IMAGE_URI_NAMESPACES and any(\n- name.name == GET_IMAGE_URI_NAME for name in node.names\n+ return (\n+ node is not None\n+ and node.module in GET_IMAGE_URI_NAMESPACES\n+ and any(name.name == GET_IMAGE_URI_NAME for name in node.names)\n )\n \n def modify_node(self, node):\n", "issue": "bug: AttributeError: 'NoneType' object has no attribute 'module' when applying the v2 upgrade tool\n**Describe the bug**\r\n\r\nWhen applying the `sagemaker-upgrade-v2` automated CLI tool, earlier ast modification leads to an `AttributeError` exception.\r\n\r\n**To reproduce**\r\n\r\n1. Craft a script that will be modified by a SerDe import renamer\r\n1. Apply the V2 upgrade CLI tool: \r\n\r\nAn `AttributeError` exception is raised.\r\n\r\n**Expected behavior**\r\n\r\nThe V2 upgrade CLI would properly upgrade the script.\r\n\r\n**Screenshots or logs**\r\n\r\n```\r\n\u276f cat v1.py\r\nimport sagemaker\r\n\r\nfrom sagemaker.predictor import csv_serializer\r\n\r\ncsv_serializer.__doc___\r\n\r\n\u276f sagemaker-upgrade-v2 --in-file v1.py --out-file v2.py\r\nTraceback (most recent call last):\r\n File \"~/testvenv/bin/sagemaker-upgrade-v2\", line 8, in <module>\r\n sys.exit(main())\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py\", line 78, in main\r\n _update_file(args.in_file, args.out_file)\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py\", line 50, in _update_file\r\n updater_cls(input_path=input_file, output_path=output_file).update()\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py\", line 72, in update\r\n output = self._update_ast(self._read_input_file())\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py\", line 86, in _update_ast\r\n return ASTTransformer().visit(input_ast)\r\n File \"/usr/lib/python3.8/ast.py\", line 363, in visit\r\n return visitor(node)\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py\", line 136, in visit_Module\r\n self.generic_visit(node)\r\n File \"/usr/lib/python3.8/ast.py\", line 439, in generic_visit\r\n value = self.visit(value)\r\n File \"/usr/lib/python3.8/ast.py\", line 363, in visit\r\n return visitor(node)\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py\", line 155, in visit_ImportFrom\r\n node = import_checker.check_and_modify_node(node)\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/modifier.py\", line 26, in check_and_modify_node\r\n if self.node_should_be_modified(node):\r\n File \"~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/image_uris.py\", line 115, in node_should_be_modified\r\n return node.module in GET_IMAGE_URI_NAMESPACES and any(\r\nAttributeError: 'NoneType' object has no attribute 'module'\r\n```\r\n\r\n**System information**\r\nA description of your system. Please provide:\r\n- **SageMaker Python SDK version**: 2.4.1\r\n- **Framework name (eg. 
PyTorch) or algorithm (eg. KMeans)**:\r\n- **Framework version**:\r\n- **Python version**:\r\n- **CPU or GPU**:\r\n- **Custom Docker image (Y/N)**:\r\n\r\n**Additional context**\r\n\r\nThe problem comes from [the ordering](https://github.com/aws/sagemaker-python-sdk/blob/v2.4.1/src/sagemaker/cli/compatibility/v2/ast_transformer.py#L59-L60), which the existing, isolated unit tests do not cover. The earlier renamer modifies the ast, and the later renamer cannot handle this situation:\r\n\r\n```\r\n 59 modifiers.serde.SerdeImportFromPredictorRenamer(),\r\n 60 modifiers.image_uris.ImageURIRetrieveImportFromRenamer(),\r\n```\n", "before_files": [{"content": "# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Classes to modify image uri retrieve methods for Python SDK v2.0 and later.\"\"\"\nfrom __future__ import absolute_import\n\nimport ast\n\nfrom sagemaker.cli.compatibility.v2.modifiers import matching\nfrom sagemaker.cli.compatibility.v2.modifiers.modifier import Modifier\n\nGET_IMAGE_URI_NAME = \"get_image_uri\"\nGET_IMAGE_URI_NAMESPACES = (\n \"sagemaker\",\n \"sagemaker.amazon_estimator\",\n \"sagemaker.amazon.amazon_estimator\",\n \"amazon_estimator\",\n \"amazon.amazon_estimator\",\n)\n\n\nclass ImageURIRetrieveRefactor(Modifier):\n \"\"\"A class to refactor *get_image_uri() method.\"\"\"\n\n def node_should_be_modified(self, node):\n \"\"\"Checks if the ``ast.Call`` node calls a function of interest.\n\n This looks for the following calls:\n\n - ``sagemaker.get_image_uri``\n - ``sagemaker.amazon_estimator.get_image_uri``\n - ``get_image_uri``\n\n Args:\n node (ast.Call): a node that represents a function call. 
For more,\n see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n bool: If the ``ast.Call`` instantiates a class of interest.\n \"\"\"\n return matching.matches_name_or_namespaces(\n node, GET_IMAGE_URI_NAME, GET_IMAGE_URI_NAMESPACES\n )\n\n def modify_node(self, node):\n \"\"\"Modifies the ``ast.Call`` node to call ``image_uris.retrieve`` instead.\n And switch the first two parameters from (region, repo) to (framework, region)\n\n Args:\n node (ast.Call): a node that represents a *image_uris.retrieve call.\n \"\"\"\n original_args = [None] * 3\n for kw in node.keywords:\n if kw.arg == \"repo_name\":\n original_args[0] = ast.Str(kw.value.s)\n elif kw.arg == \"repo_region\":\n original_args[1] = ast.Str(kw.value.s)\n elif kw.arg == \"repo_version\":\n original_args[2] = ast.Str(kw.value.s)\n\n if len(node.args) > 0:\n original_args[1] = ast.Str(node.args[0].s)\n if len(node.args) > 1:\n original_args[0] = ast.Str(node.args[1].s)\n if len(node.args) > 2:\n original_args[2] = ast.Str(node.args[2].s)\n\n args = []\n for arg in original_args:\n if arg:\n args.append(arg)\n\n func = node.func\n has_sagemaker = False\n while hasattr(func, \"value\"):\n if hasattr(func.value, \"id\") and func.value.id == \"sagemaker\":\n has_sagemaker = True\n break\n func = func.value\n\n if has_sagemaker:\n node.func = ast.Attribute(\n value=ast.Attribute(attr=\"image_uris\", value=ast.Name(id=\"sagemaker\")),\n attr=\"retrieve\",\n )\n else:\n node.func = ast.Attribute(value=ast.Name(id=\"image_uris\"), attr=\"retrieve\")\n node.args = args\n node.keywords = []\n return node\n\n\nclass ImageURIRetrieveImportFromRenamer(Modifier):\n \"\"\"A class to update import statements of ``get_image_uri``.\"\"\"\n\n def node_should_be_modified(self, node):\n \"\"\"Checks if the import statement imports ``get_image_uri`` from the correct module.\n\n Args:\n node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.\n For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n bool: If the import statement imports ``get_image_uri`` from the correct module.\n \"\"\"\n return node.module in GET_IMAGE_URI_NAMESPACES and any(\n name.name == GET_IMAGE_URI_NAME for name in node.names\n )\n\n def modify_node(self, node):\n \"\"\"Changes the ``ast.ImportFrom`` node's name from ``get_image_uri`` to ``image_uris``.\n\n Args:\n node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.\n For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n ast.AST: the original node, which has been potentially modified.\n \"\"\"\n for name in node.names:\n if name.name == GET_IMAGE_URI_NAME:\n name.name = \"image_uris\"\n if node.module in GET_IMAGE_URI_NAMESPACES:\n node.module = \"sagemaker\"\n return node\n", "path": "src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py"}], "after_files": [{"content": "# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"Classes to modify image uri retrieve methods for Python SDK v2.0 and later.\"\"\"\nfrom __future__ import absolute_import\n\nimport ast\n\nfrom sagemaker.cli.compatibility.v2.modifiers import matching\nfrom sagemaker.cli.compatibility.v2.modifiers.modifier import Modifier\n\nGET_IMAGE_URI_NAME = \"get_image_uri\"\nGET_IMAGE_URI_NAMESPACES = (\n \"sagemaker\",\n \"sagemaker.amazon_estimator\",\n \"sagemaker.amazon.amazon_estimator\",\n \"amazon_estimator\",\n \"amazon.amazon_estimator\",\n)\n\n\nclass ImageURIRetrieveRefactor(Modifier):\n \"\"\"A class to refactor *get_image_uri() method.\"\"\"\n\n def node_should_be_modified(self, node):\n \"\"\"Checks if the ``ast.Call`` node calls a function of interest.\n\n This looks for the following calls:\n\n - ``sagemaker.get_image_uri``\n - ``sagemaker.amazon_estimator.get_image_uri``\n - ``get_image_uri``\n\n Args:\n node (ast.Call): a node that represents a function call. For more,\n see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n bool: If the ``ast.Call`` instantiates a class of interest.\n \"\"\"\n return matching.matches_name_or_namespaces(\n node, GET_IMAGE_URI_NAME, GET_IMAGE_URI_NAMESPACES\n )\n\n def modify_node(self, node):\n \"\"\"Modifies the ``ast.Call`` node to call ``image_uris.retrieve`` instead.\n And switch the first two parameters from (region, repo) to (framework, region)\n\n Args:\n node (ast.Call): a node that represents a *image_uris.retrieve call.\n \"\"\"\n original_args = [None] * 3\n for kw in node.keywords:\n if kw.arg == \"repo_name\":\n original_args[0] = ast.Str(kw.value.s)\n elif kw.arg == \"repo_region\":\n original_args[1] = ast.Str(kw.value.s)\n elif kw.arg == \"repo_version\":\n original_args[2] = ast.Str(kw.value.s)\n\n if len(node.args) > 0:\n original_args[1] = ast.Str(node.args[0].s)\n if len(node.args) > 1:\n original_args[0] = ast.Str(node.args[1].s)\n if len(node.args) > 2:\n original_args[2] = ast.Str(node.args[2].s)\n\n args = []\n for arg in original_args:\n if arg:\n args.append(arg)\n\n func = node.func\n has_sagemaker = False\n while hasattr(func, \"value\"):\n if hasattr(func.value, \"id\") and func.value.id == \"sagemaker\":\n has_sagemaker = True\n break\n func = func.value\n\n if has_sagemaker:\n node.func = ast.Attribute(\n value=ast.Attribute(attr=\"image_uris\", value=ast.Name(id=\"sagemaker\")),\n attr=\"retrieve\",\n )\n else:\n node.func = ast.Attribute(value=ast.Name(id=\"image_uris\"), attr=\"retrieve\")\n node.args = args\n node.keywords = []\n return node\n\n\nclass ImageURIRetrieveImportFromRenamer(Modifier):\n \"\"\"A class to update import statements of ``get_image_uri``.\"\"\"\n\n def node_should_be_modified(self, node):\n \"\"\"Checks if the import statement imports ``get_image_uri`` from the correct module.\n\n Args:\n node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.\n For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n bool: If the import statement imports ``get_image_uri`` from the correct module.\n \"\"\"\n return (\n node is not None\n and node.module in GET_IMAGE_URI_NAMESPACES\n and any(name.name == GET_IMAGE_URI_NAME for name in node.names)\n )\n\n def modify_node(self, node):\n \"\"\"Changes the ``ast.ImportFrom`` node's name from ``get_image_uri`` to ``image_uris``.\n\n Args:\n node (ast.ImportFrom): a node that represents a ``from ... import ... 
`` statement.\n For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.\n\n Returns:\n ast.AST: the original node, which has been potentially modified.\n \"\"\"\n for name in node.names:\n if name.name == GET_IMAGE_URI_NAME:\n name.name = \"image_uris\"\n if node.module in GET_IMAGE_URI_NAMESPACES:\n node.module = \"sagemaker\"\n return node\n", "path": "src/sagemaker/cli/compatibility/v2/modifiers/image_uris.py"}]}
| 2,604 | 196 |
gh_patches_debug_26426
|
rasdani/github-patches
|
git_diff
|
weni-ai__bothub-engine-106
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disallow samples without intent or entities
Disallow sample creation without an intent or at least one entity.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/api/validators.py`
Content:
```
1 from django.utils.translation import gettext as _
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.exceptions import ValidationError
4
5 from bothub.common.models import RepositoryTranslatedExample
6
7
8 class CanContributeInRepositoryValidator(object):
9 def __call__(self, value):
10 user_authorization = value.get_user_authorization(
11 self.request.user)
12 if not user_authorization.can_contribute:
13 raise PermissionDenied(
14 _('You can\'t contribute in this repository'))
15
16 def set_context(self, serializer):
17 self.request = serializer.context.get('request')
18
19
20 class CanContributeInRepositoryExampleValidator(object):
21 def __call__(self, value):
22 repository = value.repository_update.repository
23 user_authorization = repository.get_user_authorization(
24 self.request.user)
25 if not user_authorization.can_contribute:
26 raise PermissionDenied(
27 _('You can\'t contribute in this repository'))
28
29 def set_context(self, serializer):
30 self.request = serializer.context.get('request')
31
32
33 class CanContributeInRepositoryTranslatedExampleValidator(object):
34 def __call__(self, value):
35 repository = value.original_example.repository_update.repository
36 user_authorization = repository.get_user_authorization(
37 self.request.user)
38 if not user_authorization.can_contribute:
39 raise PermissionDenied(
40 _('You can\'t contribute in this repository'))
41
42 def set_context(self, serializer):
43 self.request = serializer.context.get('request')
44
45
46 class TranslatedExampleEntitiesValidator(object):
47 def __call__(self, attrs):
48 original_example = attrs.get('original_example')
49 entities_valid = RepositoryTranslatedExample.same_entities_validator(
50 list(map(lambda x: dict(x), attrs.get('entities'))),
51 list(map(lambda x: x.to_dict, original_example.entities.all())))
52 if not entities_valid:
53 raise ValidationError({'entities': _('Invalid entities')})
54
55
56 class TranslatedExampleLanguageValidator(object):
57 def __call__(self, attrs):
58 original_example = attrs.get('original_example')
59 language = attrs.get('language')
60 if original_example.repository_update.language == language:
61 raise ValidationError({'language': _(
62 'Can\'t translate to the same language')})
63
```
Path: `bothub/api/serializers/example.py`
Content:
```
1 from rest_framework import serializers
2
3 from django.utils.translation import gettext as _
4
5 from bothub.common.models import Repository
6 from bothub.common.models import RepositoryExample
7 from bothub.common.models import RepositoryExampleEntity
8
9 from ..fields import EntityText
10 from ..validators import CanContributeInRepositoryExampleValidator
11 from ..validators import CanContributeInRepositoryValidator
12 from .translate import RepositoryTranslatedExampleSerializer
13
14
15 class RepositoryExampleEntitySerializer(serializers.ModelSerializer):
16 class Meta:
17 model = RepositoryExampleEntity
18 fields = [
19 'id',
20 'repository_example',
21 'start',
22 'end',
23 'entity',
24 'created_at',
25 'value',
26 ]
27
28 repository_example = serializers.PrimaryKeyRelatedField(
29 queryset=RepositoryExample.objects,
30 validators=[
31 CanContributeInRepositoryExampleValidator(),
32 ],
33 help_text=_('Example\'s ID'))
34 value = serializers.SerializerMethodField()
35
36 def get_value(self, obj):
37 return obj.value
38
39
40 class NewRepositoryExampleEntitySerializer(serializers.ModelSerializer):
41 class Meta:
42 model = RepositoryExampleEntity
43 fields = [
44 'repository_example',
45 'start',
46 'end',
47 'entity',
48 ]
49
50
51 class RepositoryExampleSerializer(serializers.ModelSerializer):
52 class Meta:
53 model = RepositoryExample
54 fields = [
55 'id',
56 'repository_update',
57 'deleted_in',
58 'text',
59 'intent',
60 'language',
61 'created_at',
62 'entities',
63 'translations',
64 ]
65 read_only_fields = [
66 'repository_update',
67 'deleted_in',
68 ]
69
70 entities = RepositoryExampleEntitySerializer(
71 many=True,
72 read_only=True)
73 translations = RepositoryTranslatedExampleSerializer(
74 many=True,
75 read_only=True)
76 language = serializers.SerializerMethodField()
77
78 def get_language(self, obj):
79 return obj.language
80
81
82 class NewRepositoryExampleSerializer(serializers.ModelSerializer):
83 class Meta:
84 model = RepositoryExample
85 fields = [
86 'id',
87 'repository',
88 'repository_update',
89 'text',
90 'entities',
91 'intent',
92 ]
93
94 id = serializers.PrimaryKeyRelatedField(
95 read_only=True,
96 style={'show': False})
97 text = EntityText(style={'entities_field': 'entities'})
98 repository = serializers.PrimaryKeyRelatedField(
99 queryset=Repository.objects,
100 validators=[
101 CanContributeInRepositoryValidator(),
102 ],
103 source='repository_update',
104 style={'show': False})
105 repository_update = serializers.PrimaryKeyRelatedField(
106 read_only=True,
107 style={'show': False})
108 entities = NewRepositoryExampleEntitySerializer(
109 many=True,
110 style={'text_field': 'text'})
111
112 def validate_repository(self, repository):
113 return repository.current_update()
114
115 def create(self, validated_data):
116 entities_data = validated_data.pop('entities')
117 example = self.Meta.model.objects.create(**validated_data)
118 for entity_data in entities_data:
119 RepositoryExampleEntity.objects.create(
120 repository_example=example,
121 **entity_data)
122 return example
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bothub/api/serializers/example.py b/bothub/api/serializers/example.py
--- a/bothub/api/serializers/example.py
+++ b/bothub/api/serializers/example.py
@@ -9,6 +9,7 @@
from ..fields import EntityText
from ..validators import CanContributeInRepositoryExampleValidator
from ..validators import CanContributeInRepositoryValidator
+from ..validators import ExampleWithIntentOrEntityValidator
from .translate import RepositoryTranslatedExampleSerializer
@@ -109,6 +110,10 @@
many=True,
style={'text_field': 'text'})
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.validators.append(ExampleWithIntentOrEntityValidator())
+
def validate_repository(self, repository):
return repository.current_update()
diff --git a/bothub/api/validators.py b/bothub/api/validators.py
--- a/bothub/api/validators.py
+++ b/bothub/api/validators.py
@@ -60,3 +60,12 @@
if original_example.repository_update.language == language:
raise ValidationError({'language': _(
'Can\'t translate to the same language')})
+
+
+class ExampleWithIntentOrEntityValidator(object):
+ def __call__(self, attrs):
+ intent = attrs.get('intent')
+ entities = attrs.get('entities')
+
+ if not intent and not entities:
+ raise ValidationError(_('Define a intent or one entity'))
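As a rough, framework-free sketch of the rule this patch enforces (a plain `ValueError` stands in for DRF's `ValidationError`, and the hypothetical `attrs` dict stands in for the serializer's validated data):

```python
def validate_example(attrs):
    """Reject an example that has neither an intent nor any entities."""
    intent = attrs.get('intent')
    entities = attrs.get('entities')
    if not intent and not entities:
        raise ValueError('Define an intent or at least one entity')


validate_example({'intent': 'greet', 'entities': []})               # accepted
validate_example({'intent': '', 'entities': [{'entity': 'name'}]})  # accepted
try:
    validate_example({'intent': '', 'entities': []})                # rejected
except ValueError as exc:
    print(exc)
```

In the serializer itself, the equivalent check is the `ExampleWithIntentOrEntityValidator` instance appended to `self.validators` in `__init__`, so it runs as part of `is_valid()`.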
|
{"golden_diff": "diff --git a/bothub/api/serializers/example.py b/bothub/api/serializers/example.py\n--- a/bothub/api/serializers/example.py\n+++ b/bothub/api/serializers/example.py\n@@ -9,6 +9,7 @@\n from ..fields import EntityText\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import CanContributeInRepositoryValidator\n+from ..validators import ExampleWithIntentOrEntityValidator\n from .translate import RepositoryTranslatedExampleSerializer\n \n \n@@ -109,6 +110,10 @@\n many=True,\n style={'text_field': 'text'})\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.validators.append(ExampleWithIntentOrEntityValidator())\n+\n def validate_repository(self, repository):\n return repository.current_update()\n \ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -60,3 +60,12 @@\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _(\n 'Can\\'t translate to the same language')})\n+\n+\n+class ExampleWithIntentOrEntityValidator(object):\n+ def __call__(self, attrs):\n+ intent = attrs.get('intent')\n+ entities = attrs.get('entities')\n+\n+ if not intent and not entities:\n+ raise ValidationError(_('Define a intent or one entity'))\n", "issue": "Disallow samples without intent or entities\nDisallow samples creation without an intent or one entity at least.\n", "before_files": [{"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n\n\nclass TranslatedExampleLanguageValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n language = 
attrs.get('language')\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _(\n 'Can\\'t translate to the same language')})\n", "path": "bothub/api/validators.py"}, {"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import Repository\nfrom bothub.common.models import RepositoryExample\nfrom bothub.common.models import RepositoryExampleEntity\n\nfrom ..fields import EntityText\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import CanContributeInRepositoryValidator\nfrom .translate import RepositoryTranslatedExampleSerializer\n\n\nclass RepositoryExampleEntitySerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExampleEntity\n fields = [\n 'id',\n 'repository_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass NewRepositoryExampleEntitySerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExampleEntity\n fields = [\n 'repository_example',\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass RepositoryExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExample\n fields = [\n 'id',\n 'repository_update',\n 'deleted_in',\n 'text',\n 'intent',\n 'language',\n 'created_at',\n 'entities',\n 'translations',\n ]\n read_only_fields = [\n 'repository_update',\n 'deleted_in',\n ]\n\n entities = RepositoryExampleEntitySerializer(\n many=True,\n read_only=True)\n translations = RepositoryTranslatedExampleSerializer(\n many=True,\n read_only=True)\n language = serializers.SerializerMethodField()\n\n def get_language(self, obj):\n return obj.language\n\n\nclass NewRepositoryExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExample\n fields = [\n 'id',\n 'repository',\n 'repository_update',\n 'text',\n 'entities',\n 'intent',\n ]\n\n id = serializers.PrimaryKeyRelatedField(\n read_only=True,\n style={'show': False})\n text = EntityText(style={'entities_field': 'entities'})\n repository = serializers.PrimaryKeyRelatedField(\n queryset=Repository.objects,\n validators=[\n CanContributeInRepositoryValidator(),\n ],\n source='repository_update',\n style={'show': False})\n repository_update = serializers.PrimaryKeyRelatedField(\n read_only=True,\n style={'show': False})\n entities = NewRepositoryExampleEntitySerializer(\n many=True,\n style={'text_field': 'text'})\n\n def validate_repository(self, repository):\n return repository.current_update()\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n example = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryExampleEntity.objects.create(\n repository_example=example,\n **entity_data)\n return example\n", "path": "bothub/api/serializers/example.py"}], "after_files": [{"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n 
user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n\n\nclass TranslatedExampleLanguageValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n language = attrs.get('language')\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _(\n 'Can\\'t translate to the same language')})\n\n\nclass ExampleWithIntentOrEntityValidator(object):\n def __call__(self, attrs):\n intent = attrs.get('intent')\n entities = attrs.get('entities')\n\n if not intent and not entities:\n raise ValidationError(_('Define a intent or one entity'))\n", "path": "bothub/api/validators.py"}, {"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import Repository\nfrom bothub.common.models import RepositoryExample\nfrom bothub.common.models import RepositoryExampleEntity\n\nfrom ..fields import EntityText\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import CanContributeInRepositoryValidator\nfrom ..validators import ExampleWithIntentOrEntityValidator\nfrom .translate import RepositoryTranslatedExampleSerializer\n\n\nclass RepositoryExampleEntitySerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExampleEntity\n fields = [\n 'id',\n 'repository_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass NewRepositoryExampleEntitySerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExampleEntity\n fields = [\n 'repository_example',\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass RepositoryExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = 
RepositoryExample\n fields = [\n 'id',\n 'repository_update',\n 'deleted_in',\n 'text',\n 'intent',\n 'language',\n 'created_at',\n 'entities',\n 'translations',\n ]\n read_only_fields = [\n 'repository_update',\n 'deleted_in',\n ]\n\n entities = RepositoryExampleEntitySerializer(\n many=True,\n read_only=True)\n translations = RepositoryTranslatedExampleSerializer(\n many=True,\n read_only=True)\n language = serializers.SerializerMethodField()\n\n def get_language(self, obj):\n return obj.language\n\n\nclass NewRepositoryExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryExample\n fields = [\n 'id',\n 'repository',\n 'repository_update',\n 'text',\n 'entities',\n 'intent',\n ]\n\n id = serializers.PrimaryKeyRelatedField(\n read_only=True,\n style={'show': False})\n text = EntityText(style={'entities_field': 'entities'})\n repository = serializers.PrimaryKeyRelatedField(\n queryset=Repository.objects,\n validators=[\n CanContributeInRepositoryValidator(),\n ],\n source='repository_update',\n style={'show': False})\n repository_update = serializers.PrimaryKeyRelatedField(\n read_only=True,\n style={'show': False})\n entities = NewRepositoryExampleEntitySerializer(\n many=True,\n style={'text_field': 'text'})\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(ExampleWithIntentOrEntityValidator())\n\n def validate_repository(self, repository):\n return repository.current_update()\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n example = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryExampleEntity.objects.create(\n repository_example=example,\n **entity_data)\n return example\n", "path": "bothub/api/serializers/example.py"}]}
| 1,758 | 338 |
gh_patches_debug_57917
|
rasdani/github-patches
|
git_diff
|
dj-stripe__dj-stripe-701
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
1.1.0 not compatible with python 2
First of all, thanks a lot for your hard work! We've been using dj-stripe for a long time and it has served us well. Now we're ready to upgrade! Let the fun begin ;).
We're using Python 2 and Django 1.11. Am I correct that 1.2 should support that? Anyway, we have to move to 1.1 first for the migrations. There is one problem, though, in the 1.1 release.
In commit https://github.com/dj-stripe/dj-stripe/commit/6a6f048a3a432a3ba40fba8bf90f8789139daec4, `StripeEnumField` was added with a `super()` call that is not compatible with Python 2:
```name, path, args, kwargs = super().deconstruct()```
What do you guys think? Can we backport a hotfix to 1.1.1 or something?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/fields.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 .. module:: djstripe.fields.
4
5 :synopsis: dj-stripe Custom Field Definitions
6
7 .. moduleauthor:: Bill Huneke (@wahuneke)
8 """
9 from __future__ import absolute_import, division, print_function, unicode_literals
10
11 import decimal
12
13 from django.core.exceptions import FieldError, ImproperlyConfigured
14 from django.core.validators import MaxValueValidator, MinValueValidator
15 from django.db import models
16
17 from .settings import USE_NATIVE_JSONFIELD
18 from .utils import convert_tstamp, dict_nested_accessor
19
20
21 if USE_NATIVE_JSONFIELD:
22 from django.contrib.postgres.fields import JSONField
23 else:
24 from jsonfield import JSONField
25
26
27 class PaymentMethodForeignKey(models.ForeignKey):
28 def __init__(self, **kwargs):
29 kwargs.setdefault("to", "PaymentMethod")
30 super(PaymentMethodForeignKey, self).__init__(**kwargs)
31
32
33 class StripeFieldMixin(object):
34 """
35 Custom fields for all Stripe data.
36
37 This allows keeping track of which database fields are suitable for
38 sending to or receiving from Stripe. Also, allows a few handy extra parameters.
39 """
40
41 # Used if the name at stripe is different from the name in our database
42 # Include a . in name if value is nested in dict in Stripe's object
43 # (e.g. stripe_name = "data.id" --> obj["data"]["id"])
44 stripe_name = None
45
46 # If stripe_name is None, this can also be used to specify a nested value, but
47 # the final value is assumed to be the database field name
48 # (e.g. nested_name = "data" --> obj["data"][db_field_name]
49 nested_name = None
50
51 # This indicates that this field will always appear in a stripe object. It will be
52 # an Exception if we try to parse a stripe object that does not include this field
53 # in the data. If set to False then null=True attribute will be automatically set
54 stripe_required = True
55
56 # If a field was populated in previous API versions but we don't want to drop the old
57 # data for some reason, mark it as deprecated. This will make sure we never try to send
58 # it to Stripe or expect in Stripe data received
59 # This setting automatically implies Null=True
60 deprecated = False
61
62 def __init__(self, *args, **kwargs):
63 """
64 Assign class instance variables based on kwargs.
65
66 Assign extra class instance variables if stripe_required is defined or
67 if deprecated is defined.
68 """
69 self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)
70 self.nested_name = kwargs.pop('nested_name', self.nested_name)
71 self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)
72 self.deprecated = kwargs.pop('deprecated', self.deprecated)
73 if not self.stripe_required:
74 kwargs["null"] = True
75
76 if self.deprecated:
77 kwargs["null"] = True
78 kwargs["default"] = None
79 super(StripeFieldMixin, self).__init__(*args, **kwargs)
80
81 def stripe_to_db(self, data):
82 """Try converting stripe fields to defined database fields."""
83 if not self.deprecated:
84 try:
85 if self.stripe_name:
86 result = dict_nested_accessor(data, self.stripe_name)
87 elif self.nested_name:
88 result = dict_nested_accessor(data, self.nested_name + "." + self.name)
89 else:
90 result = data[self.name]
91 except (KeyError, TypeError):
92 if self.stripe_required:
93 model_name = self.model._meta.object_name if hasattr(self, "model") else ""
94 raise FieldError("Required stripe field '{field_name}' was not"
95 " provided in {model_name} data object.".format(field_name=self.name,
96 model_name=model_name))
97 else:
98 result = None
99
100 return result
101
102
103 class StripePercentField(StripeFieldMixin, models.DecimalField):
104 """A field used to define a percent according to djstripe logic."""
105
106 def __init__(self, *args, **kwargs):
107 """Assign default args to this field."""
108 defaults = {
109 'decimal_places': 2,
110 'max_digits': 5,
111 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]
112 }
113 defaults.update(kwargs)
114 super(StripePercentField, self).__init__(*args, **defaults)
115
116
117 class StripeCurrencyField(StripeFieldMixin, models.DecimalField):
118 """
119 A field used to define currency according to djstripe logic.
120
121 Stripe is always in cents. djstripe stores everything in dollars.
122 """
123
124 def __init__(self, *args, **kwargs):
125 """Assign default args to this field."""
126 defaults = {
127 'decimal_places': 2,
128 'max_digits': 8,
129 }
130 defaults.update(kwargs)
131 super(StripeCurrencyField, self).__init__(*args, **defaults)
132
133 def stripe_to_db(self, data):
134 """Convert the raw value to decimal representation."""
135 val = super(StripeCurrencyField, self).stripe_to_db(data)
136
137 # Note: 0 is a possible return value, which is 'falseish'
138 if val is not None:
139 return val / decimal.Decimal("100")
140
141
142 class StripeBooleanField(StripeFieldMixin, models.BooleanField):
143 """A field used to define a boolean value according to djstripe logic."""
144
145 def __init__(self, *args, **kwargs):
146 """Throw an error when a user tries to deprecate."""
147 if kwargs.get("deprecated", False):
148 raise ImproperlyConfigured("Boolean field cannot be deprecated. Change field type to "
149 "StripeNullBooleanField")
150 super(StripeBooleanField, self).__init__(*args, **kwargs)
151
152
153 class StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):
154 """A field used to define a NullBooleanField value according to djstripe logic."""
155
156 pass
157
158
159 class StripeCharField(StripeFieldMixin, models.CharField):
160 """A field used to define a CharField value according to djstripe logic."""
161
162 pass
163
164
165 class StripeEnumField(StripeCharField):
166 def __init__(self, enum, *args, **kwargs):
167 self.enum = enum
168 choices = enum.choices
169 defaults = {
170 "choices": choices,
171 "max_length": max(len(k) for k, v in choices)
172 }
173 defaults.update(kwargs)
174 super(StripeEnumField, self).__init__(*args, **defaults)
175
176 def deconstruct(self):
177 name, path, args, kwargs = super().deconstruct()
178 kwargs["enum"] = self.enum
179 del kwargs["choices"]
180 return name, path, args, kwargs
181
182
183 class StripeIdField(StripeCharField):
184 """A field with enough space to hold any stripe ID."""
185
186 def __init__(self, *args, **kwargs):
187 """
188 Assign default args to this field.
189
190 As per: https://stripe.com/docs/upgrades
191 You can safely assume object IDs we generate will never exceed 255
192 characters, but you should be able to handle IDs of up to that
193 length.
194 """
195 defaults = {
196 'max_length': 255,
197 'blank': False,
198 'null': False,
199 }
200 defaults.update(kwargs)
201 super(StripeIdField, self).__init__(*args, **defaults)
202
203
204 class StripeTextField(StripeFieldMixin, models.TextField):
205 """A field used to define a TextField value according to djstripe logic."""
206
207 pass
208
209
210 class StripeDateTimeField(StripeFieldMixin, models.DateTimeField):
211 """A field used to define a DateTimeField value according to djstripe logic."""
212
213 def stripe_to_db(self, data):
214 """Convert the raw timestamp value to a DateTime representation."""
215 val = super(StripeDateTimeField, self).stripe_to_db(data)
216
217 # Note: 0 is a possible return value, which is 'falseish'
218 if val is not None:
219 return convert_tstamp(val)
220
221
222 class StripeIntegerField(StripeFieldMixin, models.IntegerField):
223 """A field used to define a IntegerField value according to djstripe logic."""
224
225 pass
226
227
228 class StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):
229 """A field used to define a PositiveIntegerField value according to djstripe logic."""
230
231 pass
232
233
234 class StripeJSONField(StripeFieldMixin, JSONField):
235 """A field used to define a JSONField value according to djstripe logic."""
236
237 pass
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/djstripe/fields.py b/djstripe/fields.py
--- a/djstripe/fields.py
+++ b/djstripe/fields.py
@@ -174,7 +174,7 @@
super(StripeEnumField, self).__init__(*args, **defaults)
def deconstruct(self):
- name, path, args, kwargs = super().deconstruct()
+ name, path, args, kwargs = super(StripeEnumField, self).deconstruct()
kwargs["enum"] = self.enum
del kwargs["choices"]
return name, path, args, kwargs
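For context, a minimal stand-alone illustration of the incompatibility, using hypothetical `Base`/`Py3Only`/`Portable` classes unrelated to dj-stripe: the zero-argument `super()` form exists only on Python 3, while the explicit two-argument form used in the fix works on both interpreters.

```python
class Base(object):
    def deconstruct(self):
        return ('base', (), {})


class Py3Only(Base):
    def deconstruct(self):
        # Fine on Python 3; on Python 2 calling this raises
        # TypeError: super() takes at least 1 argument (0 given)
        return super().deconstruct()


class Portable(Base):
    def deconstruct(self):
        # Explicitly naming the class and instance works on Python 2 and 3.
        return super(Portable, self).deconstruct()


print(Portable().deconstruct())  # ('base', (), {})
```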
|
{"golden_diff": "diff --git a/djstripe/fields.py b/djstripe/fields.py\n--- a/djstripe/fields.py\n+++ b/djstripe/fields.py\n@@ -174,7 +174,7 @@\n super(StripeEnumField, self).__init__(*args, **defaults)\n \n def deconstruct(self):\n- name, path, args, kwargs = super().deconstruct()\n+ name, path, args, kwargs = super(StripeEnumField, self).deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n", "issue": "1.1.0 not compatible with python 2\nFirst of all, thanks a lot for your hard work! We've been using dj-stripe for a long time and it has served us well. Now we're ready to upgrade! Let the fun begin ;).\r\n\r\nWe're using python 2 and Django 1.11. I'm correct that 1.2 should support that right? Anyway we have to move to 1.1 first for the migrations. There is one problem though in the 1.1 release.\r\n\r\nIn commit https://github.com/dj-stripe/dj-stripe/commit/6a6f048a3a432a3ba40fba8bf90f8789139daec4 `StripeEnumField` was added with the non python 2 compatible `super()` call:\r\n\r\n```name, path, args, kwargs = super().deconstruct()```\r\n\r\nWhat do you guys think? Can we backport a hotfix fix to 1.1.1 or something?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.fields.\n\n :synopsis: dj-stripe Custom Field Definitions\n\n.. moduleauthor:: Bill Huneke (@wahuneke)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport decimal\n\nfrom django.core.exceptions import FieldError, ImproperlyConfigured\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .settings import USE_NATIVE_JSONFIELD\nfrom .utils import convert_tstamp, dict_nested_accessor\n\n\nif USE_NATIVE_JSONFIELD:\n from django.contrib.postgres.fields import JSONField\nelse:\n from jsonfield import JSONField\n\n\nclass PaymentMethodForeignKey(models.ForeignKey):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"to\", \"PaymentMethod\")\n super(PaymentMethodForeignKey, self).__init__(**kwargs)\n\n\nclass StripeFieldMixin(object):\n \"\"\"\n Custom fields for all Stripe data.\n\n This allows keeping track of which database fields are suitable for\n sending to or receiving from Stripe. Also, allows a few handy extra parameters.\n \"\"\"\n\n # Used if the name at stripe is different from the name in our database\n # Include a . in name if value is nested in dict in Stripe's object\n # (e.g. stripe_name = \"data.id\" --> obj[\"data\"][\"id\"])\n stripe_name = None\n\n # If stripe_name is None, this can also be used to specify a nested value, but\n # the final value is assumed to be the database field name\n # (e.g. nested_name = \"data\" --> obj[\"data\"][db_field_name]\n nested_name = None\n\n # This indicates that this field will always appear in a stripe object. It will be\n # an Exception if we try to parse a stripe object that does not include this field\n # in the data. If set to False then null=True attribute will be automatically set\n stripe_required = True\n\n # If a field was populated in previous API versions but we don't want to drop the old\n # data for some reason, mark it as deprecated. 
This will make sure we never try to send\n # it to Stripe or expect in Stripe data received\n # This setting automatically implies Null=True\n deprecated = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign class instance variables based on kwargs.\n\n Assign extra class instance variables if stripe_required is defined or\n if deprecated is defined.\n \"\"\"\n self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)\n self.nested_name = kwargs.pop('nested_name', self.nested_name)\n self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)\n self.deprecated = kwargs.pop('deprecated', self.deprecated)\n if not self.stripe_required:\n kwargs[\"null\"] = True\n\n if self.deprecated:\n kwargs[\"null\"] = True\n kwargs[\"default\"] = None\n super(StripeFieldMixin, self).__init__(*args, **kwargs)\n\n def stripe_to_db(self, data):\n \"\"\"Try converting stripe fields to defined database fields.\"\"\"\n if not self.deprecated:\n try:\n if self.stripe_name:\n result = dict_nested_accessor(data, self.stripe_name)\n elif self.nested_name:\n result = dict_nested_accessor(data, self.nested_name + \".\" + self.name)\n else:\n result = data[self.name]\n except (KeyError, TypeError):\n if self.stripe_required:\n model_name = self.model._meta.object_name if hasattr(self, \"model\") else \"\"\n raise FieldError(\"Required stripe field '{field_name}' was not\"\n \" provided in {model_name} data object.\".format(field_name=self.name,\n model_name=model_name))\n else:\n result = None\n\n return result\n\n\nclass StripePercentField(StripeFieldMixin, models.DecimalField):\n \"\"\"A field used to define a percent according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 5,\n 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]\n }\n defaults.update(kwargs)\n super(StripePercentField, self).__init__(*args, **defaults)\n\n\nclass StripeCurrencyField(StripeFieldMixin, models.DecimalField):\n \"\"\"\n A field used to define currency according to djstripe logic.\n\n Stripe is always in cents. djstripe stores everything in dollars.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 8,\n }\n defaults.update(kwargs)\n super(StripeCurrencyField, self).__init__(*args, **defaults)\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw value to decimal representation.\"\"\"\n val = super(StripeCurrencyField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return val / decimal.Decimal(\"100\")\n\n\nclass StripeBooleanField(StripeFieldMixin, models.BooleanField):\n \"\"\"A field used to define a boolean value according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Throw an error when a user tries to deprecate.\"\"\"\n if kwargs.get(\"deprecated\", False):\n raise ImproperlyConfigured(\"Boolean field cannot be deprecated. 
Change field type to \"\n \"StripeNullBooleanField\")\n super(StripeBooleanField, self).__init__(*args, **kwargs)\n\n\nclass StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):\n \"\"\"A field used to define a NullBooleanField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeCharField(StripeFieldMixin, models.CharField):\n \"\"\"A field used to define a CharField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeEnumField(StripeCharField):\n def __init__(self, enum, *args, **kwargs):\n self.enum = enum\n choices = enum.choices\n defaults = {\n \"choices\": choices,\n \"max_length\": max(len(k) for k, v in choices)\n }\n defaults.update(kwargs)\n super(StripeEnumField, self).__init__(*args, **defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n\n\nclass StripeIdField(StripeCharField):\n \"\"\"A field with enough space to hold any stripe ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign default args to this field.\n\n As per: https://stripe.com/docs/upgrades\n You can safely assume object IDs we generate will never exceed 255\n characters, but you should be able to handle IDs of up to that\n length.\n \"\"\"\n defaults = {\n 'max_length': 255,\n 'blank': False,\n 'null': False,\n }\n defaults.update(kwargs)\n super(StripeIdField, self).__init__(*args, **defaults)\n\n\nclass StripeTextField(StripeFieldMixin, models.TextField):\n \"\"\"A field used to define a TextField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeDateTimeField(StripeFieldMixin, models.DateTimeField):\n \"\"\"A field used to define a DateTimeField value according to djstripe logic.\"\"\"\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw timestamp value to a DateTime representation.\"\"\"\n val = super(StripeDateTimeField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return convert_tstamp(val)\n\n\nclass StripeIntegerField(StripeFieldMixin, models.IntegerField):\n \"\"\"A field used to define a IntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):\n \"\"\"A field used to define a PositiveIntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeJSONField(StripeFieldMixin, JSONField):\n \"\"\"A field used to define a JSONField value according to djstripe logic.\"\"\"\n\n pass\n", "path": "djstripe/fields.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.fields.\n\n :synopsis: dj-stripe Custom Field Definitions\n\n.. 
moduleauthor:: Bill Huneke (@wahuneke)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport decimal\n\nfrom django.core.exceptions import FieldError, ImproperlyConfigured\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .settings import USE_NATIVE_JSONFIELD\nfrom .utils import convert_tstamp, dict_nested_accessor\n\n\nif USE_NATIVE_JSONFIELD:\n from django.contrib.postgres.fields import JSONField\nelse:\n from jsonfield import JSONField\n\n\nclass PaymentMethodForeignKey(models.ForeignKey):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"to\", \"PaymentMethod\")\n super(PaymentMethodForeignKey, self).__init__(**kwargs)\n\n\nclass StripeFieldMixin(object):\n \"\"\"\n Custom fields for all Stripe data.\n\n This allows keeping track of which database fields are suitable for\n sending to or receiving from Stripe. Also, allows a few handy extra parameters.\n \"\"\"\n\n # Used if the name at stripe is different from the name in our database\n # Include a . in name if value is nested in dict in Stripe's object\n # (e.g. stripe_name = \"data.id\" --> obj[\"data\"][\"id\"])\n stripe_name = None\n\n # If stripe_name is None, this can also be used to specify a nested value, but\n # the final value is assumed to be the database field name\n # (e.g. nested_name = \"data\" --> obj[\"data\"][db_field_name]\n nested_name = None\n\n # This indicates that this field will always appear in a stripe object. It will be\n # an Exception if we try to parse a stripe object that does not include this field\n # in the data. If set to False then null=True attribute will be automatically set\n stripe_required = True\n\n # If a field was populated in previous API versions but we don't want to drop the old\n # data for some reason, mark it as deprecated. 
This will make sure we never try to send\n # it to Stripe or expect in Stripe data received\n # This setting automatically implies Null=True\n deprecated = False\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign class instance variables based on kwargs.\n\n Assign extra class instance variables if stripe_required is defined or\n if deprecated is defined.\n \"\"\"\n self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)\n self.nested_name = kwargs.pop('nested_name', self.nested_name)\n self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)\n self.deprecated = kwargs.pop('deprecated', self.deprecated)\n if not self.stripe_required:\n kwargs[\"null\"] = True\n\n if self.deprecated:\n kwargs[\"null\"] = True\n kwargs[\"default\"] = None\n super(StripeFieldMixin, self).__init__(*args, **kwargs)\n\n def stripe_to_db(self, data):\n \"\"\"Try converting stripe fields to defined database fields.\"\"\"\n if not self.deprecated:\n try:\n if self.stripe_name:\n result = dict_nested_accessor(data, self.stripe_name)\n elif self.nested_name:\n result = dict_nested_accessor(data, self.nested_name + \".\" + self.name)\n else:\n result = data[self.name]\n except (KeyError, TypeError):\n if self.stripe_required:\n model_name = self.model._meta.object_name if hasattr(self, \"model\") else \"\"\n raise FieldError(\"Required stripe field '{field_name}' was not\"\n \" provided in {model_name} data object.\".format(field_name=self.name,\n model_name=model_name))\n else:\n result = None\n\n return result\n\n\nclass StripePercentField(StripeFieldMixin, models.DecimalField):\n \"\"\"A field used to define a percent according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 5,\n 'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]\n }\n defaults.update(kwargs)\n super(StripePercentField, self).__init__(*args, **defaults)\n\n\nclass StripeCurrencyField(StripeFieldMixin, models.DecimalField):\n \"\"\"\n A field used to define currency according to djstripe logic.\n\n Stripe is always in cents. djstripe stores everything in dollars.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n 'decimal_places': 2,\n 'max_digits': 8,\n }\n defaults.update(kwargs)\n super(StripeCurrencyField, self).__init__(*args, **defaults)\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw value to decimal representation.\"\"\"\n val = super(StripeCurrencyField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return val / decimal.Decimal(\"100\")\n\n\nclass StripeBooleanField(StripeFieldMixin, models.BooleanField):\n \"\"\"A field used to define a boolean value according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Throw an error when a user tries to deprecate.\"\"\"\n if kwargs.get(\"deprecated\", False):\n raise ImproperlyConfigured(\"Boolean field cannot be deprecated. 
Change field type to \"\n \"StripeNullBooleanField\")\n super(StripeBooleanField, self).__init__(*args, **kwargs)\n\n\nclass StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):\n \"\"\"A field used to define a NullBooleanField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeCharField(StripeFieldMixin, models.CharField):\n \"\"\"A field used to define a CharField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeEnumField(StripeCharField):\n def __init__(self, enum, *args, **kwargs):\n self.enum = enum\n choices = enum.choices\n defaults = {\n \"choices\": choices,\n \"max_length\": max(len(k) for k, v in choices)\n }\n defaults.update(kwargs)\n super(StripeEnumField, self).__init__(*args, **defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super(StripeEnumField, self).deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n\n\nclass StripeIdField(StripeCharField):\n \"\"\"A field with enough space to hold any stripe ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign default args to this field.\n\n As per: https://stripe.com/docs/upgrades\n You can safely assume object IDs we generate will never exceed 255\n characters, but you should be able to handle IDs of up to that\n length.\n \"\"\"\n defaults = {\n 'max_length': 255,\n 'blank': False,\n 'null': False,\n }\n defaults.update(kwargs)\n super(StripeIdField, self).__init__(*args, **defaults)\n\n\nclass StripeTextField(StripeFieldMixin, models.TextField):\n \"\"\"A field used to define a TextField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeDateTimeField(StripeFieldMixin, models.DateTimeField):\n \"\"\"A field used to define a DateTimeField value according to djstripe logic.\"\"\"\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw timestamp value to a DateTime representation.\"\"\"\n val = super(StripeDateTimeField, self).stripe_to_db(data)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return convert_tstamp(val)\n\n\nclass StripeIntegerField(StripeFieldMixin, models.IntegerField):\n \"\"\"A field used to define a IntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):\n \"\"\"A field used to define a PositiveIntegerField value according to djstripe logic.\"\"\"\n\n pass\n\n\nclass StripeJSONField(StripeFieldMixin, JSONField):\n \"\"\"A field used to define a JSONField value according to djstripe logic.\"\"\"\n\n pass\n", "path": "djstripe/fields.py"}]}
| 2,946 | 134 |
gh_patches_debug_28488
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-443
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Execution when debugging is incorrect
From developer community
https://developercommunity.visualstudio.com/content/problem/254447/python-execute-problem.html?childToView=255082#comment-255082
```
import pymysql
db = pymysql.connect(host="localhost",user="root",password="",database="zhidao",charset='utf8')
cursor_select = db.cursor()
sql_select = "select question_id from zhidao"
size_select = cursor_select.execute(sql_select)
for i in range(size_select):
data = cursor_select.fetchone()
print(data[0])
db.close()
```
Table zhidao is small, with about 1400 rows. The code works fine when executed directly, but if you set a breakpoint and step over "size_select = cursor_select.execute(sql_select)" with F10, `data` somehow becomes NoneType. Looking deeper, stepping with F10 somehow changes cursor_select.rownumber to a nonzero value; the related pymysql code is here: https://github.com/PyMySQL/PyMySQL/blob/master/pymysql/cursors.py
I hope this is helpful.
Version:
Win10 1803
VS Community 2017 - 15.7.1
Python - 15.7.18116.1
Python 3.6(64-bit)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/safe_repr.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7
8 # Py3 compat - alias unicode to str, and xrange to range
9 try:
10 unicode # noqa
11 except NameError:
12 unicode = str
13 try:
14 xrange # noqa
15 except NameError:
16 xrange = range
17
18
19 class SafeRepr(object):
20 # String types are truncated to maxstring_outer when at the outer-
21 # most level, and truncated to maxstring_inner characters inside
22 # collections.
23 maxstring_outer = 2 ** 16
24 maxstring_inner = 30
25 if sys.version_info >= (3, 0):
26 string_types = (str, bytes)
27 set_info = (set, '{', '}', False)
28 frozenset_info = (frozenset, 'frozenset({', '})', False)
29 int_types = (int,)
30 else:
31 string_types = (str, unicode)
32 set_info = (set, 'set([', '])', False)
33 frozenset_info = (frozenset, 'frozenset([', '])', False)
34 int_types = (int, long) # noqa
35
36 # Collection types are recursively iterated for each limit in
37 # maxcollection.
38 maxcollection = (15, 10)
39
40 # Specifies type, prefix string, suffix string, and whether to include a
41 # comma if there is only one element. (Using a sequence rather than a
42 # mapping because we use isinstance() to determine the matching type.)
43 collection_types = [
44 (tuple, '(', ')', True),
45 (list, '[', ']', False),
46 frozenset_info,
47 set_info,
48 ]
49 try:
50 from collections import deque
51 collection_types.append((deque, 'deque([', '])', False))
52 except Exception:
53 pass
54
55 # type, prefix string, suffix string, item prefix string,
56 # item key/value separator, item suffix string
57 dict_types = [(dict, '{', '}', '', ': ', '')]
58 try:
59 from collections import OrderedDict
60 dict_types.append((OrderedDict, 'OrderedDict([', '])', '(', ', ', ')'))
61 except Exception:
62 pass
63
64 # All other types are treated identically to strings, but using
65 # different limits.
66 maxother_outer = 2 ** 16
67 maxother_inner = 30
68
69 convert_to_hex = False
70 raw_value = False
71
72 def __call__(self, obj):
73 try:
74 return ''.join(self._repr(obj, 0))
75 except Exception:
76 try:
77 return 'An exception was raised: %r' % sys.exc_info()[1]
78 except Exception:
79 return 'An exception was raised'
80
81 def _repr(self, obj, level):
82 '''Returns an iterable of the parts in the final repr string.'''
83
84 try:
85 obj_repr = type(obj).__repr__
86 except Exception:
87 obj_repr = None
88
89 def has_obj_repr(t):
90 r = t.__repr__
91 try:
92 return obj_repr == r
93 except Exception:
94 return obj_repr is r
95
96 for t, prefix, suffix, comma in self.collection_types:
97 if isinstance(obj, t) and has_obj_repr(t):
98 return self._repr_iter(obj, level, prefix, suffix, comma)
99
100 for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types: # noqa
101 if isinstance(obj, t) and has_obj_repr(t):
102 return self._repr_dict(obj, level, prefix, suffix,
103 item_prefix, item_sep, item_suffix)
104
105 for t in self.string_types:
106 if isinstance(obj, t) and has_obj_repr(t):
107 return self._repr_str(obj, level)
108
109 if self._is_long_iter(obj):
110 return self._repr_long_iter(obj)
111
112 return self._repr_other(obj, level)
113
114 # Determines whether an iterable exceeds the limits set in
115 # maxlimits, and is therefore unsafe to repr().
116 def _is_long_iter(self, obj, level=0):
117 try:
118 # Strings have their own limits (and do not nest). Because
119 # they don't have __iter__ in 2.x, this check goes before
120 # the next one.
121 if isinstance(obj, self.string_types):
122 return len(obj) > self.maxstring_inner
123
124 # If it's not an iterable (and not a string), it's fine.
125 if not hasattr(obj, '__iter__'):
126 return False
127
128 # Iterable is its own iterator - this is a one-off iterable
129 # like generator or enumerate(). We can't really count that,
130 # but repr() for these should not include any elements anyway,
131 # so we can treat it the same as non-iterables.
132 if obj is iter(obj):
133 return False
134
135 # xrange reprs fine regardless of length.
136 if isinstance(obj, xrange):
137 return False
138
139 # numpy and scipy collections (ndarray etc) have
140 # self-truncating repr, so they're always safe.
141 try:
142 module = type(obj).__module__.partition('.')[0]
143 if module in ('numpy', 'scipy'):
144 return False
145 except Exception:
146 pass
147
148 # Iterables that nest too deep are considered long.
149 if level >= len(self.maxcollection):
150 return True
151
152 # It is too long if the length exceeds the limit, or any
153 # of its elements are long iterables.
154 if hasattr(obj, '__len__'):
155 try:
156 size = len(obj)
157 except Exception:
158 size = None
159 if size is not None and size > self.maxcollection[level]:
160 return True
161 return any((self._is_long_iter(item, level + 1) for item in obj)) # noqa
162 return any(i > self.maxcollection[level] or self._is_long_iter(item, level + 1) for i, item in enumerate(obj)) # noqa
163
164 except Exception:
165 # If anything breaks, assume the worst case.
166 return True
167
168 def _repr_iter(self, obj, level, prefix, suffix,
169 comma_after_single_element=False):
170 yield prefix
171
172 if level >= len(self.maxcollection):
173 yield '...'
174 else:
175 count = self.maxcollection[level]
176 yield_comma = False
177 for item in obj:
178 if yield_comma:
179 yield ', '
180 yield_comma = True
181
182 count -= 1
183 if count <= 0:
184 yield '...'
185 break
186
187 for p in self._repr(item, 100 if item is obj else level + 1):
188 yield p
189 else:
190 if comma_after_single_element:
191 if count == self.maxcollection[level] - 1:
192 yield ','
193 yield suffix
194
195 def _repr_long_iter(self, obj):
196 try:
197 length = hex(len(obj)) if self.convert_to_hex else len(obj)
198 obj_repr = '<%s, len() = %s>' % (type(obj).__name__, length)
199 except Exception:
200 try:
201 obj_repr = '<' + type(obj).__name__ + '>'
202 except Exception:
203 obj_repr = '<no repr available for object>'
204 yield obj_repr
205
206 def _repr_dict(self, obj, level, prefix, suffix,
207 item_prefix, item_sep, item_suffix):
208 if not obj:
209 yield prefix + suffix
210 return
211 if level >= len(self.maxcollection):
212 yield prefix + '...' + suffix
213 return
214
215 yield prefix
216
217 count = self.maxcollection[level]
218 yield_comma = False
219
220 try:
221 sorted_keys = sorted(obj)
222 except Exception:
223 sorted_keys = list(obj)
224
225 for key in sorted_keys:
226 if yield_comma:
227 yield ', '
228 yield_comma = True
229
230 count -= 1
231 if count <= 0:
232 yield '...'
233 break
234
235 yield item_prefix
236 for p in self._repr(key, level + 1):
237 yield p
238
239 yield item_sep
240
241 try:
242 item = obj[key]
243 except Exception:
244 yield '<?>'
245 else:
246 for p in self._repr(item, 100 if item is obj else level + 1):
247 yield p
248 yield item_suffix
249
250 yield suffix
251
252 def _repr_str(self, obj, level):
253 return self._repr_obj(obj, level,
254 self.maxstring_inner, self.maxstring_outer)
255
256 def _repr_other(self, obj, level):
257 return self._repr_obj(obj, level,
258 self.maxother_inner, self.maxother_outer)
259
260 def _repr_obj(self, obj, level, limit_inner, limit_outer):
261 try:
262 if self.raw_value:
263 # For raw value retrieval, ignore all limits.
264 try:
265 mv = memoryview(obj)
266 except Exception:
267 yield unicode(obj)
268 return
269 else:
270 # Map bytes to Unicode codepoints with same values.
271 yield mv.tobytes().decode('latin-1')
272 return
273 elif self.convert_to_hex and isinstance(obj, self.int_types):
274 obj_repr = hex(obj)
275 else:
276 obj_repr = repr(obj)
277 except Exception:
278 try:
279 obj_repr = object.__repr__(obj)
280 except Exception:
281 try:
282 obj_repr = '<no repr available for ' + type(obj).__name__ + '>' # noqa
283 except Exception:
284 obj_repr = '<no repr available for object>'
285
286 limit = limit_inner if level > 0 else limit_outer
287
288 if limit >= len(obj_repr):
289 yield obj_repr
290 return
291
292 # Slightly imprecise calculations - we may end up with a string that is
293 # up to 3 characters longer than limit. If you need precise formatting,
294 # you are using the wrong class.
295 left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa
296
297 yield obj_repr[:left_count]
298 yield '...'
299 yield obj_repr[-right_count:]
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/safe_repr.py b/ptvsd/safe_repr.py
--- a/ptvsd/safe_repr.py
+++ b/ptvsd/safe_repr.py
@@ -27,11 +27,15 @@
set_info = (set, '{', '}', False)
frozenset_info = (frozenset, 'frozenset({', '})', False)
int_types = (int,)
+ long_iter_types = (list, tuple, bytearray, range,
+ dict, set, frozenset)
else:
string_types = (str, unicode)
set_info = (set, 'set([', '])', False)
frozenset_info = (frozenset, 'frozenset([', '])', False)
int_types = (int, long) # noqa
+ long_iter_types = (list, tuple, bytearray, xrange,
+ dict, set, frozenset, buffer) # noqa
# Collection types are recursively iterated for each limit in
# maxcollection.
@@ -125,6 +129,12 @@
if not hasattr(obj, '__iter__'):
return False
+ # If it's not an instance of these collection types then it
+ # is fine. Note: this is a fix for
+ # https://github.com/Microsoft/ptvsd/issues/406
+ if not isinstance(obj, self.long_iter_types):
+ return False
+
# Iterable is its own iterator - this is a one-off iterable
# like generator or enumerate(). We can't really count that,
# but repr() for these should not include any elements anyway,
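A note on why the whitelist in this patch matters: `_is_long_iter` measures an object by iterating it, and for a one-shot iterable that measurement is destructive. The sketch below is only an illustration of that failure mode, with `FakeCursor` as a hypothetical stand-in for a DB-API style cursor (it is not part of ptvsd or of any driver); the `isinstance` whitelist mirrors the idea of the patch above and leaves such objects untouched.

```python
# Illustrative sketch only: FakeCursor is a hypothetical one-shot iterable,
# not part of ptvsd or of any real database driver.
class FakeCursor:
    def __init__(self, rows):
        self._rows = list(rows)
        self.rownumber = 0

    def fetchone(self):
        if self.rownumber >= len(self._rows):
            return None
        row = self._rows[self.rownumber]
        self.rownumber += 1
        return row

    def __iter__(self):
        # Iteration advances the cursor itself, as DB-API cursors often do.
        return iter(self.fetchone, None)


def is_long_iter_unsafe(obj, limit=10):
    # Mirrors the risky pattern: iterate obj just to measure it.
    return any(i > limit for i, _ in enumerate(obj))


def is_long_iter_safe(obj, limit=10):
    # Whitelist known collection types before iterating anything.
    if not isinstance(obj, (list, tuple, bytearray, range, dict, set, frozenset)):
        return False
    return any(i > limit for i, _ in enumerate(obj))


cursor = FakeCursor([(1,), (2,), (3,)])
is_long_iter_unsafe(cursor)
print(cursor.rownumber)  # 3: the cursor was silently drained

cursor = FakeCursor([(1,), (2,), (3,)])
is_long_iter_safe(cursor)
print(cursor.rownumber)  # 0: left untouched
```

This matches the symptom quoted in this record's issue text, where merely inspecting a pymysql cursor in the debugger advanced `cursor.rownumber`.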
|
{"golden_diff": "diff --git a/ptvsd/safe_repr.py b/ptvsd/safe_repr.py\n--- a/ptvsd/safe_repr.py\n+++ b/ptvsd/safe_repr.py\n@@ -27,11 +27,15 @@\n set_info = (set, '{', '}', False)\n frozenset_info = (frozenset, 'frozenset({', '})', False)\n int_types = (int,)\n+ long_iter_types = (list, tuple, bytearray, range,\n+ dict, set, frozenset)\n else:\n string_types = (str, unicode)\n set_info = (set, 'set([', '])', False)\n frozenset_info = (frozenset, 'frozenset([', '])', False)\n int_types = (int, long) # noqa\n+ long_iter_types = (list, tuple, bytearray, xrange,\n+ dict, set, frozenset, buffer) # noqa\n \n # Collection types are recursively iterated for each limit in\n # maxcollection.\n@@ -125,6 +129,12 @@\n if not hasattr(obj, '__iter__'):\n return False\n \n+ # If it's not an instance of these collection types then it\n+ # is fine. Note: this is a fix for\n+ # https://github.com/Microsoft/ptvsd/issues/406\n+ if not isinstance(obj, self.long_iter_types):\n+ return False\n+\n # Iterable is its own iterator - this is a one-off iterable\n # like generator or enumerate(). We can't really count that,\n # but repr() for these should not include any elements anyway,\n", "issue": "Execution when debugging is incorrect\nFrom developer community\r\nhttps://developercommunity.visualstudio.com/content/problem/254447/python-execute-problem.html?childToView=255082#comment-255082\r\n\r\n```\r\nimport pymysql\r\ndb = pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"zhidao\",charset='utf8')\r\ncursor_select = db.cursor()\r\nsql_select = \"select question_id from zhidao\"\r\nsize_select = cursor_select.execute(sql_select)\r\nfor i in range(size_select):\r\n\tdata = cursor_select.fetchone()\r\n\tprint(data[0])\r\ndb.close()\r\n```\r\n\r\nTable zhidao is a small one, which has about 1400 rows, it works fine if you execute the code directly, but if you set a breakpoint and execute \"size_select = cursor_select.execute(sql_select)\" using F10, data will become NoneType somehow, if you check this problem deeper, you can find execution using F10 will somehow change cursor_select.rownumber to nonzero, related code of pymysql is here: https://github.com/PyMySQL/PyMySQL/blob/master/pymysql/cursors.py\r\nI hope this is helpful.\r\nVersion:\r\nWin10 1803\r\nVS Community 2017 - 15.7.1\r\nPython - 15.7.18116.1\r\nPython 3.6(64-bit) \r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n# Py3 compat - alias unicode to str, and xrange to range\ntry:\n unicode # noqa\nexcept NameError:\n unicode = str\ntry:\n xrange # noqa\nexcept NameError:\n xrange = range\n\n\nclass SafeRepr(object):\n # String types are truncated to maxstring_outer when at the outer-\n # most level, and truncated to maxstring_inner characters inside\n # collections.\n maxstring_outer = 2 ** 16\n maxstring_inner = 30\n if sys.version_info >= (3, 0):\n string_types = (str, bytes)\n set_info = (set, '{', '}', False)\n frozenset_info = (frozenset, 'frozenset({', '})', False)\n int_types = (int,)\n else:\n string_types = (str, unicode)\n set_info = (set, 'set([', '])', False)\n frozenset_info = (frozenset, 'frozenset([', '])', False)\n int_types = (int, long) # noqa\n\n # Collection types are recursively iterated for each limit in\n # maxcollection.\n maxcollection = (15, 10)\n\n # Specifies type, prefix string, suffix string, and whether to include a\n # comma if there is only one element. (Using a sequence rather than a\n # mapping because we use isinstance() to determine the matching type.)\n collection_types = [\n (tuple, '(', ')', True),\n (list, '[', ']', False),\n frozenset_info,\n set_info,\n ]\n try:\n from collections import deque\n collection_types.append((deque, 'deque([', '])', False))\n except Exception:\n pass\n\n # type, prefix string, suffix string, item prefix string,\n # item key/value separator, item suffix string\n dict_types = [(dict, '{', '}', '', ': ', '')]\n try:\n from collections import OrderedDict\n dict_types.append((OrderedDict, 'OrderedDict([', '])', '(', ', ', ')'))\n except Exception:\n pass\n\n # All other types are treated identically to strings, but using\n # different limits.\n maxother_outer = 2 ** 16\n maxother_inner = 30\n\n convert_to_hex = False\n raw_value = False\n\n def __call__(self, obj):\n try:\n return ''.join(self._repr(obj, 0))\n except Exception:\n try:\n return 'An exception was raised: %r' % sys.exc_info()[1]\n except Exception:\n return 'An exception was raised'\n\n def _repr(self, obj, level):\n '''Returns an iterable of the parts in the final repr string.'''\n\n try:\n obj_repr = type(obj).__repr__\n except Exception:\n obj_repr = None\n\n def has_obj_repr(t):\n r = t.__repr__\n try:\n return obj_repr == r\n except Exception:\n return obj_repr is r\n\n for t, prefix, suffix, comma in self.collection_types:\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_iter(obj, level, prefix, suffix, comma)\n\n for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types: # noqa\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_dict(obj, level, prefix, suffix,\n item_prefix, item_sep, item_suffix)\n\n for t in self.string_types:\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_str(obj, level)\n\n if self._is_long_iter(obj):\n return self._repr_long_iter(obj)\n\n return self._repr_other(obj, level)\n\n # Determines whether an iterable exceeds the limits set in\n # maxlimits, and is therefore unsafe to repr().\n def _is_long_iter(self, obj, level=0):\n try:\n # Strings have their own limits (and do not nest). 
Because\n # they don't have __iter__ in 2.x, this check goes before\n # the next one.\n if isinstance(obj, self.string_types):\n return len(obj) > self.maxstring_inner\n\n # If it's not an iterable (and not a string), it's fine.\n if not hasattr(obj, '__iter__'):\n return False\n\n # Iterable is its own iterator - this is a one-off iterable\n # like generator or enumerate(). We can't really count that,\n # but repr() for these should not include any elements anyway,\n # so we can treat it the same as non-iterables.\n if obj is iter(obj):\n return False\n\n # xrange reprs fine regardless of length.\n if isinstance(obj, xrange):\n return False\n\n # numpy and scipy collections (ndarray etc) have\n # self-truncating repr, so they're always safe.\n try:\n module = type(obj).__module__.partition('.')[0]\n if module in ('numpy', 'scipy'):\n return False\n except Exception:\n pass\n\n # Iterables that nest too deep are considered long.\n if level >= len(self.maxcollection):\n return True\n\n # It is too long if the length exceeds the limit, or any\n # of its elements are long iterables.\n if hasattr(obj, '__len__'):\n try:\n size = len(obj)\n except Exception:\n size = None\n if size is not None and size > self.maxcollection[level]:\n return True\n return any((self._is_long_iter(item, level + 1) for item in obj)) # noqa\n return any(i > self.maxcollection[level] or self._is_long_iter(item, level + 1) for i, item in enumerate(obj)) # noqa\n\n except Exception:\n # If anything breaks, assume the worst case.\n return True\n\n def _repr_iter(self, obj, level, prefix, suffix,\n comma_after_single_element=False):\n yield prefix\n\n if level >= len(self.maxcollection):\n yield '...'\n else:\n count = self.maxcollection[level]\n yield_comma = False\n for item in obj:\n if yield_comma:\n yield ', '\n yield_comma = True\n\n count -= 1\n if count <= 0:\n yield '...'\n break\n\n for p in self._repr(item, 100 if item is obj else level + 1):\n yield p\n else:\n if comma_after_single_element:\n if count == self.maxcollection[level] - 1:\n yield ','\n yield suffix\n\n def _repr_long_iter(self, obj):\n try:\n length = hex(len(obj)) if self.convert_to_hex else len(obj)\n obj_repr = '<%s, len() = %s>' % (type(obj).__name__, length)\n except Exception:\n try:\n obj_repr = '<' + type(obj).__name__ + '>'\n except Exception:\n obj_repr = '<no repr available for object>'\n yield obj_repr\n\n def _repr_dict(self, obj, level, prefix, suffix,\n item_prefix, item_sep, item_suffix):\n if not obj:\n yield prefix + suffix\n return\n if level >= len(self.maxcollection):\n yield prefix + '...' 
+ suffix\n return\n\n yield prefix\n\n count = self.maxcollection[level]\n yield_comma = False\n\n try:\n sorted_keys = sorted(obj)\n except Exception:\n sorted_keys = list(obj)\n\n for key in sorted_keys:\n if yield_comma:\n yield ', '\n yield_comma = True\n\n count -= 1\n if count <= 0:\n yield '...'\n break\n\n yield item_prefix\n for p in self._repr(key, level + 1):\n yield p\n\n yield item_sep\n\n try:\n item = obj[key]\n except Exception:\n yield '<?>'\n else:\n for p in self._repr(item, 100 if item is obj else level + 1):\n yield p\n yield item_suffix\n\n yield suffix\n\n def _repr_str(self, obj, level):\n return self._repr_obj(obj, level,\n self.maxstring_inner, self.maxstring_outer)\n\n def _repr_other(self, obj, level):\n return self._repr_obj(obj, level,\n self.maxother_inner, self.maxother_outer)\n\n def _repr_obj(self, obj, level, limit_inner, limit_outer):\n try:\n if self.raw_value:\n # For raw value retrieval, ignore all limits.\n try:\n mv = memoryview(obj)\n except Exception:\n yield unicode(obj)\n return\n else:\n # Map bytes to Unicode codepoints with same values.\n yield mv.tobytes().decode('latin-1')\n return\n elif self.convert_to_hex and isinstance(obj, self.int_types):\n obj_repr = hex(obj)\n else:\n obj_repr = repr(obj)\n except Exception:\n try:\n obj_repr = object.__repr__(obj)\n except Exception:\n try:\n obj_repr = '<no repr available for ' + type(obj).__name__ + '>' # noqa\n except Exception:\n obj_repr = '<no repr available for object>'\n\n limit = limit_inner if level > 0 else limit_outer\n\n if limit >= len(obj_repr):\n yield obj_repr\n return\n\n # Slightly imprecise calculations - we may end up with a string that is\n # up to 3 characters longer than limit. If you need precise formatting,\n # you are using the wrong class.\n left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa\n\n yield obj_repr[:left_count]\n yield '...'\n yield obj_repr[-right_count:]\n", "path": "ptvsd/safe_repr.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n# Py3 compat - alias unicode to str, and xrange to range\ntry:\n unicode # noqa\nexcept NameError:\n unicode = str\ntry:\n xrange # noqa\nexcept NameError:\n xrange = range\n\n\nclass SafeRepr(object):\n # String types are truncated to maxstring_outer when at the outer-\n # most level, and truncated to maxstring_inner characters inside\n # collections.\n maxstring_outer = 2 ** 16\n maxstring_inner = 30\n if sys.version_info >= (3, 0):\n string_types = (str, bytes)\n set_info = (set, '{', '}', False)\n frozenset_info = (frozenset, 'frozenset({', '})', False)\n int_types = (int,)\n long_iter_types = (list, tuple, bytearray, range,\n dict, set, frozenset)\n else:\n string_types = (str, unicode)\n set_info = (set, 'set([', '])', False)\n frozenset_info = (frozenset, 'frozenset([', '])', False)\n int_types = (int, long) # noqa\n long_iter_types = (list, tuple, bytearray, xrange,\n dict, set, frozenset, buffer) # noqa\n\n # Collection types are recursively iterated for each limit in\n # maxcollection.\n maxcollection = (15, 10)\n\n # Specifies type, prefix string, suffix string, and whether to include a\n # comma if there is only one element. 
(Using a sequence rather than a\n # mapping because we use isinstance() to determine the matching type.)\n collection_types = [\n (tuple, '(', ')', True),\n (list, '[', ']', False),\n frozenset_info,\n set_info,\n ]\n try:\n from collections import deque\n collection_types.append((deque, 'deque([', '])', False))\n except Exception:\n pass\n\n # type, prefix string, suffix string, item prefix string,\n # item key/value separator, item suffix string\n dict_types = [(dict, '{', '}', '', ': ', '')]\n try:\n from collections import OrderedDict\n dict_types.append((OrderedDict, 'OrderedDict([', '])', '(', ', ', ')'))\n except Exception:\n pass\n\n # All other types are treated identically to strings, but using\n # different limits.\n maxother_outer = 2 ** 16\n maxother_inner = 30\n\n convert_to_hex = False\n raw_value = False\n\n def __call__(self, obj):\n try:\n return ''.join(self._repr(obj, 0))\n except Exception:\n try:\n return 'An exception was raised: %r' % sys.exc_info()[1]\n except Exception:\n return 'An exception was raised'\n\n def _repr(self, obj, level):\n '''Returns an iterable of the parts in the final repr string.'''\n\n try:\n obj_repr = type(obj).__repr__\n except Exception:\n obj_repr = None\n\n def has_obj_repr(t):\n r = t.__repr__\n try:\n return obj_repr == r\n except Exception:\n return obj_repr is r\n\n for t, prefix, suffix, comma in self.collection_types:\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_iter(obj, level, prefix, suffix, comma)\n\n for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types: # noqa\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_dict(obj, level, prefix, suffix,\n item_prefix, item_sep, item_suffix)\n\n for t in self.string_types:\n if isinstance(obj, t) and has_obj_repr(t):\n return self._repr_str(obj, level)\n\n if self._is_long_iter(obj):\n return self._repr_long_iter(obj)\n\n return self._repr_other(obj, level)\n\n # Determines whether an iterable exceeds the limits set in\n # maxlimits, and is therefore unsafe to repr().\n def _is_long_iter(self, obj, level=0):\n try:\n # Strings have their own limits (and do not nest). Because\n # they don't have __iter__ in 2.x, this check goes before\n # the next one.\n if isinstance(obj, self.string_types):\n return len(obj) > self.maxstring_inner\n\n # If it's not an iterable (and not a string), it's fine.\n if not hasattr(obj, '__iter__'):\n return False\n\n # If it's not an instance of these collection types then it\n # is fine. Note: this is a fix for\n # https://github.com/Microsoft/ptvsd/issues/406\n if not isinstance(obj, self.long_iter_types):\n return False\n\n # Iterable is its own iterator - this is a one-off iterable\n # like generator or enumerate(). 
We can't really count that,\n # but repr() for these should not include any elements anyway,\n # so we can treat it the same as non-iterables.\n if obj is iter(obj):\n return False\n\n # xrange reprs fine regardless of length.\n if isinstance(obj, xrange):\n return False\n\n # numpy and scipy collections (ndarray etc) have\n # self-truncating repr, so they're always safe.\n try:\n module = type(obj).__module__.partition('.')[0]\n if module in ('numpy', 'scipy'):\n return False\n except Exception:\n pass\n\n # Iterables that nest too deep are considered long.\n if level >= len(self.maxcollection):\n return True\n\n # It is too long if the length exceeds the limit, or any\n # of its elements are long iterables.\n if hasattr(obj, '__len__'):\n try:\n size = len(obj)\n except Exception:\n size = None\n if size is not None and size > self.maxcollection[level]:\n return True\n return any((self._is_long_iter(item, level + 1) for item in obj)) # noqa\n return any(i > self.maxcollection[level] or self._is_long_iter(item, level + 1) for i, item in enumerate(obj)) # noqa\n\n except Exception:\n # If anything breaks, assume the worst case.\n return True\n\n def _repr_iter(self, obj, level, prefix, suffix,\n comma_after_single_element=False):\n yield prefix\n\n if level >= len(self.maxcollection):\n yield '...'\n else:\n count = self.maxcollection[level]\n yield_comma = False\n for item in obj:\n if yield_comma:\n yield ', '\n yield_comma = True\n\n count -= 1\n if count <= 0:\n yield '...'\n break\n\n for p in self._repr(item, 100 if item is obj else level + 1):\n yield p\n else:\n if comma_after_single_element:\n if count == self.maxcollection[level] - 1:\n yield ','\n yield suffix\n\n def _repr_long_iter(self, obj):\n try:\n length = hex(len(obj)) if self.convert_to_hex else len(obj)\n obj_repr = '<%s, len() = %s>' % (type(obj).__name__, length)\n except Exception:\n try:\n obj_repr = '<' + type(obj).__name__ + '>'\n except Exception:\n obj_repr = '<no repr available for object>'\n yield obj_repr\n\n def _repr_dict(self, obj, level, prefix, suffix,\n item_prefix, item_sep, item_suffix):\n if not obj:\n yield prefix + suffix\n return\n if level >= len(self.maxcollection):\n yield prefix + '...' 
+ suffix\n return\n\n yield prefix\n\n count = self.maxcollection[level]\n yield_comma = False\n\n try:\n sorted_keys = sorted(obj)\n except Exception:\n sorted_keys = list(obj)\n\n for key in sorted_keys:\n if yield_comma:\n yield ', '\n yield_comma = True\n\n count -= 1\n if count <= 0:\n yield '...'\n break\n\n yield item_prefix\n for p in self._repr(key, level + 1):\n yield p\n\n yield item_sep\n\n try:\n item = obj[key]\n except Exception:\n yield '<?>'\n else:\n for p in self._repr(item, 100 if item is obj else level + 1):\n yield p\n yield item_suffix\n\n yield suffix\n\n def _repr_str(self, obj, level):\n return self._repr_obj(obj, level,\n self.maxstring_inner, self.maxstring_outer)\n\n def _repr_other(self, obj, level):\n return self._repr_obj(obj, level,\n self.maxother_inner, self.maxother_outer)\n\n def _repr_obj(self, obj, level, limit_inner, limit_outer):\n try:\n if self.raw_value:\n # For raw value retrieval, ignore all limits.\n try:\n mv = memoryview(obj)\n except Exception:\n yield unicode(obj)\n return\n else:\n # Map bytes to Unicode codepoints with same values.\n yield mv.tobytes().decode('latin-1')\n return\n elif self.convert_to_hex and isinstance(obj, self.int_types):\n obj_repr = hex(obj)\n else:\n obj_repr = repr(obj)\n except Exception:\n try:\n obj_repr = object.__repr__(obj)\n except Exception:\n try:\n obj_repr = '<no repr available for ' + type(obj).__name__ + '>' # noqa\n except Exception:\n obj_repr = '<no repr available for object>'\n\n limit = limit_inner if level > 0 else limit_outer\n\n if limit >= len(obj_repr):\n yield obj_repr\n return\n\n # Slightly imprecise calculations - we may end up with a string that is\n # up to 3 characters longer than limit. If you need precise formatting,\n # you are using the wrong class.\n left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa\n\n yield obj_repr[:left_count]\n yield '...'\n yield obj_repr[-right_count:]\n", "path": "ptvsd/safe_repr.py"}]}
| 3,585 | 380 |
gh_patches_debug_14973
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove `chainer-cuda-requirements` that is deprecated
`pip install chainer-cuda-deps` is recommended, and `chainer-cuda-requirements` is deprecated now. It will be removed in a future minor release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import setup
3
4 setup(
5 name='chainer',
6 version='1.0.1',
7 description='A flexible framework of neural networks',
8 author='Seiya Tokui',
9 author_email='[email protected]',
10 url='http://chainer.org/',
11 packages=['chainer',
12 'chainer.cudnn',
13 'chainer.functions',
14 'chainer.optimizers',
15 'chainer.requirements',
16 'chainer.utils'],
17 package_data={'chainer.requirements': ['cuda-requirements.txt']},
18 install_requires=['numpy',
19 'six>=1.9.0'],
20 scripts=['scripts/chainer-cuda-requirements'],
21 tests_require=['nose'],
22 )
23
```
Path: `chainer/requirements/__init__.py`
Content:
```
1 import os
2
3
4 def get_cuda_requirements_path():
5 return os.path.join(os.path.dirname(__file__), 'cuda-requirements.txt')
6
7
8 def get_cuda_requirements():
9 with open(get_cuda_requirements_path()) as f:
10 return f.read()
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/requirements/__init__.py b/chainer/requirements/__init__.py
deleted file mode 100644
--- a/chainer/requirements/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import os
-
-
-def get_cuda_requirements_path():
- return os.path.join(os.path.dirname(__file__), 'cuda-requirements.txt')
-
-
-def get_cuda_requirements():
- with open(get_cuda_requirements_path()) as f:
- return f.read()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,11 +12,8 @@
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
- 'chainer.requirements',
'chainer.utils'],
- package_data={'chainer.requirements': ['cuda-requirements.txt']},
install_requires=['numpy',
'six>=1.9.0'],
- scripts=['scripts/chainer-cuda-requirements'],
tests_require=['nose'],
)
|
{"golden_diff": "diff --git a/chainer/requirements/__init__.py b/chainer/requirements/__init__.py\ndeleted file mode 100644\n--- a/chainer/requirements/__init__.py\n+++ /dev/null\n@@ -1,10 +0,0 @@\n-import os\n-\n-\n-def get_cuda_requirements_path():\n- return os.path.join(os.path.dirname(__file__), 'cuda-requirements.txt')\n-\n-\n-def get_cuda_requirements():\n- with open(get_cuda_requirements_path()) as f:\n- return f.read()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,11 +12,8 @@\n 'chainer.cudnn',\n 'chainer.functions',\n 'chainer.optimizers',\n- 'chainer.requirements',\n 'chainer.utils'],\n- package_data={'chainer.requirements': ['cuda-requirements.txt']},\n install_requires=['numpy',\n 'six>=1.9.0'],\n- scripts=['scripts/chainer-cuda-requirements'],\n tests_require=['nose'],\n )\n", "issue": "Remove `chainer-cuda-requirements` that is deprecated\n`pip install chainer-cuda-deps` is recommended, and `chainer-cuda-requirements` is deprecated now. It will be removed in the future minor release.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import setup\n\nsetup(\n name='chainer',\n version='1.0.1',\n description='A flexible framework of neural networks',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='http://chainer.org/',\n packages=['chainer',\n 'chainer.cudnn',\n 'chainer.functions',\n 'chainer.optimizers',\n 'chainer.requirements',\n 'chainer.utils'],\n package_data={'chainer.requirements': ['cuda-requirements.txt']},\n install_requires=['numpy',\n 'six>=1.9.0'],\n scripts=['scripts/chainer-cuda-requirements'],\n tests_require=['nose'],\n)\n", "path": "setup.py"}, {"content": "import os\n\n\ndef get_cuda_requirements_path():\n return os.path.join(os.path.dirname(__file__), 'cuda-requirements.txt')\n\n\ndef get_cuda_requirements():\n with open(get_cuda_requirements_path()) as f:\n return f.read()\n", "path": "chainer/requirements/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import setup\n\nsetup(\n name='chainer',\n version='1.0.1',\n description='A flexible framework of neural networks',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='http://chainer.org/',\n packages=['chainer',\n 'chainer.cudnn',\n 'chainer.functions',\n 'chainer.optimizers',\n 'chainer.utils'],\n install_requires=['numpy',\n 'six>=1.9.0'],\n tests_require=['nose'],\n)\n", "path": "setup.py"}, {"content": null, "path": "chainer/requirements/__init__.py"}]}
| 582 | 234 |
gh_patches_debug_6793
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-6674
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve test coverage for clique.py
There is a line left uncovered in https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/approximation/clique.py and I am looking into it. There is a bit of an issue, though, when I try G = None: an AttributeError is raised, not a ValueError.
Steps to Reproduce
import networkx as nx
from networkx.algorithms.approximation.clique import maximum_independent_set, max_clique
G=nx.Graph()
G.add_nodes_from([(2,3),(5,6),(3,6)])
max_clique(G=None)
output:
AttributeError Traceback (most recent call last)
Cell In[84], line 1
----> 1 max_clique(G)
File <class 'networkx.utils.decorators.argmap'> compilation 32:3, in argmap_max_clique_28(G)
1 import bz2
2 import collections
----> 3 import gzip
4 import inspect
5 import itertools
File ~\anaconda3\lib\site-packages\networkx\utils\decorators.py:83, in not_implemented_for.<locals>._not_implemented_for(g)
82 def _not_implemented_for(g):
---> 83 if (mval is None or mval == g.is_multigraph()) and (
84 dval is None or dval == g.is_directed()
85 ):
86 raise nx.NetworkXNotImplemented(errmsg)
88 return g
AttributeError: 'NoneType' object has no attribute 'is_multigraph'
--- END ISSUE ---
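A note on the traceback above: the `AttributeError` is raised by the `not_implemented_for` decorator, which inspects `G` before the wrapped function body runs, so the `if G is None` branch inside `max_clique` can never execute. The snippet below is a minimal mock of that ordering, not the actual NetworkX decorator:

```python
# Minimal mock of the decorator ordering; this is NOT the NetworkX
# implementation, only an illustration of why G = None never reaches
# the function body.
def not_implemented_for_directed(func):
    def wrapper(G):
        # The guard touches G first, so None fails here ...
        if G.is_directed():
            raise NotImplementedError("not implemented for directed graphs")
        return func(G)
    return wrapper


@not_implemented_for_directed
def max_clique_like(G):
    # ... which makes this check dead code for G = None.
    if G is None:
        raise ValueError("Expected NetworkX graph!")
    return set()


try:
    max_clique_like(None)
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'is_directed'
```

Removing the unreachable check, as the patch shown further below does, is therefore enough to restore full coverage of this function.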
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/algorithms/approximation/clique.py`
Content:
```
1 """Functions for computing large cliques and maximum independent sets."""
2 import networkx as nx
3 from networkx.algorithms.approximation import ramsey
4 from networkx.utils import not_implemented_for
5
6 __all__ = [
7 "clique_removal",
8 "max_clique",
9 "large_clique_size",
10 "maximum_independent_set",
11 ]
12
13
14 @not_implemented_for("directed")
15 @not_implemented_for("multigraph")
16 def maximum_independent_set(G):
17 """Returns an approximate maximum independent set.
18
19 Independent set or stable set is a set of vertices in a graph, no two of
20 which are adjacent. That is, it is a set I of vertices such that for every
21 two vertices in I, there is no edge connecting the two. Equivalently, each
22 edge in the graph has at most one endpoint in I. The size of an independent
23 set is the number of vertices it contains [1]_.
24
25 A maximum independent set is a largest independent set for a given graph G
26 and its size is denoted $\\alpha(G)$. The problem of finding such a set is called
27 the maximum independent set problem and is an NP-hard optimization problem.
28 As such, it is unlikely that there exists an efficient algorithm for finding
29 a maximum independent set of a graph.
30
31 The Independent Set algorithm is based on [2]_.
32
33 Parameters
34 ----------
35 G : NetworkX graph
36 Undirected graph
37
38 Returns
39 -------
40 iset : Set
41 The apx-maximum independent set
42
43 Examples
44 --------
45 >>> G = nx.path_graph(10)
46 >>> nx.approximation.maximum_independent_set(G)
47 {0, 2, 4, 6, 9}
48
49 Raises
50 ------
51 NetworkXNotImplemented
52 If the graph is directed or is a multigraph.
53
54 Notes
55 -----
56 Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case.
57
58 References
59 ----------
60 .. [1] `Wikipedia: Independent set
61 <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_
62 .. [2] Boppana, R., & Halldórsson, M. M. (1992).
63 Approximating maximum independent sets by excluding subgraphs.
64 BIT Numerical Mathematics, 32(2), 180–196. Springer.
65 """
66 iset, _ = clique_removal(G)
67 return iset
68
69
70 @not_implemented_for("directed")
71 @not_implemented_for("multigraph")
72 def max_clique(G):
73 r"""Find the Maximum Clique
74
75 Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set
76 in the worst case.
77
78 Parameters
79 ----------
80 G : NetworkX graph
81 Undirected graph
82
83 Returns
84 -------
85 clique : set
86 The apx-maximum clique of the graph
87
88 Examples
89 --------
90 >>> G = nx.path_graph(10)
91 >>> nx.approximation.max_clique(G)
92 {8, 9}
93
94 Raises
95 ------
96 NetworkXNotImplemented
97 If the graph is directed or is a multigraph.
98
99 Notes
100 -----
101 A clique in an undirected graph G = (V, E) is a subset of the vertex set
102 `C \subseteq V` such that for every two vertices in C there exists an edge
103 connecting the two. This is equivalent to saying that the subgraph
104 induced by C is complete (in some cases, the term clique may also refer
105 to the subgraph).
106
107 A maximum clique is a clique of the largest possible size in a given graph.
108 The clique number `\omega(G)` of a graph G is the number of
109 vertices in a maximum clique in G. The intersection number of
110 G is the smallest number of cliques that together cover all edges of G.
111
112 https://en.wikipedia.org/wiki/Maximum_clique
113
114 References
115 ----------
116 .. [1] Boppana, R., & Halldórsson, M. M. (1992).
117 Approximating maximum independent sets by excluding subgraphs.
118 BIT Numerical Mathematics, 32(2), 180–196. Springer.
119 doi:10.1007/BF01994876
120 """
121 if G is None:
122 raise ValueError("Expected NetworkX graph!")
123
124 # finding the maximum clique in a graph is equivalent to finding
125 # the independent set in the complementary graph
126 cgraph = nx.complement(G)
127 iset, _ = clique_removal(cgraph)
128 return iset
129
130
131 @not_implemented_for("directed")
132 @not_implemented_for("multigraph")
133 def clique_removal(G):
134 r"""Repeatedly remove cliques from the graph.
135
136 Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique
137 and independent set. Returns the largest independent set found, along
138 with found maximal cliques.
139
140 Parameters
141 ----------
142 G : NetworkX graph
143 Undirected graph
144
145 Returns
146 -------
147 max_ind_cliques : (set, list) tuple
148 2-tuple of Maximal Independent Set and list of maximal cliques (sets).
149
150 Examples
151 --------
152 >>> G = nx.path_graph(10)
153 >>> nx.approximation.clique_removal(G)
154 ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}])
155
156 Raises
157 ------
158 NetworkXNotImplemented
159 If the graph is directed or is a multigraph.
160
161 References
162 ----------
163 .. [1] Boppana, R., & Halldórsson, M. M. (1992).
164 Approximating maximum independent sets by excluding subgraphs.
165 BIT Numerical Mathematics, 32(2), 180–196. Springer.
166 """
167 graph = G.copy()
168 c_i, i_i = ramsey.ramsey_R2(graph)
169 cliques = [c_i]
170 isets = [i_i]
171 while graph:
172 graph.remove_nodes_from(c_i)
173 c_i, i_i = ramsey.ramsey_R2(graph)
174 if c_i:
175 cliques.append(c_i)
176 if i_i:
177 isets.append(i_i)
178 # Determine the largest independent set as measured by cardinality.
179 maxiset = max(isets, key=len)
180 return maxiset, cliques
181
182
183 @not_implemented_for("directed")
184 @not_implemented_for("multigraph")
185 def large_clique_size(G):
186 """Find the size of a large clique in a graph.
187
188 A *clique* is a subset of nodes in which each pair of nodes is
189 adjacent. This function is a heuristic for finding the size of a
190 large clique in the graph.
191
192 Parameters
193 ----------
194 G : NetworkX graph
195
196 Returns
197 -------
198 k: integer
199 The size of a large clique in the graph.
200
201 Examples
202 --------
203 >>> G = nx.path_graph(10)
204 >>> nx.approximation.large_clique_size(G)
205 2
206
207 Raises
208 ------
209 NetworkXNotImplemented
210 If the graph is directed or is a multigraph.
211
212 Notes
213 -----
214 This implementation is from [1]_. Its worst case time complexity is
215 :math:`O(n d^2)`, where *n* is the number of nodes in the graph and
216 *d* is the maximum degree.
217
218 This function is a heuristic, which means it may work well in
219 practice, but there is no rigorous mathematical guarantee on the
220 ratio between the returned number and the actual largest clique size
221 in the graph.
222
223 References
224 ----------
225 .. [1] Pattabiraman, Bharath, et al.
226 "Fast Algorithms for the Maximum Clique Problem on Massive Graphs
227 with Applications to Overlapping Community Detection."
228 *Internet Mathematics* 11.4-5 (2015): 421--448.
229 <https://doi.org/10.1080/15427951.2014.986778>
230
231 See also
232 --------
233
234 :func:`networkx.algorithms.approximation.clique.max_clique`
235 A function that returns an approximate maximum clique with a
236 guarantee on the approximation ratio.
237
238 :mod:`networkx.algorithms.clique`
239 Functions for finding the exact maximum clique in a graph.
240
241 """
242 degrees = G.degree
243
244 def _clique_heuristic(G, U, size, best_size):
245 if not U:
246 return max(best_size, size)
247 u = max(U, key=degrees)
248 U.remove(u)
249 N_prime = {v for v in G[u] if degrees[v] >= best_size}
250 return _clique_heuristic(G, U & N_prime, size + 1, best_size)
251
252 best_size = 0
253 nodes = (u for u in G if degrees[u] >= best_size)
254 for u in nodes:
255 neighbors = {v for v in G[u] if degrees[v] >= best_size}
256 best_size = _clique_heuristic(G, neighbors, 1, best_size)
257 return best_size
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/algorithms/approximation/clique.py b/networkx/algorithms/approximation/clique.py
--- a/networkx/algorithms/approximation/clique.py
+++ b/networkx/algorithms/approximation/clique.py
@@ -118,9 +118,6 @@
BIT Numerical Mathematics, 32(2), 180–196. Springer.
doi:10.1007/BF01994876
"""
- if G is None:
- raise ValueError("Expected NetworkX graph!")
-
# finding the maximum clique in a graph is equivalent to finding
# the independent set in the complementary graph
cgraph = nx.complement(G)
|
{"golden_diff": "diff --git a/networkx/algorithms/approximation/clique.py b/networkx/algorithms/approximation/clique.py\n--- a/networkx/algorithms/approximation/clique.py\n+++ b/networkx/algorithms/approximation/clique.py\n@@ -118,9 +118,6 @@\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n doi:10.1007/BF01994876\n \"\"\"\n- if G is None:\n- raise ValueError(\"Expected NetworkX graph!\")\n-\n # finding the maximum clique in a graph is equivalent to finding\n # the independent set in the complementary graph\n cgraph = nx.complement(G)\n", "issue": "Improve test coverage for clique.py\nThere is a line left uncovered in https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/approximation/clique.py and I am looking into it. There is a bit of an issue though when I try G = None. An attribute error is raised not a value error. \r\n\r\nSteps to Reproduce\r\n\r\nfrom networkx.algorithms.approximation.clique import maximum_independent_set,max_clique\r\nG=nx.Graph()\r\nG.add_nodes_from([(2,3),(5,6),(3,6)])\r\nmax_clique(G=None)\r\n\r\noutput:\r\n\r\nAttributeError Traceback (most recent call last)\r\nCell In[84], line 1\r\n----> 1 max_clique(G)\r\n\r\nFile <class 'networkx.utils.decorators.argmap'> compilation 32:3, in argmap_max_clique_28(G)\r\n 1 import bz2\r\n 2 import collections\r\n----> 3 import gzip\r\n 4 import inspect\r\n 5 import itertools\r\n\r\nFile ~\\anaconda3\\lib\\site-packages\\networkx\\utils\\decorators.py:83, in not_implemented_for.<locals>._not_implemented_for(g)\r\n 82 def _not_implemented_for(g):\r\n---> 83 if (mval is None or mval == g.is_multigraph()) and (\r\n 84 dval is None or dval == g.is_directed()\r\n 85 ):\r\n 86 raise nx.NetworkXNotImplemented(errmsg)\r\n 88 return g\r\n\r\nAttributeError: 'NoneType' object has no attribute 'is_multigraph'\n", "before_files": [{"content": "\"\"\"Functions for computing large cliques and maximum independent sets.\"\"\"\nimport networkx as nx\nfrom networkx.algorithms.approximation import ramsey\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\n \"clique_removal\",\n \"max_clique\",\n \"large_clique_size\",\n \"maximum_independent_set\",\n]\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef maximum_independent_set(G):\n \"\"\"Returns an approximate maximum independent set.\n\n Independent set or stable set is a set of vertices in a graph, no two of\n which are adjacent. That is, it is a set I of vertices such that for every\n two vertices in I, there is no edge connecting the two. Equivalently, each\n edge in the graph has at most one endpoint in I. The size of an independent\n set is the number of vertices it contains [1]_.\n\n A maximum independent set is a largest independent set for a given graph G\n and its size is denoted $\\\\alpha(G)$. 
The problem of finding such a set is called\n the maximum independent set problem and is an NP-hard optimization problem.\n As such, it is unlikely that there exists an efficient algorithm for finding\n a maximum independent set of a graph.\n\n The Independent Set algorithm is based on [2]_.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n iset : Set\n The apx-maximum independent set\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.maximum_independent_set(G)\n {0, 2, 4, 6, 9}\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case.\n\n References\n ----------\n .. [1] `Wikipedia: Independent set\n <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_\n .. [2] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n \"\"\"\n iset, _ = clique_removal(G)\n return iset\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef max_clique(G):\n r\"\"\"Find the Maximum Clique\n\n Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set\n in the worst case.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n clique : set\n The apx-maximum clique of the graph\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.max_clique(G)\n {8, 9}\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n A clique in an undirected graph G = (V, E) is a subset of the vertex set\n `C \\subseteq V` such that for every two vertices in C there exists an edge\n connecting the two. This is equivalent to saying that the subgraph\n induced by C is complete (in some cases, the term clique may also refer\n to the subgraph).\n\n A maximum clique is a clique of the largest possible size in a given graph.\n The clique number `\\omega(G)` of a graph G is the number of\n vertices in a maximum clique in G. The intersection number of\n G is the smallest number of cliques that together cover all edges of G.\n\n https://en.wikipedia.org/wiki/Maximum_clique\n\n References\n ----------\n .. [1] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n doi:10.1007/BF01994876\n \"\"\"\n if G is None:\n raise ValueError(\"Expected NetworkX graph!\")\n\n # finding the maximum clique in a graph is equivalent to finding\n # the independent set in the complementary graph\n cgraph = nx.complement(G)\n iset, _ = clique_removal(cgraph)\n return iset\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef clique_removal(G):\n r\"\"\"Repeatedly remove cliques from the graph.\n\n Results in a $O(|V|/(\\log |V|)^2)$ approximation of maximum clique\n and independent set. 
Returns the largest independent set found, along\n with found maximal cliques.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n max_ind_cliques : (set, list) tuple\n 2-tuple of Maximal Independent Set and list of maximal cliques (sets).\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.clique_removal(G)\n ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}])\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n References\n ----------\n .. [1] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n \"\"\"\n graph = G.copy()\n c_i, i_i = ramsey.ramsey_R2(graph)\n cliques = [c_i]\n isets = [i_i]\n while graph:\n graph.remove_nodes_from(c_i)\n c_i, i_i = ramsey.ramsey_R2(graph)\n if c_i:\n cliques.append(c_i)\n if i_i:\n isets.append(i_i)\n # Determine the largest independent set as measured by cardinality.\n maxiset = max(isets, key=len)\n return maxiset, cliques\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef large_clique_size(G):\n \"\"\"Find the size of a large clique in a graph.\n\n A *clique* is a subset of nodes in which each pair of nodes is\n adjacent. This function is a heuristic for finding the size of a\n large clique in the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n k: integer\n The size of a large clique in the graph.\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.large_clique_size(G)\n 2\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n This implementation is from [1]_. Its worst case time complexity is\n :math:`O(n d^2)`, where *n* is the number of nodes in the graph and\n *d* is the maximum degree.\n\n This function is a heuristic, which means it may work well in\n practice, but there is no rigorous mathematical guarantee on the\n ratio between the returned number and the actual largest clique size\n in the graph.\n\n References\n ----------\n .. 
[1] Pattabiraman, Bharath, et al.\n \"Fast Algorithms for the Maximum Clique Problem on Massive Graphs\n with Applications to Overlapping Community Detection.\"\n *Internet Mathematics* 11.4-5 (2015): 421--448.\n <https://doi.org/10.1080/15427951.2014.986778>\n\n See also\n --------\n\n :func:`networkx.algorithms.approximation.clique.max_clique`\n A function that returns an approximate maximum clique with a\n guarantee on the approximation ratio.\n\n :mod:`networkx.algorithms.clique`\n Functions for finding the exact maximum clique in a graph.\n\n \"\"\"\n degrees = G.degree\n\n def _clique_heuristic(G, U, size, best_size):\n if not U:\n return max(best_size, size)\n u = max(U, key=degrees)\n U.remove(u)\n N_prime = {v for v in G[u] if degrees[v] >= best_size}\n return _clique_heuristic(G, U & N_prime, size + 1, best_size)\n\n best_size = 0\n nodes = (u for u in G if degrees[u] >= best_size)\n for u in nodes:\n neighbors = {v for v in G[u] if degrees[v] >= best_size}\n best_size = _clique_heuristic(G, neighbors, 1, best_size)\n return best_size\n", "path": "networkx/algorithms/approximation/clique.py"}], "after_files": [{"content": "\"\"\"Functions for computing large cliques and maximum independent sets.\"\"\"\nimport networkx as nx\nfrom networkx.algorithms.approximation import ramsey\nfrom networkx.utils import not_implemented_for\n\n__all__ = [\n \"clique_removal\",\n \"max_clique\",\n \"large_clique_size\",\n \"maximum_independent_set\",\n]\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef maximum_independent_set(G):\n \"\"\"Returns an approximate maximum independent set.\n\n Independent set or stable set is a set of vertices in a graph, no two of\n which are adjacent. That is, it is a set I of vertices such that for every\n two vertices in I, there is no edge connecting the two. Equivalently, each\n edge in the graph has at most one endpoint in I. The size of an independent\n set is the number of vertices it contains [1]_.\n\n A maximum independent set is a largest independent set for a given graph G\n and its size is denoted $\\\\alpha(G)$. The problem of finding such a set is called\n the maximum independent set problem and is an NP-hard optimization problem.\n As such, it is unlikely that there exists an efficient algorithm for finding\n a maximum independent set of a graph.\n\n The Independent Set algorithm is based on [2]_.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n iset : Set\n The apx-maximum independent set\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.maximum_independent_set(G)\n {0, 2, 4, 6, 9}\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case.\n\n References\n ----------\n .. [1] `Wikipedia: Independent set\n <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_\n .. [2] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. 
Springer.\n \"\"\"\n iset, _ = clique_removal(G)\n return iset\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef max_clique(G):\n r\"\"\"Find the Maximum Clique\n\n Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set\n in the worst case.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n clique : set\n The apx-maximum clique of the graph\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.max_clique(G)\n {8, 9}\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n A clique in an undirected graph G = (V, E) is a subset of the vertex set\n `C \\subseteq V` such that for every two vertices in C there exists an edge\n connecting the two. This is equivalent to saying that the subgraph\n induced by C is complete (in some cases, the term clique may also refer\n to the subgraph).\n\n A maximum clique is a clique of the largest possible size in a given graph.\n The clique number `\\omega(G)` of a graph G is the number of\n vertices in a maximum clique in G. The intersection number of\n G is the smallest number of cliques that together cover all edges of G.\n\n https://en.wikipedia.org/wiki/Maximum_clique\n\n References\n ----------\n .. [1] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n doi:10.1007/BF01994876\n \"\"\"\n # finding the maximum clique in a graph is equivalent to finding\n # the independent set in the complementary graph\n cgraph = nx.complement(G)\n iset, _ = clique_removal(cgraph)\n return iset\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef clique_removal(G):\n r\"\"\"Repeatedly remove cliques from the graph.\n\n Results in a $O(|V|/(\\log |V|)^2)$ approximation of maximum clique\n and independent set. Returns the largest independent set found, along\n with found maximal cliques.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n Returns\n -------\n max_ind_cliques : (set, list) tuple\n 2-tuple of Maximal Independent Set and list of maximal cliques (sets).\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.clique_removal(G)\n ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}])\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n References\n ----------\n .. [1] Boppana, R., & Halld\u00f3rsson, M. M. (1992).\n Approximating maximum independent sets by excluding subgraphs.\n BIT Numerical Mathematics, 32(2), 180\u2013196. Springer.\n \"\"\"\n graph = G.copy()\n c_i, i_i = ramsey.ramsey_R2(graph)\n cliques = [c_i]\n isets = [i_i]\n while graph:\n graph.remove_nodes_from(c_i)\n c_i, i_i = ramsey.ramsey_R2(graph)\n if c_i:\n cliques.append(c_i)\n if i_i:\n isets.append(i_i)\n # Determine the largest independent set as measured by cardinality.\n maxiset = max(isets, key=len)\n return maxiset, cliques\n\n\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")\ndef large_clique_size(G):\n \"\"\"Find the size of a large clique in a graph.\n\n A *clique* is a subset of nodes in which each pair of nodes is\n adjacent. 
This function is a heuristic for finding the size of a\n large clique in the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n k: integer\n The size of a large clique in the graph.\n\n Examples\n --------\n >>> G = nx.path_graph(10)\n >>> nx.approximation.large_clique_size(G)\n 2\n\n Raises\n ------\n NetworkXNotImplemented\n If the graph is directed or is a multigraph.\n\n Notes\n -----\n This implementation is from [1]_. Its worst case time complexity is\n :math:`O(n d^2)`, where *n* is the number of nodes in the graph and\n *d* is the maximum degree.\n\n This function is a heuristic, which means it may work well in\n practice, but there is no rigorous mathematical guarantee on the\n ratio between the returned number and the actual largest clique size\n in the graph.\n\n References\n ----------\n .. [1] Pattabiraman, Bharath, et al.\n \"Fast Algorithms for the Maximum Clique Problem on Massive Graphs\n with Applications to Overlapping Community Detection.\"\n *Internet Mathematics* 11.4-5 (2015): 421--448.\n <https://doi.org/10.1080/15427951.2014.986778>\n\n See also\n --------\n\n :func:`networkx.algorithms.approximation.clique.max_clique`\n A function that returns an approximate maximum clique with a\n guarantee on the approximation ratio.\n\n :mod:`networkx.algorithms.clique`\n Functions for finding the exact maximum clique in a graph.\n\n \"\"\"\n degrees = G.degree\n\n def _clique_heuristic(G, U, size, best_size):\n if not U:\n return max(best_size, size)\n u = max(U, key=degrees)\n U.remove(u)\n N_prime = {v for v in G[u] if degrees[v] >= best_size}\n return _clique_heuristic(G, U & N_prime, size + 1, best_size)\n\n best_size = 0\n nodes = (u for u in G if degrees[u] >= best_size)\n for u in nodes:\n neighbors = {v for v in G[u] if degrees[v] >= best_size}\n best_size = _clique_heuristic(G, neighbors, 1, best_size)\n return best_size\n", "path": "networkx/algorithms/approximation/clique.py"}]}
| 3,390 | 163 |
gh_patches_debug_11666
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-1413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests: Logs are throwing duplicated output
You can review any log (this one, for example: https://travis-ci.org/iterative/dvc/jobs/457244685#L1544-L1571)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/logger.py`
Content:
```
1 import sys
2 import logging
3 import colorama
4 import traceback
5 import re
6
7
8 colorama.init()
9
10
11 def visual_width(line):
12 """ Get the the number of columns required to display a string """
13
14 return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))
15
16
17 def visual_center(line, width):
18 """ Center align string according to it's visual width """
19
20 spaces = max(width - visual_width(line), 0)
21 left_padding = int(spaces / 2)
22 right_padding = (spaces - left_padding)
23
24 return (left_padding * ' ') + line + (right_padding * ' ')
25
26
27 class Logger(object):
28 FMT = '%(message)s'
29 DEFAULT_LEVEL = logging.INFO
30
31 LEVEL_MAP = {
32 'debug': logging.DEBUG,
33 'info': logging.INFO,
34 'warn': logging.WARNING,
35 'error': logging.ERROR
36 }
37
38 COLOR_MAP = {
39 'green': colorama.Fore.GREEN,
40 'yellow': colorama.Fore.YELLOW,
41 'blue': colorama.Fore.BLUE,
42 'red': colorama.Fore.RED,
43 }
44
45 LEVEL_COLOR_MAP = {
46 'debug': 'blue',
47 'warn': 'yellow',
48 'error': 'red',
49 }
50
51 def __init__(self, loglevel=None):
52 if loglevel:
53 Logger.set_level(loglevel)
54
55 @staticmethod
56 def init():
57
58 class LogLevelFilter(logging.Filter):
59 def filter(self, record):
60 return record.levelno <= logging.WARNING
61
62 sh_out = logging.StreamHandler(sys.stdout)
63 sh_out.setFormatter(logging.Formatter(Logger.FMT))
64 sh_out.setLevel(logging.DEBUG)
65 sh_out.addFilter(LogLevelFilter())
66
67 sh_err = logging.StreamHandler(sys.stderr)
68 sh_err.setFormatter(logging.Formatter(Logger.FMT))
69 sh_err.setLevel(logging.ERROR)
70
71 Logger.logger().addHandler(sh_out)
72 Logger.logger().addHandler(sh_err)
73 Logger.set_level()
74
75 @staticmethod
76 def logger():
77 return logging.getLogger('dvc')
78
79 @staticmethod
80 def set_level(level=None):
81 if not level:
82 lvl = Logger.DEFAULT_LEVEL
83 else:
84 lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)
85 Logger.logger().setLevel(lvl)
86
87 @staticmethod
88 def be_quiet():
89 Logger.logger().setLevel(logging.CRITICAL)
90
91 @staticmethod
92 def be_verbose():
93 Logger.logger().setLevel(logging.DEBUG)
94
95 @staticmethod
96 def colorize(msg, color):
97 header = ''
98 footer = ''
99
100 if sys.stdout.isatty(): # pragma: no cover
101 header = Logger.COLOR_MAP.get(color.lower(), '')
102 footer = colorama.Style.RESET_ALL
103
104 return u'{}{}{}'.format(header, msg, footer)
105
106 @staticmethod
107 def parse_exc(exc, tb=None):
108 str_tb = tb if tb else None
109 str_exc = str(exc) if exc else ""
110 l_str_exc = []
111
112 if len(str_exc) != 0:
113 l_str_exc.append(str_exc)
114
115 if exc and hasattr(exc, 'cause') and exc.cause:
116 cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None
117 l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,
118 cause_tb)
119
120 str_tb = cause_str_tb
121 l_str_exc += l_cause_str_exc
122
123 return (l_str_exc, str_tb)
124
125 @staticmethod
126 def _prefix(msg, typ):
127 color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')
128 return Logger.colorize('{}'.format(msg), color)
129
130 @staticmethod
131 def error_prefix():
132 return Logger._prefix('Error', 'error')
133
134 @staticmethod
135 def warning_prefix():
136 return Logger._prefix('Warning', 'warn')
137
138 @staticmethod
139 def debug_prefix():
140 return Logger._prefix('Debug', 'debug')
141
142 @staticmethod
143 def _with_progress(func, msg):
144 from dvc.progress import progress
145 with progress:
146 func(msg)
147
148 @staticmethod
149 def _with_exc(func, prefix, msg, suffix="", exc=None):
150 l_str_exc, str_tb = Logger.parse_exc(exc)
151
152 if exc is not None and Logger.is_verbose():
153 str_tb = str_tb if str_tb else traceback.format_exc()
154 Logger._with_progress(Logger.logger().error, str_tb)
155
156 l_msg = [prefix]
157 if msg is not None and len(msg) != 0:
158 l_msg.append(msg)
159 l_msg += l_str_exc
160
161 Logger._with_progress(func, ': '.join(l_msg) + suffix)
162
163 @staticmethod
164 def error(msg, exc=None):
165 chat = "\n\nHaving any troubles? Hit us up at dvc.org/support, " \
166 "we are always happy to help!"
167 Logger._with_exc(Logger.logger().error,
168 Logger.error_prefix(),
169 msg,
170 suffix=chat,
171 exc=exc)
172
173 @classmethod
174 def warn(cls, msg, exc=None):
175 cls._with_exc(cls.logger().warning,
176 cls.warning_prefix(),
177 msg,
178 exc=exc)
179
180 @classmethod
181 def debug(cls, msg, exc=None):
182 cls._with_exc(cls.logger().debug,
183 cls.debug_prefix(),
184 msg,
185 exc=exc)
186
187 @staticmethod
188 def info(msg):
189 Logger._with_progress(Logger.logger().info, msg)
190
191 @staticmethod
192 def is_quiet():
193 return Logger.logger().level == logging.CRITICAL
194
195 @staticmethod
196 def is_verbose():
197 return Logger.logger().getEffectiveLevel() == logging.DEBUG
198
199 @classmethod
200 def box(cls, msg, border_color=''):
201 if cls.is_quiet():
202 return
203
204 lines = msg.split('\n')
205 max_width = max(visual_width(line) for line in lines)
206
207 padding_horizontal = 5
208 padding_vertical = 1
209
210 box_size_horizontal = (max_width + (padding_horizontal * 2))
211
212 chars = {
213 'corner': '+',
214 'horizontal': '-',
215 'vertical': '|',
216 'empty': ' ',
217 }
218
219 margin = "{corner}{line}{corner}\n".format(
220 corner=chars['corner'],
221 line=chars['horizontal'] * box_size_horizontal,
222 )
223
224 padding_lines = [
225 "{border}{space}{border}\n".format(
226 border=cls.colorize(chars['vertical'], border_color),
227 space=chars['empty'] * box_size_horizontal,
228 ) * padding_vertical
229 ]
230
231 content_lines = [
232 "{border}{space}{content}{space}{border}\n".format(
233 border=cls.colorize(chars['vertical'], border_color),
234 space=chars['empty'] * padding_horizontal,
235 content=visual_center(line, max_width),
236 ) for line in lines
237 ]
238
239 box = "{margin}{padding}{content}{padding}{margin}".format(
240 margin=cls.colorize(margin, border_color),
241 padding=''.join(padding_lines),
242 content=''.join(content_lines),
243 )
244
245 print(box)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/logger.py b/dvc/logger.py
--- a/dvc/logger.py
+++ b/dvc/logger.py
@@ -59,6 +59,9 @@
def filter(self, record):
return record.levelno <= logging.WARNING
+ if Logger._already_initialized():
+ return
+
sh_out = logging.StreamHandler(sys.stdout)
sh_out.setFormatter(logging.Formatter(Logger.FMT))
sh_out.setLevel(logging.DEBUG)
@@ -243,3 +246,7 @@
)
print(box)
+
+ @staticmethod
+ def _already_initialized():
+ return bool(Logger.logger().handlers)
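A possible regression check for the guard introduced in this patch (sketch only — the test name and the handler-clearing setup are assumptions, not part of the DVC test suite):

```python
def test_logger_init_is_idempotent():
    from dvc.logger import Logger
    Logger.logger().handlers.clear()   # start from a clean 'dvc' logger
    Logger.init()
    Logger.init()                      # second call should now return early
    # one stdout handler plus one stderr handler, not four
    assert len(Logger.logger().handlers) == 2
```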
|
{"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -59,6 +59,9 @@\n def filter(self, record):\n return record.levelno <= logging.WARNING\n \n+ if Logger._already_initialized():\n+ return\n+\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n@@ -243,3 +246,7 @@\n )\n \n print(box)\n+\n+ @staticmethod\n+ def _already_initialized():\n+ return bool(Logger.logger().handlers)\n", "issue": "tests: Logs are throwing duplicated output\nYou can review any log (this one, for example: https://travis-ci.org/iterative/dvc/jobs/457244685#L1544-L1571)\n", "before_files": [{"content": "import sys\nimport logging\nimport colorama\nimport traceback\nimport re\n\n\ncolorama.init()\n\n\ndef visual_width(line):\n \"\"\" Get the the number of columns required to display a string \"\"\"\n\n return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))\n\n\ndef visual_center(line, width):\n \"\"\" Center align string according to it's visual width \"\"\"\n\n spaces = max(width - visual_width(line), 0)\n left_padding = int(spaces / 2)\n right_padding = (spaces - left_padding)\n\n return (left_padding * ' ') + line + (right_padding * ' ')\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'green': colorama.Fore.GREEN,\n 'yellow': colorama.Fore.YELLOW,\n 'blue': colorama.Fore.BLUE,\n 'red': colorama.Fore.RED,\n }\n\n LEVEL_COLOR_MAP = {\n 'debug': 'blue',\n 'warn': 'yellow',\n 'error': 'red',\n }\n\n def __init__(self, loglevel=None):\n if loglevel:\n Logger.set_level(loglevel)\n\n @staticmethod\n def init():\n\n class LogLevelFilter(logging.Filter):\n def filter(self, record):\n return record.levelno <= logging.WARNING\n\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n sh_out.addFilter(LogLevelFilter())\n\n sh_err = logging.StreamHandler(sys.stderr)\n sh_err.setFormatter(logging.Formatter(Logger.FMT))\n sh_err.setLevel(logging.ERROR)\n\n Logger.logger().addHandler(sh_out)\n Logger.logger().addHandler(sh_err)\n Logger.set_level()\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, color):\n header = ''\n footer = ''\n\n if sys.stdout.isatty(): # pragma: no cover\n header = Logger.COLOR_MAP.get(color.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n str_exc = str(exc) if exc else \"\"\n l_str_exc = []\n\n if len(str_exc) != 0:\n l_str_exc.append(str_exc)\n\n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,\n cause_tb)\n\n str_tb = cause_str_tb\n l_str_exc += l_cause_str_exc\n\n return (l_str_exc, str_tb)\n\n @staticmethod\n def _prefix(msg, typ):\n color = 
Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n return Logger.colorize('{}'.format(msg), color)\n\n @staticmethod\n def error_prefix():\n return Logger._prefix('Error', 'error')\n\n @staticmethod\n def warning_prefix():\n return Logger._prefix('Warning', 'warn')\n\n @staticmethod\n def debug_prefix():\n return Logger._prefix('Debug', 'debug')\n\n @staticmethod\n def _with_progress(func, msg):\n from dvc.progress import progress\n with progress:\n func(msg)\n\n @staticmethod\n def _with_exc(func, prefix, msg, suffix=\"\", exc=None):\n l_str_exc, str_tb = Logger.parse_exc(exc)\n\n if exc is not None and Logger.is_verbose():\n str_tb = str_tb if str_tb else traceback.format_exc()\n Logger._with_progress(Logger.logger().error, str_tb)\n\n l_msg = [prefix]\n if msg is not None and len(msg) != 0:\n l_msg.append(msg)\n l_msg += l_str_exc\n\n Logger._with_progress(func, ': '.join(l_msg) + suffix)\n\n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n Logger.error_prefix(),\n msg,\n suffix=chat,\n exc=exc)\n\n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n cls.warning_prefix(),\n msg,\n exc=exc)\n\n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n cls.debug_prefix(),\n msg,\n exc=exc)\n\n @staticmethod\n def info(msg):\n Logger._with_progress(Logger.logger().info, msg)\n\n @staticmethod\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n\n @staticmethod\n def is_verbose():\n return Logger.logger().getEffectiveLevel() == logging.DEBUG\n\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\n return\n\n lines = msg.split('\\n')\n max_width = max(visual_width(line) for line in lines)\n\n padding_horizontal = 5\n padding_vertical = 1\n\n box_size_horizontal = (max_width + (padding_horizontal * 2))\n\n chars = {\n 'corner': '+',\n 'horizontal': '-',\n 'vertical': '|',\n 'empty': ' ',\n }\n\n margin = \"{corner}{line}{corner}\\n\".format(\n corner=chars['corner'],\n line=chars['horizontal'] * box_size_horizontal,\n )\n\n padding_lines = [\n \"{border}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * box_size_horizontal,\n ) * padding_vertical\n ]\n\n content_lines = [\n \"{border}{space}{content}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * padding_horizontal,\n content=visual_center(line, max_width),\n ) for line in lines\n ]\n\n box = \"{margin}{padding}{content}{padding}{margin}\".format(\n margin=cls.colorize(margin, border_color),\n padding=''.join(padding_lines),\n content=''.join(content_lines),\n )\n\n print(box)\n", "path": "dvc/logger.py"}], "after_files": [{"content": "import sys\nimport logging\nimport colorama\nimport traceback\nimport re\n\n\ncolorama.init()\n\n\ndef visual_width(line):\n \"\"\" Get the the number of columns required to display a string \"\"\"\n\n return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))\n\n\ndef visual_center(line, width):\n \"\"\" Center align string according to it's visual width \"\"\"\n\n spaces = max(width - visual_width(line), 0)\n left_padding = int(spaces / 2)\n right_padding = (spaces - left_padding)\n\n return (left_padding * ' ') + line + (right_padding * ' ')\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 
'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'green': colorama.Fore.GREEN,\n 'yellow': colorama.Fore.YELLOW,\n 'blue': colorama.Fore.BLUE,\n 'red': colorama.Fore.RED,\n }\n\n LEVEL_COLOR_MAP = {\n 'debug': 'blue',\n 'warn': 'yellow',\n 'error': 'red',\n }\n\n def __init__(self, loglevel=None):\n if loglevel:\n Logger.set_level(loglevel)\n\n @staticmethod\n def init():\n\n class LogLevelFilter(logging.Filter):\n def filter(self, record):\n return record.levelno <= logging.WARNING\n\n if Logger._already_initialized():\n return\n\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n sh_out.addFilter(LogLevelFilter())\n\n sh_err = logging.StreamHandler(sys.stderr)\n sh_err.setFormatter(logging.Formatter(Logger.FMT))\n sh_err.setLevel(logging.ERROR)\n\n Logger.logger().addHandler(sh_out)\n Logger.logger().addHandler(sh_err)\n Logger.set_level()\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, color):\n header = ''\n footer = ''\n\n if sys.stdout.isatty(): # pragma: no cover\n header = Logger.COLOR_MAP.get(color.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n str_exc = str(exc) if exc else \"\"\n l_str_exc = []\n\n if len(str_exc) != 0:\n l_str_exc.append(str_exc)\n\n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,\n cause_tb)\n\n str_tb = cause_str_tb\n l_str_exc += l_cause_str_exc\n\n return (l_str_exc, str_tb)\n\n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n return Logger.colorize('{}'.format(msg), color)\n\n @staticmethod\n def error_prefix():\n return Logger._prefix('Error', 'error')\n\n @staticmethod\n def warning_prefix():\n return Logger._prefix('Warning', 'warn')\n\n @staticmethod\n def debug_prefix():\n return Logger._prefix('Debug', 'debug')\n\n @staticmethod\n def _with_progress(func, msg):\n from dvc.progress import progress\n with progress:\n func(msg)\n\n @staticmethod\n def _with_exc(func, prefix, msg, suffix=\"\", exc=None):\n l_str_exc, str_tb = Logger.parse_exc(exc)\n\n if exc is not None and Logger.is_verbose():\n str_tb = str_tb if str_tb else traceback.format_exc()\n Logger._with_progress(Logger.logger().error, str_tb)\n\n l_msg = [prefix]\n if msg is not None and len(msg) != 0:\n l_msg.append(msg)\n l_msg += l_str_exc\n\n Logger._with_progress(func, ': '.join(l_msg) + suffix)\n\n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n Logger.error_prefix(),\n msg,\n suffix=chat,\n exc=exc)\n\n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n cls.warning_prefix(),\n msg,\n exc=exc)\n\n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n cls.debug_prefix(),\n msg,\n exc=exc)\n\n @staticmethod\n def info(msg):\n Logger._with_progress(Logger.logger().info, msg)\n\n @staticmethod\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n\n @staticmethod\n def is_verbose():\n return Logger.logger().getEffectiveLevel() == logging.DEBUG\n\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\n return\n\n lines = msg.split('\\n')\n max_width = max(visual_width(line) for line in lines)\n\n padding_horizontal = 5\n padding_vertical = 1\n\n box_size_horizontal = (max_width + (padding_horizontal * 2))\n\n chars = {\n 'corner': '+',\n 'horizontal': '-',\n 'vertical': '|',\n 'empty': ' ',\n }\n\n margin = \"{corner}{line}{corner}\\n\".format(\n corner=chars['corner'],\n line=chars['horizontal'] * box_size_horizontal,\n )\n\n padding_lines = [\n \"{border}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * box_size_horizontal,\n ) * padding_vertical\n ]\n\n content_lines = [\n \"{border}{space}{content}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * padding_horizontal,\n content=visual_center(line, max_width),\n ) for line in lines\n ]\n\n box = \"{margin}{padding}{content}{padding}{margin}\".format(\n margin=cls.colorize(margin, border_color),\n padding=''.join(padding_lines),\n content=''.join(content_lines),\n )\n\n print(box)\n\n @staticmethod\n def _already_initialized():\n return bool(Logger.logger().handlers)\n", "path": "dvc/logger.py"}]}
| 2,488 | 140 |
gh_patches_debug_26504
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-253
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[0.7.0] `CeleryIntegration` captures retries
Greetings fellows!
We are having an issue with `CeleryIntegration` in Sentry SDK.
### Current versions
Python 3.6.7
Django 2.1.5
Celery 4.1.1
Sentry SDK 0.7.0-0.7.1
### Current behavior
In our code (internal and 3rd-party) we are using [Celery tasks retry functionality](http://docs.celeryproject.org/en/latest/userguide/tasks.html#retrying).
> The app.Task.retry() call will raise an exception so any code after the retry won’t be reached. This is the Retry exception, it isn’t handled as an error but rather as a semi-predicate to signify to the worker that the task is to be retried, so that it can store the correct state when a result backend is enabled.
We did switch recently from Raven to Sentry SDK 0.6.9, everything seemed working as before.
But today we updated it to [0.7.0 release](https://github.com/getsentry/sentry-python/blob/master/CHANGES.md#070) (and later to 0.7.1)
This caused **every [`celery.exceptions.Retry`](http://docs.celeryproject.org/en/latest/reference/celery.exceptions.html#celery.exceptions.Retry) to be sent to Sentry**, which quickly filled Sentry server with thousands of events.
Previously (in old SDK and Raven), those exceptions were ignored and not sent to Sentry server.
### Expected behaviour
`CeleryIntegration` is not flooding Sentry server with every retry exception. Basically, the same behavior as it was in Raven and Sentry SDK<0.7.0.
### Open questions
I am not sure if the old behavior was done intentionally or by mistake.
If that was intended, we should reimplement it in current integration.
If not, there should be a way to filter/ignore that kind of exceptions (I am not sure if we can filter all retries from internal and 3rd-party code in`before_send` in a clean way).
Could you help me to clarify this issue?
--- END ISSUE ---
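The eventual fix lands inside the integration itself (see the diff later in this record); in the meantime, the user-side workaround the issue hints at is a `before_send` hook that drops retry pseudo-errors. A hedged sketch, not SDK code:

```python
import sentry_sdk
from celery.exceptions import Retry

def before_send(event, hint):
    exc_info = hint.get("exc_info")
    if exc_info is not None and isinstance(exc_info[1], Retry):
        return None                  # drop retry "errors" instead of reporting them
    return event

sentry_sdk.init(dsn="...", before_send=before_send)
```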
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/celery.py`
Content:
```
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.exceptions import SoftTimeLimitExceeded
6
7 from sentry_sdk.hub import Hub
8 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
9 from sentry_sdk._compat import reraise
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 import celery.app.trace as trace
20
21 old_build_tracer = trace.build_tracer
22
23 def sentry_build_tracer(name, task, *args, **kwargs):
24 # Need to patch both methods because older celery sometimes
25 # short-circuits to task.run if it thinks it's safe.
26 task.__call__ = _wrap_task_call(task.__call__)
27 task.run = _wrap_task_call(task.run)
28 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
29
30 trace.build_tracer = sentry_build_tracer
31
32 # This logger logs every status of every task that ran on the worker.
33 # Meaning that every task's breadcrumbs are full of stuff like "Task
34 # <foo> raised unexpected <bar>".
35 ignore_logger("celery.worker.job")
36
37
38 def _wrap_tracer(task, f):
39 # Need to wrap tracer for pushing the scope before prerun is sent, and
40 # popping it after postrun is sent.
41 #
42 # This is the reason we don't use signals for hooking in the first place.
43 # Also because in Celery 3, signal dispatch returns early if one handler
44 # crashes.
45 def _inner(*args, **kwargs):
46 hub = Hub.current
47 if hub.get_integration(CeleryIntegration) is None:
48 return f(*args, **kwargs)
49
50 with hub.push_scope() as scope:
51 scope._name = "celery"
52 scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
53
54 return f(*args, **kwargs)
55
56 return _inner
57
58
59 def _wrap_task_call(f):
60 # Need to wrap task call because the exception is caught before we get to
61 # see it. Also celery's reported stacktrace is untrustworthy.
62 def _inner(*args, **kwargs):
63 try:
64 return f(*args, **kwargs)
65 except Exception:
66 reraise(*_capture_exception())
67
68 return _inner
69
70
71 def _make_event_processor(task, uuid, args, kwargs, request=None):
72 def event_processor(event, hint):
73 with capture_internal_exceptions():
74 event["transaction"] = task.name
75
76 with capture_internal_exceptions():
77 extra = event.setdefault("extra", {})
78 extra["celery-job"] = {
79 "task_name": task.name,
80 "args": args,
81 "kwargs": kwargs,
82 }
83
84 if "exc_info" in hint:
85 with capture_internal_exceptions():
86 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
87 event["fingerprint"] = [
88 "celery",
89 "SoftTimeLimitExceeded",
90 getattr(task, "name", task),
91 ]
92
93 with capture_internal_exceptions():
94 if hasattr(task, "throws") and isinstance(
95 hint["exc_info"][1], task.throws
96 ):
97 return None
98
99 return event
100
101 return event_processor
102
103
104 def _capture_exception():
105 hub = Hub.current
106 exc_info = sys.exc_info()
107
108 if hub.get_integration(CeleryIntegration) is not None:
109 event, hint = event_from_exception(
110 exc_info,
111 client_options=hub.client.options,
112 mechanism={"type": "celery", "handled": False},
113 )
114 hub.capture_event(event, hint=hint)
115
116 return exc_info
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -2,7 +2,7 @@
import sys
-from celery.exceptions import SoftTimeLimitExceeded
+from celery.exceptions import SoftTimeLimitExceeded, Retry
from sentry_sdk.hub import Hub
from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
@@ -82,6 +82,15 @@
}
if "exc_info" in hint:
+ with capture_internal_exceptions():
+ if isinstance(hint["exc_info"][1], Retry):
+ return None
+
+ if hasattr(task, "throws") and isinstance(
+ hint["exc_info"][1], task.throws
+ ):
+ return None
+
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
event["fingerprint"] = [
@@ -90,12 +99,6 @@
getattr(task, "name", task),
]
- with capture_internal_exceptions():
- if hasattr(task, "throws") and isinstance(
- hint["exc_info"][1], task.throws
- ):
- return None
-
return event
return event_processor
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -2,7 +2,7 @@\n \n import sys\n \n-from celery.exceptions import SoftTimeLimitExceeded\n+from celery.exceptions import SoftTimeLimitExceeded, Retry\n \n from sentry_sdk.hub import Hub\n from sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n@@ -82,6 +82,15 @@\n }\n \n if \"exc_info\" in hint:\n+ with capture_internal_exceptions():\n+ if isinstance(hint[\"exc_info\"][1], Retry):\n+ return None\n+\n+ if hasattr(task, \"throws\") and isinstance(\n+ hint[\"exc_info\"][1], task.throws\n+ ):\n+ return None\n+\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n@@ -90,12 +99,6 @@\n getattr(task, \"name\", task),\n ]\n \n- with capture_internal_exceptions():\n- if hasattr(task, \"throws\") and isinstance(\n- hint[\"exc_info\"][1], task.throws\n- ):\n- return None\n-\n return event\n \n return event_processor\n", "issue": "[0.7.0] `CeleryIntegration` captures retries\nGreetings fellows!\r\n\r\nWe are having an issue with `CeleryIntegration` in Sentry SDK.\r\n\r\n### Current versions\r\nPython 3.6.7\r\nDjango 2.1.5\r\nCelery 4.1.1\r\nSentry SDK 0.7.0-0.7.1\r\n\r\n### Current behavior\r\nIn our code (internal and 3rd-party) we are using [Celery tasks retry functionality](http://docs.celeryproject.org/en/latest/userguide/tasks.html#retrying).\r\n\r\n> The app.Task.retry() call will raise an exception so any code after the retry won\u2019t be reached. This is the Retry exception, it isn\u2019t handled as an error but rather as a semi-predicate to signify to the worker that the task is to be retried, so that it can store the correct state when a result backend is enabled.\r\n\r\nWe did switch recently from Raven to Sentry SDK 0.6.9, everything seemed working as before.\r\nBut today we updated it to [0.7.0 release](https://github.com/getsentry/sentry-python/blob/master/CHANGES.md#070) (and later to 0.7.1)\r\n\r\nThis caused **every [`celery.exceptions.Retry`](http://docs.celeryproject.org/en/latest/reference/celery.exceptions.html#celery.exceptions.Retry) to be sent to Sentry**, which quickly filled Sentry server with thousands of events.\r\nPreviously (in old SDK and Raven), those exceptions were ignored and not sent to Sentry server.\r\n\r\n### Expected behaviour\r\n`CeleryIntegration` is not flooding Sentry server with every retry exception. 
Basically, the same behavior as it was in Raven and Sentry SDK<0.7.0.\r\n\r\n### Open questions\r\nI am not sure if the old behavior was done intentionally or by mistake.\r\nIf that was intended, we should reimplement it in current integration.\r\nIf not, there should be a way to filter/ignore that kind of exceptions (I am not sure if we can filter all retries from internal and 3rd-party code in`before_send` in a clean way).\r\n\r\nCould you help me to clarify this issue?\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task.__call__)\n task.run = _wrap_task_call(task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. 
Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n reraise(*_capture_exception())\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n with capture_internal_exceptions():\n if hasattr(task, \"throws\") and isinstance(\n hint[\"exc_info\"][1], task.throws\n ):\n return None\n\n return event\n\n return event_processor\n\n\ndef _capture_exception():\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(CeleryIntegration) is not None:\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/celery.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import SoftTimeLimitExceeded, Retry\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n import celery.app.trace as trace\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task.__call__)\n task.run = _wrap_task_call(task.run)\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. 
Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n reraise(*_capture_exception())\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n event[\"transaction\"] = task.name\n\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if isinstance(hint[\"exc_info\"][1], Retry):\n return None\n\n if hasattr(task, \"throws\") and isinstance(\n hint[\"exc_info\"][1], task.throws\n ):\n return None\n\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception():\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(CeleryIntegration) is not None:\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/celery.py"}]}
| 1,797 | 305 |
gh_patches_debug_30509
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-5560
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the accuracy of Classification models by using SOTA recipes and primitives
## 🚀 Feature
Update the weights of all pre-trained models to improve their accuracy.
## Motivation
<h4 id="new-recipe-with-fixres">New Recipe + FixRes mitigations</h4>
```
torchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \
--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \
--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00002 \
--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \
--train-crop-size 176 --model-ema --val-resize-size 232
```
Using a recipe which includes Warmup, Cosine Annealing, Label Smoothing, Mixup, Cutmix, Random Erasing, TrivialAugment, No BN weight decay, EMA and long training cycles and optional FixRes mitigations we are able to improve the `resnet50` accuracy by over 4.5 points. For more information on the training recipe, check [here](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/):
```
Old ResNet50:
Acc@1 76.130 Acc@5 92.862
New ResNet50:
Acc@1 80.674 Acc@5 95.166
```
Running other models through the same recipe, achieves the following improved accuracies:
```
ResNet101:
Acc@1 81.728 Acc@5 95.670
ResNet152:
Acc@1 82.042 Acc@5 95.926
ResNeXt50_32x4d:
Acc@1 81.116 Acc@5 95.478
ResNeXt101_32x8d:
Acc@1 82.834 Acc@5 96.228
MobileNetV3 Large:
Acc@1 74.938 Acc@5 92.496
Wide ResNet50 2:
Acc@1 81.602 Acc@5 95.758 (@prabhat00155)
Wide ResNet101 2:
Acc@1 82.492 Acc@5 96.110 (@prabhat00155)
regnet_x_400mf:
Acc@1 74.864 Acc@5 92.322 (@kazhang)
regnet_x_800mf:
Acc@1 77.522 Acc@5 93.826 (@kazhang)
regnet_x_1_6gf:
Acc@1 79.668 Acc@5 94.922 (@kazhang)
```
<h4 id="new-recipe">New Recipe (without FixRes mitigations)</h4>
```
torchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \
--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \
--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00002 \
--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \
--model-ema --val-resize-size 232
```
Removing the optional FixRes mitigations seems to yield better results for some deeper architectures and variants with larger receptive fields:
```
ResNet101:
Acc@1 81.886 Acc@5 95.780
ResNet152:
Acc@1 82.284 Acc@5 96.002
ResNeXt50_32x4d:
Acc@1 81.198 Acc@5 95.340
ResNeXt101_32x8d:
Acc@1 82.812 Acc@5 96.226
MobileNetV3 Large:
Acc@1 75.152 Acc@5 92.634
Wide ResNet50_2:
Acc@1 81.452 Acc@5 95.544 (@prabhat00155)
Wide ResNet101_2:
Acc@1 82.510 Acc@5 96.020 (@prabhat00155)
regnet_x_3_2gf:
Acc@1 81.196 Acc@5 95.430
regnet_x_8gf:
Acc@1 81.682 Acc@5 95.678
regnet_x_16g:
Acc@1 82.716 Acc@5 96.196
regnet_x_32gf:
Acc@1 83.014 Acc@5 96.288
regnet_y_400mf:
Acc@1 75.804 Acc@5 92.742
regnet_y_800mf:
Acc@1 78.828 Acc@5 94.502
regnet_y_1_6gf:
Acc@1 80.876 Acc@5 95.444
regnet_y_3_2gf:
Acc@1 81.982 Acc@5 95.972
regnet_y_8gf:
Acc@1 82.828 Acc@5 96.330
regnet_y_16gf:
Acc@1 82.886 Acc@5 96.328
regnet_y_32gf:
Acc@1 83.368 Acc@5 96.498
```
<h4 id="new-recipe-with-reg-tuning">New Recipe + Regularization tuning</h4>
```
torchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \
--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \
--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00001 \
--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \
--model-ema --val-resize-size 232
```
Adjusting slightly the regularization can help us improve the following:
```
MobileNetV3 Large:
Acc@1 75.274 Acc@5 92.566
```
In addition to regularization adjustment we can also apply the Repeated Augmentation trick ` --ra-sampler --ra-reps 4`:
```
MobileNetV2:
Acc@1 72.154 Acc@5 90.822
```
<h4 id="ptq-models">Post-Training Quantized models</h4>
```
ResNet50:
Acc@1 80.282 Acc@5 94.976
ResNeXt101_32x8d:
Acc@1 82.574 Acc@5 96.132
```
<h4 id="new-recipe-with-lr-wd-crop-tuning">New Recipe (LR+weight_decay+train_crop_size tuning)</h4>
```
torchrun --ngpus 8 --nodes 1 --model $MODEL_NAME --batch-size 128 --lr 1 \
--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \
--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.000002 \
--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \
--train-crop-size 208 --model-ema --val-crop-size 240 --val-resize-size 255
```
```
EfficientNet-B1:
Acc@1 79.838 Acc@5 94.934
```
## Pitch
To be able to improve the pre-trained model accuracy, we need to complete the "Batteries Included" work as #3911. Moreover we will need to extend our existing model builders to support multiple weights as described at #4611. Then we will be able to:
- Update our reference scripts for classification to support the new primitives added by the "Batteries Included" initiative.
- Find a good training recipe for the most important pre-trained models and re-train them. Note that different training configuration might be required for different types of models (for example mobile models are less likely to overfit comparing to bigger models and thus make use of different recipes/primitives)
- Update the weights of the models in the library.
cc @datumbox @vfdev-5
--- END ISSUE ---
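As a rough illustration of the augmentation stack named in the recipes above, the following sketch builds the corresponding torchvision transform pipeline. Hyper-parameters are copied from the command lines; mixup/cutmix, EMA and the LR schedule live in the training loop and are not shown, and the exact preset used by the reference scripts may differ:

```python
import torch
from torchvision import transforms

train_tf = transforms.Compose([
    transforms.RandomResizedCrop(176),          # --train-crop-size 176
    transforms.RandomHorizontalFlip(),
    transforms.TrivialAugmentWide(),            # --auto-augment ta_wide
    transforms.PILToTensor(),
    transforms.ConvertImageDtype(torch.float),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
    transforms.RandomErasing(p=0.1),            # --random-erase 0.1
])
```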
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/prototype/models/mobilenetv2.py`
Content:
```
1 from functools import partial
2 from typing import Any, Optional
3
4 from torchvision.prototype.transforms import ImageNetEval
5 from torchvision.transforms.functional import InterpolationMode
6
7 from ...models.mobilenetv2 import MobileNetV2
8 from ._api import WeightsEnum, Weights
9 from ._meta import _IMAGENET_CATEGORIES
10 from ._utils import handle_legacy_interface, _ovewrite_named_param
11
12
13 __all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]
14
15
16 class MobileNet_V2_Weights(WeightsEnum):
17 IMAGENET1K_V1 = Weights(
18 url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
19 transforms=partial(ImageNetEval, crop_size=224),
20 meta={
21 "task": "image_classification",
22 "architecture": "MobileNetV2",
23 "publication_year": 2018,
24 "num_params": 3504872,
25 "size": (224, 224),
26 "min_size": (1, 1),
27 "categories": _IMAGENET_CATEGORIES,
28 "interpolation": InterpolationMode.BILINEAR,
29 "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
30 "acc@1": 71.878,
31 "acc@5": 90.286,
32 },
33 )
34 DEFAULT = IMAGENET1K_V1
35
36
37 @handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
38 def mobilenet_v2(
39 *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
40 ) -> MobileNetV2:
41 weights = MobileNet_V2_Weights.verify(weights)
42
43 if weights is not None:
44 _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
45
46 model = MobileNetV2(**kwargs)
47
48 if weights is not None:
49 model.load_state_dict(weights.get_state_dict(progress=progress))
50
51 return model
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/prototype/models/mobilenetv2.py b/torchvision/prototype/models/mobilenetv2.py
--- a/torchvision/prototype/models/mobilenetv2.py
+++ b/torchvision/prototype/models/mobilenetv2.py
@@ -13,25 +13,40 @@
__all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]
+_COMMON_META = {
+ "task": "image_classification",
+ "architecture": "MobileNetV2",
+ "publication_year": 2018,
+ "num_params": 3504872,
+ "size": (224, 224),
+ "min_size": (1, 1),
+ "categories": _IMAGENET_CATEGORIES,
+ "interpolation": InterpolationMode.BILINEAR,
+}
+
+
class MobileNet_V2_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
transforms=partial(ImageNetEval, crop_size=224),
meta={
- "task": "image_classification",
- "architecture": "MobileNetV2",
- "publication_year": 2018,
- "num_params": 3504872,
- "size": (224, 224),
- "min_size": (1, 1),
- "categories": _IMAGENET_CATEGORIES,
- "interpolation": InterpolationMode.BILINEAR,
+ **_COMMON_META,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
"acc@1": 71.878,
"acc@5": 90.286,
},
)
- DEFAULT = IMAGENET1K_V1
+ IMAGENET1K_V2 = Weights(
+ url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
+ transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+ meta={
+ **_COMMON_META,
+ "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
+ "acc@1": 72.154,
+ "acc@5": 90.822,
+ },
+ )
+ DEFAULT = IMAGENET1K_V2
@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
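A hedged usage sketch of the patched builder: the enum values come from the diff above and the `meta` keys from the file listing, while the import path is assumed from the file's location in the prototype package:

```python
from torchvision.prototype.models.mobilenetv2 import mobilenet_v2, MobileNet_V2_Weights

old_w = MobileNet_V2_Weights.IMAGENET1K_V1
new_w = MobileNet_V2_Weights.IMAGENET1K_V2      # also the new DEFAULT
print(old_w.meta["acc@1"], "->", new_w.meta["acc@1"])   # 71.878 -> 72.154

model = mobilenet_v2(weights=new_w)
model.eval()
```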
|
{"golden_diff": "diff --git a/torchvision/prototype/models/mobilenetv2.py b/torchvision/prototype/models/mobilenetv2.py\n--- a/torchvision/prototype/models/mobilenetv2.py\n+++ b/torchvision/prototype/models/mobilenetv2.py\n@@ -13,25 +13,40 @@\n __all__ = [\"MobileNetV2\", \"MobileNet_V2_Weights\", \"mobilenet_v2\"]\n \n \n+_COMMON_META = {\n+ \"task\": \"image_classification\",\n+ \"architecture\": \"MobileNetV2\",\n+ \"publication_year\": 2018,\n+ \"num_params\": 3504872,\n+ \"size\": (224, 224),\n+ \"min_size\": (1, 1),\n+ \"categories\": _IMAGENET_CATEGORIES,\n+ \"interpolation\": InterpolationMode.BILINEAR,\n+}\n+\n+\n class MobileNet_V2_Weights(WeightsEnum):\n IMAGENET1K_V1 = Weights(\n url=\"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth\",\n transforms=partial(ImageNetEval, crop_size=224),\n meta={\n- \"task\": \"image_classification\",\n- \"architecture\": \"MobileNetV2\",\n- \"publication_year\": 2018,\n- \"num_params\": 3504872,\n- \"size\": (224, 224),\n- \"min_size\": (1, 1),\n- \"categories\": _IMAGENET_CATEGORIES,\n- \"interpolation\": InterpolationMode.BILINEAR,\n+ **_COMMON_META,\n \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2\",\n \"acc@1\": 71.878,\n \"acc@5\": 90.286,\n },\n )\n- DEFAULT = IMAGENET1K_V1\n+ IMAGENET1K_V2 = Weights(\n+ url=\"https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth\",\n+ transforms=partial(ImageNetEval, crop_size=224, resize_size=232),\n+ meta={\n+ **_COMMON_META,\n+ \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\",\n+ \"acc@1\": 72.154,\n+ \"acc@5\": 90.822,\n+ },\n+ )\n+ DEFAULT = IMAGENET1K_V2\n \n \n @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V2_Weights.IMAGENET1K_V1))\n", "issue": "Improve the accuracy of Classification models by using SOTA recipes and primitives\n## \ud83d\ude80 Feature\r\nUpdate the weights of all pre-trained models to improve their accuracy.\r\n\r\n## Motivation\r\n\r\n<h4 id=\"new-recipe-with-fixres\">New Recipe + FixRes mitigations</h4>\r\n\r\n```\r\ntorchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \\\r\n--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \\\r\n--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00002 \\\r\n--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \\\r\n--train-crop-size 176 --model-ema --val-resize-size 232\r\n```\r\n\r\nUsing a recipe which includes Warmup, Cosine Annealing, Label Smoothing, Mixup, Cutmix, Random Erasing, TrivialAugment, No BN weight decay, EMA and long training cycles and optional FixRes mitigations we are able to improve the `resnet50` accuracy by over 4.5 points. 
For more information on the training recipe, check [here](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/):\r\n```\r\nOld ResNet50:\r\nAcc@1 76.130 Acc@5 92.862\r\n\r\nNew ResNet50:\r\nAcc@1 80.674 Acc@5 95.166\r\n```\r\n\r\nRunning other models through the same recipe, achieves the following improved accuracies:\r\n```\r\nResNet101:\r\nAcc@1 81.728 Acc@5 95.670\r\n\r\nResNet152:\r\nAcc@1 82.042 Acc@5 95.926\r\n\r\nResNeXt50_32x4d:\r\nAcc@1 81.116 Acc@5 95.478\r\n\r\nResNeXt101_32x8d:\r\nAcc@1 82.834 Acc@5 96.228\r\n\r\nMobileNetV3 Large:\r\nAcc@1 74.938 Acc@5 92.496\r\n\r\nWide ResNet50 2:\r\nAcc@1 81.602 Acc@5 95.758 (@prabhat00155)\r\n\r\nWide ResNet101 2:\r\nAcc@1 82.492 Acc@5 96.110 (@prabhat00155)\r\n\r\nregnet_x_400mf:\r\nAcc@1 74.864 Acc@5 92.322 (@kazhang)\r\n\r\nregnet_x_800mf:\r\nAcc@1 77.522 Acc@5 93.826 (@kazhang)\r\n\r\nregnet_x_1_6gf:\r\nAcc@1 79.668 Acc@5 94.922 (@kazhang)\r\n```\r\n\r\n<h4 id=\"new-recipe\">New Recipe (without FixRes mitigations)</h4>\r\n\r\n```\r\ntorchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \\\r\n--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \\\r\n--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00002 \\\r\n--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \\\r\n--model-ema --val-resize-size 232\r\n```\r\n\r\nRemoving the optional FixRes mitigations seems to yield better results for some deeper architectures and variants with larger receptive fields:\r\n```\r\nResNet101:\r\nAcc@1 81.886 Acc@5 95.780\r\n\r\nResNet152:\r\nAcc@1 82.284 Acc@5 96.002\r\n\r\nResNeXt50_32x4d:\r\nAcc@1 81.198 Acc@5 95.340\r\n\r\nResNeXt101_32x8d:\r\nAcc@1 82.812 Acc@5 96.226\r\n\r\nMobileNetV3 Large:\r\nAcc@1 75.152 Acc@5 92.634\r\n\r\nWide ResNet50_2:\r\nAcc@1 81.452 Acc@5 95.544 (@prabhat00155)\r\n\r\nWide ResNet101_2:\r\nAcc@1 82.510 Acc@5 96.020 (@prabhat00155)\r\n\r\nregnet_x_3_2gf:\r\nAcc@1 81.196 Acc@5 95.430\r\n\r\nregnet_x_8gf:\r\nAcc@1 81.682 Acc@5 95.678\r\n\r\nregnet_x_16g:\r\nAcc@1 82.716 Acc@5 96.196\r\n\r\nregnet_x_32gf:\r\nAcc@1 83.014 Acc@5 96.288\r\n\r\nregnet_y_400mf:\r\nAcc@1 75.804 Acc@5 92.742\r\n\r\nregnet_y_800mf:\r\nAcc@1 78.828 Acc@5 94.502\r\n\r\nregnet_y_1_6gf:\r\nAcc@1 80.876 Acc@5 95.444\r\n\r\nregnet_y_3_2gf:\r\nAcc@1 81.982 Acc@5 95.972\r\n\r\nregnet_y_8gf:\r\nAcc@1 82.828 Acc@5 96.330\r\n\r\nregnet_y_16gf:\r\nAcc@1 82.886 Acc@5 96.328\r\n\r\nregnet_y_32gf:\r\nAcc@1 83.368 Acc@5 96.498\r\n```\r\n\r\n<h4 id=\"new-recipe-with-reg-tuning\">New Recipe + Regularization tuning</h4>\r\n\r\n```\r\ntorchrun --nproc_per_node=8 train.py --model $MODEL_NAME --batch-size 128 --lr 0.5 \\\r\n--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \\\r\n--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.00001 \\\r\n--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \\\r\n--model-ema --val-resize-size 232\r\n```\r\n\r\nAdjusting slightly the regularization can help us improve the following:\r\n```\r\nMobileNetV3 Large:\r\nAcc@1 75.274 Acc@5 92.566\r\n```\r\nIn addition to regularization adjustment we can also apply the Repeated Augmentation trick ` --ra-sampler --ra-reps 4`:\r\n\r\n```\r\nMobileNetV2:\r\nAcc@1 72.154 Acc@5 90.822\r\n```\r\n\r\n<h4 id=\"ptq-models\">Post-Training Quantized models</h4>\r\n\r\n```\r\nResNet50:\r\nAcc@1 80.282 Acc@5 94.976\r\n\r\nResNeXt101_32x8d:\r\nAcc@1 82.574 Acc@5 
96.132\r\n```\r\n\r\n<h4 id=\"new-recipe-with-lr-wd-crop-tuning\">New Recipe (LR+weight_decay+train_crop_size tuning)</h4>\r\n\r\n```\r\ntorchrun --ngpus 8 --nodes 1 --model $MODEL_NAME --batch-size 128 --lr 1 \\\r\n--lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --lr-warmup-method linear \\\r\n--auto-augment ta_wide --epochs 600 --random-erase 0.1 --weight-decay 0.000002 \\\r\n--norm-weight-decay 0.0 --label-smoothing 0.1 --mixup-alpha 0.2 --cutmix-alpha 1.0 \\\r\n--train-crop-size 208 --model-ema --val-crop-size 240 --val-resize-size 255\r\n```\r\n\r\n```\r\nEfficientNet-B1:\r\nAcc@1 79.838 Acc@5 94.934\r\n```\r\n\r\n## Pitch\r\n\r\nTo be able to improve the pre-trained model accuracy, we need to complete the \"Batteries Included\" work as #3911. Moreover we will need to extend our existing model builders to support multiple weights as described at #4611. Then we will be able to:\r\n- Update our reference scripts for classification to support the new primitives added by the \"Batteries Included\" initiative.\r\n- Find a good training recipe for the most important pre-trained models and re-train them. Note that different training configuration might be required for different types of models (for example mobile models are less likely to overfit comparing to bigger models and thus make use of different recipes/primitives)\r\n- Update the weights of the models in the library.\r\n\r\ncc @datumbox @vfdev-5\n", "before_files": [{"content": "from functools import partial\nfrom typing import Any, Optional\n\nfrom torchvision.prototype.transforms import ImageNetEval\nfrom torchvision.transforms.functional import InterpolationMode\n\nfrom ...models.mobilenetv2 import MobileNetV2\nfrom ._api import WeightsEnum, Weights\nfrom ._meta import _IMAGENET_CATEGORIES\nfrom ._utils import handle_legacy_interface, _ovewrite_named_param\n\n\n__all__ = [\"MobileNetV2\", \"MobileNet_V2_Weights\", \"mobilenet_v2\"]\n\n\nclass MobileNet_V2_Weights(WeightsEnum):\n IMAGENET1K_V1 = Weights(\n url=\"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth\",\n transforms=partial(ImageNetEval, crop_size=224),\n meta={\n \"task\": \"image_classification\",\n \"architecture\": \"MobileNetV2\",\n \"publication_year\": 2018,\n \"num_params\": 3504872,\n \"size\": (224, 224),\n \"min_size\": (1, 1),\n \"categories\": _IMAGENET_CATEGORIES,\n \"interpolation\": InterpolationMode.BILINEAR,\n \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2\",\n \"acc@1\": 71.878,\n \"acc@5\": 90.286,\n },\n )\n DEFAULT = IMAGENET1K_V1\n\n\n@handle_legacy_interface(weights=(\"pretrained\", MobileNet_V2_Weights.IMAGENET1K_V1))\ndef mobilenet_v2(\n *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any\n) -> MobileNetV2:\n weights = MobileNet_V2_Weights.verify(weights)\n\n if weights is not None:\n _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"]))\n\n model = MobileNetV2(**kwargs)\n\n if weights is not None:\n model.load_state_dict(weights.get_state_dict(progress=progress))\n\n return model\n", "path": "torchvision/prototype/models/mobilenetv2.py"}], "after_files": [{"content": "from functools import partial\nfrom typing import Any, Optional\n\nfrom torchvision.prototype.transforms import ImageNetEval\nfrom torchvision.transforms.functional import InterpolationMode\n\nfrom ...models.mobilenetv2 import MobileNetV2\nfrom ._api import WeightsEnum, Weights\nfrom ._meta import _IMAGENET_CATEGORIES\nfrom ._utils import 
handle_legacy_interface, _ovewrite_named_param\n\n\n__all__ = [\"MobileNetV2\", \"MobileNet_V2_Weights\", \"mobilenet_v2\"]\n\n\n_COMMON_META = {\n \"task\": \"image_classification\",\n \"architecture\": \"MobileNetV2\",\n \"publication_year\": 2018,\n \"num_params\": 3504872,\n \"size\": (224, 224),\n \"min_size\": (1, 1),\n \"categories\": _IMAGENET_CATEGORIES,\n \"interpolation\": InterpolationMode.BILINEAR,\n}\n\n\nclass MobileNet_V2_Weights(WeightsEnum):\n IMAGENET1K_V1 = Weights(\n url=\"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth\",\n transforms=partial(ImageNetEval, crop_size=224),\n meta={\n **_COMMON_META,\n \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2\",\n \"acc@1\": 71.878,\n \"acc@5\": 90.286,\n },\n )\n IMAGENET1K_V2 = Weights(\n url=\"https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth\",\n transforms=partial(ImageNetEval, crop_size=224, resize_size=232),\n meta={\n **_COMMON_META,\n \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\",\n \"acc@1\": 72.154,\n \"acc@5\": 90.822,\n },\n )\n DEFAULT = IMAGENET1K_V2\n\n\n@handle_legacy_interface(weights=(\"pretrained\", MobileNet_V2_Weights.IMAGENET1K_V1))\ndef mobilenet_v2(\n *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any\n) -> MobileNetV2:\n weights = MobileNet_V2_Weights.verify(weights)\n\n if weights is not None:\n _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"]))\n\n model = MobileNetV2(**kwargs)\n\n if weights is not None:\n model.load_state_dict(weights.get_state_dict(progress=progress))\n\n return model\n", "path": "torchvision/prototype/models/mobilenetv2.py"}]}
| 3,054 | 637 |
gh_patches_debug_997
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-841
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use correct test client parameters when sending json body payload
## Problem
Currently, when sending a request containing a JSON payload using the Django REST framework test client, the payload is converted into a string using `json.dumps`, but the Django REST framework provides convenience parameters that do this automatically.
## Proposed solution
Use the `format` parameter of the DRF test client, or set the default payload format in the DRF settings, so that the test client can handle the conversion automatically.
--- END ISSUE ---
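For reference, both options in the proposed solution map onto stock DRF test APIs; a brief sketch follows (the endpoint and payload are placeholders, not Mathesar's actual routes):

```python
from rest_framework.test import APIClient

client = APIClient()

# Option 1: pass format="json" per request and let DRF serialize the dict.
client.post("/api/example/", {"name": "patents"}, format="json")

# Option 2: make JSON the default test format in settings.py, then omit format=.
# REST_FRAMEWORK = {"TEST_REQUEST_DEFAULT_FORMAT": "json"}
```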
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `config/settings.py`
Content:
```
1 """
2 Django settings for config project.
3
4 Generated by 'django-admin startproject' using Django 3.1.7.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from pathlib import Path
15
16 from decouple import Csv, config as decouple_config
17 from dj_database_url import parse as db_url
18
19
20 # We use a 'tuple' with pipes as delimiters as decople naively splits the global
21 # variables on commas when casting to Csv()
22 def pipe_delim(pipe_string):
23 # Remove opening and closing brackets
24 pipe_string = pipe_string[1:-1]
25 # Split on pipe delim
26 return pipe_string.split("|")
27
28
29 # Build paths inside the project like this: BASE_DIR / 'subdir'.
30 BASE_DIR = Path(__file__).resolve().parent.parent
31
32 # Application definition
33
34 INSTALLED_APPS = [
35 "django.contrib.admin",
36 "django.contrib.auth",
37 "django.contrib.contenttypes",
38 "django.contrib.sessions",
39 "django.contrib.messages",
40 "django.contrib.staticfiles",
41 "rest_framework",
42 "django_filters",
43 "django_property_filter",
44 "mathesar",
45 ]
46
47 MIDDLEWARE = [
48 "django.middleware.security.SecurityMiddleware",
49 "django.contrib.sessions.middleware.SessionMiddleware",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.contrib.auth.middleware.AuthenticationMiddleware",
53 "django.contrib.messages.middleware.MessageMiddleware",
54 "django.middleware.clickjacking.XFrameOptionsMiddleware",
55 ]
56
57 ROOT_URLCONF = "config.urls"
58
59 TEMPLATES = [
60 {
61 "BACKEND": "django.template.backends.django.DjangoTemplates",
62 "DIRS": [],
63 "APP_DIRS": True,
64 "OPTIONS": {
65 "context_processors": [
66 "config.context_processors.frontend_settings",
67 "django.template.context_processors.debug",
68 "django.template.context_processors.request",
69 "django.contrib.auth.context_processors.auth",
70 "django.contrib.messages.context_processors.messages",
71 ],
72 },
73 },
74 ]
75
76 WSGI_APPLICATION = "config.wsgi.application"
77
78 # Database
79 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
80
81 # TODO: Add to documentation that database keys should not be than 128 characters.
82
83 # MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'
84 # See pipe_delim above for why we use pipes as delimiters
85 DATABASES = {
86 db_key: db_url(url_string)
87 for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))
88 }
89 DATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)
90
91 for db_key, db_dict in DATABASES.items():
92 # Engine can be '.postgresql' or '.postgresql_psycopg2'
93 if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):
94 raise ValueError(
95 f"{db_key} is not a PostgreSQL database. "
96 f"{db_dict['ENGINE']} found for {db_key}'s engine."
97 )
98
99
100 # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'
101 # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']
102 if decouple_config('TEST', default=False, cast=bool):
103 for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):
104 DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}
105
106
107 # Quick-start development settings - unsuitable for production
108 # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
109
110 # SECURITY WARNING: keep the secret key used in production secret!
111 SECRET_KEY = decouple_config('SECRET_KEY')
112
113 # SECURITY WARNING: don't run with debug turned on in production!
114 DEBUG = decouple_config('DEBUG', default=False, cast=bool)
115
116 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())
117
118 # Password validation
119 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
120
121 AUTH_PASSWORD_VALIDATORS = [
122 {
123 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
124 },
125 {
126 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
127 },
128 {
129 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
130 },
131 {
132 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
133 },
134 ]
135
136
137 # Internationalization
138 # https://docs.djangoproject.com/en/3.1/topics/i18n/
139
140 LANGUAGE_CODE = "en-us"
141
142 TIME_ZONE = "UTC"
143
144 USE_I18N = True
145
146 USE_L10N = True
147
148 USE_TZ = True
149
150
151 # Static files (CSS, JavaScript, Images)
152 # https://docs.djangoproject.com/en/3.1/howto/static-files/
153 # https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/
154
155 STATIC_URL = "/static/"
156
157 # When running with DEBUG=False, the webserver needs to serve files from this location
158 # python manage.py collectstatic has to be run to collect all static files into this location
159 # The files need to served in brotli or gzip compressed format
160 STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
161
162 # Media files (uploaded by the user)
163
164 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')
165
166 MEDIA_URL = "/media/"
167
168 # Update Authentication classes, removed BasicAuthentication
169 # Defaults: https://www.django-rest-framework.org/api-guide/settings/
170 REST_FRAMEWORK = {
171 'DEFAULT_AUTHENTICATION_CLASSES': [
172 'rest_framework.authentication.SessionAuthentication'
173 ],
174 'DEFAULT_FILTER_BACKENDS': (
175 'django_filters.rest_framework.DjangoFilterBackend',
176 'rest_framework.filters.OrderingFilter',
177 ),
178 }
179
180 # Mathesar settings
181 MATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')
182 MATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')
183 MATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')
184 MATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'
185
186
187 STATICFILES_DIRS = [MATHESAR_UI_BUILD_LOCATION]
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/config/settings.py b/config/settings.py
--- a/config/settings.py
+++ b/config/settings.py
@@ -175,6 +175,7 @@
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.OrderingFilter',
),
+ 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# Mathesar settings
|
{"golden_diff": "diff --git a/config/settings.py b/config/settings.py\n--- a/config/settings.py\n+++ b/config/settings.py\n@@ -175,6 +175,7 @@\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n+ 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n }\n \n # Mathesar settings\n", "issue": "Use correct test client parameters when sending json body payload\n## Problem\r\nCurrently, When sending a request containing a json payload using the Django rest framework test client, the payload is being converted into a string using `json.dumps` but the Django rest framework provides convenience parameters that does this automatically.\r\n\r\n## Proposed solution\r\nUse the `format` parameter of the DRF test client or set the default payload format in the DRF settings, in order for the test client to be able to handle the conversion automatically\n", "before_files": [{"content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', 
cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. \"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n}\n\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\n\n\nSTATICFILES_DIRS = [MATHESAR_UI_BUILD_LOCATION]\n", "path": "config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, 
see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. 
\"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n}\n\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\n\n\nSTATICFILES_DIRS = [MATHESAR_UI_BUILD_LOCATION]\n", "path": "config/settings.py"}]}
| 2,221 | 78 |
gh_patches_debug_32076
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-652
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"disable_shape_inference" in ORTOptimizer does not work
### System Info
```shell
optimum==1.6.1
```
### Who can help?
@echarlaix @philschmid
### Information
- [X] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)
- [ ] My own task or dataset (give details below)
### Reproduction
Using the following optimization config:
```
opt_conf = OptimizationConfig(optimization_level=0)
opt_conf.fp16 = True
opt_conf.enable_transformers_specific_optimizations = False
opt_conf.disable_embed_layer_norm_fusion = False
opt_conf.disable_shape_inference = True
```
The error keeps appearing (GPT-J model): `ZeroDivisionError: integer division or modulo by zero`
The error trace contains this:
```
File ~/miniconda3/envs/pytorch/lib/python3.10/site-packages/onnxruntime/transformers/models/gpt2/../../onnx_model.py:594, in OnnxModel.convert_float_to_float16(self, use_symbolic_shape_infer, **kwargs)
591 if use_symbolic_shape_infer:
592 # Use symbolic shape inference since custom operators (like Gelu, SkipLayerNormalization etc) are not recognized by onnx shape inference.
593 shape_infer_helper = SymbolicShapeInferenceHelper(model)
--> 594 model = shape_infer_helper.infer_shapes(model, auto_merge=True, guess_output_rank=False)
596 parameters = {"disable_shape_infer": use_symbolic_shape_infer}
597 parameters.update(
598 {
599 key: kwargs[key]
(...)
609 }
610 )
```
If you check the code in optimum/onnxruntime/optimization.py at line 159, you will find that
`optimizer.convert_float_to_float16(keep_io_types=True)` is never passed a parameter that disables symbolic shape inference.
However, the signature of this method is:
`def convert_float_to_float16(self, use_symbolic_shape_infer=True, **kwargs):`
### Expected behavior
I would like shape inference errors to be explained more clearly, and I would like to be able to actually disable shape inference.
--- END ISSUE ---
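In other words, the flag from `OptimizationConfig` never reaches the ONNX Runtime helper. A minimal sketch of how the call could forward it (this mirrors the fix shown in the diff below, not a verbatim excerpt of the final patch):
```python
if optimization_config.fp16:
    # keep_io_types keeps the model inputs/outputs as float32
    optimizer.convert_float_to_float16(
        use_symbolic_shape_infer=not optimization_config.disable_shape_inference,
        keep_io_types=True,
    )
```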
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimum/onnxruntime/optimization.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Main class for performing graph optimization with ONNX Runtime."""
15
16 import logging
17 import os
18 from pathlib import Path
19 from typing import TYPE_CHECKING, Dict, List, Optional, Union
20
21 from transformers.models.auto.configuration_auto import AutoConfig
22
23 from onnx import load_model
24 from onnxruntime.transformers.onnx_model_bert import BertOnnxModel
25 from onnxruntime.transformers.optimizer import optimize_model
26
27 from ..utils import CONFIG_NAME, NormalizedConfigManager
28 from ..utils.save_utils import maybe_save_preprocessors
29 from .configuration import OptimizationConfig, ORTConfig
30 from .modeling_ort import ORTModel
31 from .modeling_seq2seq import ORTModelForSeq2SeqLM
32 from .utils import ONNX_WEIGHTS_NAME, ORTConfigManager
33
34
35 if TYPE_CHECKING:
36 from transformers import PretrainedConfig
37
38
39 LOGGER = logging.getLogger(__name__)
40
41
42 class ORTOptimizer:
43 """
44 Handles the ONNX Runtime optimization process for models shared on huggingface.co/models.
45 """
46
47 def __init__(self, onnx_model_path: List[os.PathLike], config: "PretrainedConfig"):
48 """
49 Args:
50 onnx_model_path (`List[os.PathLike]`):
51 The paths of the onnx models to optimize.
52 config ([`~transformers.PretrainedConfig`]):
53 An instance of the configuration associated to the model to optimize.
54 """
55 super().__init__()
56 self.onnx_model_path = onnx_model_path
57 self.config = config
58 self.model_type = self.config.model_type
59 self.normalized_config = NormalizedConfigManager.get_normalized_config_class(self.model_type)(self.config)
60
61 @classmethod
62 def from_pretrained(
63 cls, model_or_path: Union[str, os.PathLike, ORTModel], file_names: Optional[List[str]] = None
64 ) -> "ORTOptimizer":
65 """
66 Args:
67 model_or_path (`Union[str, os.PathLike, ORTModel]`):
68 The path to a local directory hosting the model to optimize or an instance of an `ORTModel` to quantize.
69 Can be either:
70 - A path to a local *directory* containing the model to optimize.
71 - An instance of [`~optimum.onnxruntime.ORTModel`].
72 file_names(`Optional[List[str]]`, *optional*):
73 The list of file names of the models to optimize.
74 """
75 onnx_model_path = []
76 config = None
77 if isinstance(model_or_path, ORTModel):
78 if isinstance(model_or_path, ORTModelForSeq2SeqLM):
79 onnx_model_path += [
80 model_or_path.encoder_model_path,
81 model_or_path.decoder_model_path,
82 ]
83 # Add the decoder with past key/values if present
84 if model_or_path.use_cache:
85 onnx_model_path.append(model_or_path.decoder_with_past_model_path)
86 else:
87 onnx_model_path.append(model_or_path.model_path)
88 config = model_or_path.config
89 elif os.path.isdir(model_or_path):
90 file_names = [ONNX_WEIGHTS_NAME] if file_names is None else file_names
91 model_or_path = Path(model_or_path)
92 if CONFIG_NAME not in os.listdir(model_or_path):
93 raise ValueError(f"The local directory does not contain the configuration file {CONFIG_NAME}.")
94 config = AutoConfig.from_pretrained(model_or_path)
95 for file_name in file_names:
96 onnx_model_path.append(model_or_path.joinpath(file_name))
97 else:
98 raise ValueError(f"Unable to load the model from {model_or_path}.")
99 return cls(onnx_model_path, config=config)
100
101 def optimize(
102 self,
103 optimization_config: OptimizationConfig,
104 save_dir: Union[str, os.PathLike],
105 file_suffix: Optional[str] = "optimized",
106 use_external_data_format: bool = False,
107 one_external_file: bool = True,
108 ):
109 """
110 Optimizes a model given the optimization specifications defined in `optimization_config`.
111
112 Args:
113 optimization_config ([`~optimum.onnxruntime.OptimizationConfig`]):
114 The configuration containing the parameters related to optimization.
115 save_dir (`Union[str, os.PathLike]`):
116 The path used to save the optimized model.
117 file_suffix (`str`, *optional*, defaults to `"optimized"`):
118 The file suffix used to save the optimized model.
119 use_external_data_format (`bool`, *optional*, defaults to `False`):
120 Whether to use external data format to store model of size >= 2Gb.
121 one_external_file (`bool`, defaults to `True`):
122 When `use_external_data_format=True`, whether to save all tensors to one external file.
123 If false, save each tensor to a file named with the tensor name.
124
125 """
126 save_dir = Path(save_dir)
127 save_dir.mkdir(parents=True, exist_ok=True)
128 ORTConfigManager.check_optimization_supported_model(self.model_type)
129
130 self.config.save_pretrained(save_dir)
131 maybe_save_preprocessors(self.onnx_model_path[0].parent, save_dir)
132
133 # Create and save the configuration summarizing all the parameters related to optimization
134 ort_config = ORTConfig(
135 optimization=optimization_config,
136 use_external_data_format=use_external_data_format,
137 one_external_file=one_external_file,
138 )
139
140 model_type = ORTConfigManager.get_model_ort_type(self.config.model_type)
141 optimization_options = optimization_config.create_fusion_options(model_type)
142
143 LOGGER.info("Optimizing model...")
144
145 for model_path in self.onnx_model_path:
146 optimizer = optimize_model(
147 model_path.as_posix(),
148 model_type,
149 self.normalized_config.num_attention_heads,
150 self.normalized_config.hidden_size,
151 opt_level=optimization_config.optimization_level,
152 optimization_options=optimization_options,
153 use_gpu=optimization_config.optimize_for_gpu,
154 only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,
155 )
156
157 if optimization_config.fp16:
158 # keep_io_types to keep inputs/outputs as float32
159 optimizer.convert_float_to_float16(keep_io_types=True)
160
161 suffix = f"_{file_suffix}" if file_suffix else ""
162 output_path = save_dir.joinpath(f"{model_path.stem}{suffix}").with_suffix(model_path.suffix)
163 optimizer.save_model_to_file(output_path.as_posix(), use_external_data_format, one_external_file)
164
165 # Save the model configuration
166 self.config.save_pretrained(save_dir)
167 ort_config.save_pretrained(save_dir)
168
169 LOGGER.info(
170 f"Optimized model saved at: {save_dir} (external data format: "
171 f"{use_external_data_format}; saved all tensor to one file: "
172 f"{one_external_file})"
173 )
174
175 return Path(save_dir)
176
177 @staticmethod
178 def get_fused_operators(onnx_model_path: Union[str, os.PathLike]) -> Dict[str, int]:
179 """
180 Computes the dictionary mapping the name of the fused operators to their number of apparition in the model.
181
182 Args:
183 onnx_model_path (`Union[str, os.PathLike]`):
184 Path of the ONNX model.
185
186 Returns:
187 The dictionary mapping the name of the fused operators to their number of apparition in the model.
188 """
189 onnx_optimized_model = BertOnnxModel(load_model(onnx_model_path))
190 fused_operator = onnx_optimized_model.get_fused_operator_statistics()
191 LOGGER.info(
192 f"The following operators were fused : { ', '.join([k for k,v in fused_operator.items() if v > 0])}"
193 )
194 return {k: v for k, v in fused_operator.items() if v > 0}
195
196 @staticmethod
197 def get_nodes_number_difference(
198 onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]
199 ) -> int:
200 """
201 Compute the difference in the number of nodes between the original and the optimized model.
202
203 Args:
204 onnx_model_path (`Union[str, os.PathLike]`):
205 Path of the ONNX model.
206 onnx_optimized_model_path (`Union[str, os.PathLike]`):
207 Path of the optimized ONNX model.
208
209 Returns:
210 The difference in the number of nodes between the original and the optimized model.
211 """
212 onnx_model = BertOnnxModel(load_model(onnx_model_path))
213 onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))
214
215 # Information in the number of nodes decrease resulting from optimization
216 nodes_number_onnx_model = len(onnx_model.nodes())
217 nodes_number_onnx_optimized_model = len(onnx_optimized_model.nodes())
218 difference_nodes_number = nodes_number_onnx_model - nodes_number_onnx_optimized_model
219 LOGGER.info(
220 f"There are {nodes_number_onnx_model} nodes before optimization and {nodes_number_onnx_optimized_model}"
221 f"nodes after. The number of nodes removed is {difference_nodes_number}"
222 )
223 return difference_nodes_number
224
225 @staticmethod
226 def get_operators_difference(
227 onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]
228 ) -> Dict[str, int]:
229 """
230 Compute the dictionary mapping the operators name to the difference in the number of corresponding nodes between
231 the original and the optimized model.
232
233 Args:
234 onnx_model_path (`Union[str, os.PathLike]`):
235 Path of the ONNX model.
236 onnx_optimized_model_path (`Union[str, os.PathLike]`):
237 Path of the optimized ONNX model.
238
239 Returns:
240 The dictionary mapping the operators name to the difference in the number of corresponding nodes between the
241 original and the optimized model.
242 """
243 onnx_model = BertOnnxModel(load_model(onnx_model_path))
244 onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))
245
246 def nodes_difference_given_type(op_type):
247 onnx_model_nodes_with_op_type = len(onnx_model.get_nodes_by_op_type(op_type))
248 onnx_optimized_model_nodes_with_op_type = len(onnx_optimized_model.get_nodes_by_op_type(op_type))
249 return onnx_model_nodes_with_op_type - onnx_optimized_model_nodes_with_op_type
250
251 # Compute operators difference between the original and the optimized models
252 op_types = set()
253 for model in [onnx_model, onnx_optimized_model]:
254 for node in model.nodes():
255 op_types.add(node.op_type)
256
257 operators_difference = dict(map(lambda op_type: (op_type, nodes_difference_given_type(op_type)), op_types))
258 return {k: v for k, v in operators_difference.items() if v != 0}
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optimum/onnxruntime/optimization.py b/optimum/onnxruntime/optimization.py
--- a/optimum/onnxruntime/optimization.py
+++ b/optimum/onnxruntime/optimization.py
@@ -143,20 +143,30 @@
LOGGER.info("Optimizing model...")
for model_path in self.onnx_model_path:
- optimizer = optimize_model(
- model_path.as_posix(),
- model_type,
- self.normalized_config.num_attention_heads,
- self.normalized_config.hidden_size,
- opt_level=optimization_config.optimization_level,
- optimization_options=optimization_options,
- use_gpu=optimization_config.optimize_for_gpu,
- only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,
- )
-
- if optimization_config.fp16:
- # keep_io_types to keep inputs/outputs as float32
- optimizer.convert_float_to_float16(keep_io_types=True)
+ try:
+ optimizer = optimize_model(
+ model_path.as_posix(),
+ model_type,
+ self.normalized_config.num_attention_heads,
+ self.normalized_config.hidden_size,
+ opt_level=optimization_config.optimization_level,
+ optimization_options=optimization_options,
+ use_gpu=optimization_config.optimize_for_gpu,
+ only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,
+ )
+
+ if optimization_config.fp16:
+ # keep_io_types to keep inputs/outputs as float32
+ optimizer.convert_float_to_float16(
+ use_symbolic_shape_infer=not optimization_config.disable_shape_inference, keep_io_types=True
+ )
+ except Exception as e:
+ if "Incomplete symbolic shape inference" in str(e):
+ err = RuntimeError(
+ f"{str(e)}. Try to set `disable_shape_inference=True` in your optimization configuration."
+ )
+ raise err from e
+ raise
suffix = f"_{file_suffix}" if file_suffix else ""
output_path = save_dir.joinpath(f"{model_path.stem}{suffix}").with_suffix(model_path.suffix)
|
{"golden_diff": "diff --git a/optimum/onnxruntime/optimization.py b/optimum/onnxruntime/optimization.py\n--- a/optimum/onnxruntime/optimization.py\n+++ b/optimum/onnxruntime/optimization.py\n@@ -143,20 +143,30 @@\n LOGGER.info(\"Optimizing model...\")\n \n for model_path in self.onnx_model_path:\n- optimizer = optimize_model(\n- model_path.as_posix(),\n- model_type,\n- self.normalized_config.num_attention_heads,\n- self.normalized_config.hidden_size,\n- opt_level=optimization_config.optimization_level,\n- optimization_options=optimization_options,\n- use_gpu=optimization_config.optimize_for_gpu,\n- only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,\n- )\n-\n- if optimization_config.fp16:\n- # keep_io_types to keep inputs/outputs as float32\n- optimizer.convert_float_to_float16(keep_io_types=True)\n+ try:\n+ optimizer = optimize_model(\n+ model_path.as_posix(),\n+ model_type,\n+ self.normalized_config.num_attention_heads,\n+ self.normalized_config.hidden_size,\n+ opt_level=optimization_config.optimization_level,\n+ optimization_options=optimization_options,\n+ use_gpu=optimization_config.optimize_for_gpu,\n+ only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,\n+ )\n+\n+ if optimization_config.fp16:\n+ # keep_io_types to keep inputs/outputs as float32\n+ optimizer.convert_float_to_float16(\n+ use_symbolic_shape_infer=not optimization_config.disable_shape_inference, keep_io_types=True\n+ )\n+ except Exception as e:\n+ if \"Incomplete symbolic shape inference\" in str(e):\n+ err = RuntimeError(\n+ f\"{str(e)}. Try to set `disable_shape_inference=True` in your optimization configuration.\"\n+ )\n+ raise err from e\n+ raise\n \n suffix = f\"_{file_suffix}\" if file_suffix else \"\"\n output_path = save_dir.joinpath(f\"{model_path.stem}{suffix}\").with_suffix(model_path.suffix)\n", "issue": "\"disable_shape_inference\" in ORTOptimizer does not work\n### System Info\r\n\r\n```shell\r\noptimum==1.6.1\r\n```\r\n\r\n\r\n### Who can help?\r\n\r\n@echarlaix @philschmid \r\n\r\n### Information\r\n\r\n- [X] The official example scripts\r\n- [ ] My own modified scripts\r\n\r\n### Tasks\r\n\r\n- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)\r\n- [ ] My own task or dataset (give details below)\r\n\r\n### Reproduction\r\n\r\nUsing following opt. 
config:\r\n```\r\nopt_conf = OptimizationConfig(optimization_level=0)\r\nopt_conf.fp16 = True\r\nopt_conf.enable_transformers_specific_optimizations = False\r\nopt_conf.disable_embed_layer_norm_fusion = False\r\nopt_conf.disable_shape_inference = True\r\n```\r\nThe error keeps appearing (GPT-J model): `ZeroDivisionError: integer division or modulo by zero`\r\n\r\nError trace contains this:\r\n```\r\nFile ~/miniconda3/envs/pytorch/lib/python3.10/site-packages/onnxruntime/transformers/models/gpt2/../../onnx_model.py:594, in OnnxModel.convert_float_to_float16(self, use_symbolic_shape_infer, **kwargs)\r\n 591 if use_symbolic_shape_infer:\r\n 592 # Use symbolic shape inference since custom operators (like Gelu, SkipLayerNormalization etc) are not recognized by onnx shape inference.\r\n 593 shape_infer_helper = SymbolicShapeInferenceHelper(model)\r\n--> 594 model = shape_infer_helper.infer_shapes(model, auto_merge=True, guess_output_rank=False)\r\n 596 parameters = {\"disable_shape_infer\": use_symbolic_shape_infer}\r\n 597 parameters.update(\r\n 598 {\r\n 599 key: kwargs[key]\r\n (...)\r\n 609 }\r\n 610 )\r\n```\r\n\r\nAnd if you check the code in optimum/onnxruntime/optimization.py on line number 159 you will find that\r\n`optimizer.convert_float_to_float16(keep_io_types=True)` doesn't get a parameter that disables symbolic shape inference.\r\n\r\nHowever, the signature of this method is:\r\n`def convert_float_to_float16(self, use_symbolic_shape_infer=True, **kwargs):`\r\n\r\n### Expected behavior\r\n\r\nI would like the errors with shape inference to be better explained and to be able to turn it off for real.\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main class for performing graph optimization with ONNX Runtime.\"\"\"\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Union\n\nfrom transformers.models.auto.configuration_auto import AutoConfig\n\nfrom onnx import load_model\nfrom onnxruntime.transformers.onnx_model_bert import BertOnnxModel\nfrom onnxruntime.transformers.optimizer import optimize_model\n\nfrom ..utils import CONFIG_NAME, NormalizedConfigManager\nfrom ..utils.save_utils import maybe_save_preprocessors\nfrom .configuration import OptimizationConfig, ORTConfig\nfrom .modeling_ort import ORTModel\nfrom .modeling_seq2seq import ORTModelForSeq2SeqLM\nfrom .utils import ONNX_WEIGHTS_NAME, ORTConfigManager\n\n\nif TYPE_CHECKING:\n from transformers import PretrainedConfig\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ORTOptimizer:\n \"\"\"\n Handles the ONNX Runtime optimization process for models shared on huggingface.co/models.\n \"\"\"\n\n def __init__(self, onnx_model_path: List[os.PathLike], config: \"PretrainedConfig\"):\n \"\"\"\n Args:\n onnx_model_path (`List[os.PathLike]`):\n The paths of the onnx models to optimize.\n config ([`~transformers.PretrainedConfig`]):\n An instance of the configuration 
associated to the model to optimize.\n \"\"\"\n super().__init__()\n self.onnx_model_path = onnx_model_path\n self.config = config\n self.model_type = self.config.model_type\n self.normalized_config = NormalizedConfigManager.get_normalized_config_class(self.model_type)(self.config)\n\n @classmethod\n def from_pretrained(\n cls, model_or_path: Union[str, os.PathLike, ORTModel], file_names: Optional[List[str]] = None\n ) -> \"ORTOptimizer\":\n \"\"\"\n Args:\n model_or_path (`Union[str, os.PathLike, ORTModel]`):\n The path to a local directory hosting the model to optimize or an instance of an `ORTModel` to quantize.\n Can be either:\n - A path to a local *directory* containing the model to optimize.\n - An instance of [`~optimum.onnxruntime.ORTModel`].\n file_names(`Optional[List[str]]`, *optional*):\n The list of file names of the models to optimize.\n \"\"\"\n onnx_model_path = []\n config = None\n if isinstance(model_or_path, ORTModel):\n if isinstance(model_or_path, ORTModelForSeq2SeqLM):\n onnx_model_path += [\n model_or_path.encoder_model_path,\n model_or_path.decoder_model_path,\n ]\n # Add the decoder with past key/values if present\n if model_or_path.use_cache:\n onnx_model_path.append(model_or_path.decoder_with_past_model_path)\n else:\n onnx_model_path.append(model_or_path.model_path)\n config = model_or_path.config\n elif os.path.isdir(model_or_path):\n file_names = [ONNX_WEIGHTS_NAME] if file_names is None else file_names\n model_or_path = Path(model_or_path)\n if CONFIG_NAME not in os.listdir(model_or_path):\n raise ValueError(f\"The local directory does not contain the configuration file {CONFIG_NAME}.\")\n config = AutoConfig.from_pretrained(model_or_path)\n for file_name in file_names:\n onnx_model_path.append(model_or_path.joinpath(file_name))\n else:\n raise ValueError(f\"Unable to load the model from {model_or_path}.\")\n return cls(onnx_model_path, config=config)\n\n def optimize(\n self,\n optimization_config: OptimizationConfig,\n save_dir: Union[str, os.PathLike],\n file_suffix: Optional[str] = \"optimized\",\n use_external_data_format: bool = False,\n one_external_file: bool = True,\n ):\n \"\"\"\n Optimizes a model given the optimization specifications defined in `optimization_config`.\n\n Args:\n optimization_config ([`~optimum.onnxruntime.OptimizationConfig`]):\n The configuration containing the parameters related to optimization.\n save_dir (`Union[str, os.PathLike]`):\n The path used to save the optimized model.\n file_suffix (`str`, *optional*, defaults to `\"optimized\"`):\n The file suffix used to save the optimized model.\n use_external_data_format (`bool`, *optional*, defaults to `False`):\n Whether to use external data format to store model of size >= 2Gb.\n one_external_file (`bool`, defaults to `True`):\n When `use_external_data_format=True`, whether to save all tensors to one external file.\n If false, save each tensor to a file named with the tensor name.\n\n \"\"\"\n save_dir = Path(save_dir)\n save_dir.mkdir(parents=True, exist_ok=True)\n ORTConfigManager.check_optimization_supported_model(self.model_type)\n\n self.config.save_pretrained(save_dir)\n maybe_save_preprocessors(self.onnx_model_path[0].parent, save_dir)\n\n # Create and save the configuration summarizing all the parameters related to optimization\n ort_config = ORTConfig(\n optimization=optimization_config,\n use_external_data_format=use_external_data_format,\n one_external_file=one_external_file,\n )\n\n model_type = ORTConfigManager.get_model_ort_type(self.config.model_type)\n 
optimization_options = optimization_config.create_fusion_options(model_type)\n\n LOGGER.info(\"Optimizing model...\")\n\n for model_path in self.onnx_model_path:\n optimizer = optimize_model(\n model_path.as_posix(),\n model_type,\n self.normalized_config.num_attention_heads,\n self.normalized_config.hidden_size,\n opt_level=optimization_config.optimization_level,\n optimization_options=optimization_options,\n use_gpu=optimization_config.optimize_for_gpu,\n only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,\n )\n\n if optimization_config.fp16:\n # keep_io_types to keep inputs/outputs as float32\n optimizer.convert_float_to_float16(keep_io_types=True)\n\n suffix = f\"_{file_suffix}\" if file_suffix else \"\"\n output_path = save_dir.joinpath(f\"{model_path.stem}{suffix}\").with_suffix(model_path.suffix)\n optimizer.save_model_to_file(output_path.as_posix(), use_external_data_format, one_external_file)\n\n # Save the model configuration\n self.config.save_pretrained(save_dir)\n ort_config.save_pretrained(save_dir)\n\n LOGGER.info(\n f\"Optimized model saved at: {save_dir} (external data format: \"\n f\"{use_external_data_format}; saved all tensor to one file: \"\n f\"{one_external_file})\"\n )\n\n return Path(save_dir)\n\n @staticmethod\n def get_fused_operators(onnx_model_path: Union[str, os.PathLike]) -> Dict[str, int]:\n \"\"\"\n Computes the dictionary mapping the name of the fused operators to their number of apparition in the model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n\n Returns:\n The dictionary mapping the name of the fused operators to their number of apparition in the model.\n \"\"\"\n onnx_optimized_model = BertOnnxModel(load_model(onnx_model_path))\n fused_operator = onnx_optimized_model.get_fused_operator_statistics()\n LOGGER.info(\n f\"The following operators were fused : { ', '.join([k for k,v in fused_operator.items() if v > 0])}\"\n )\n return {k: v for k, v in fused_operator.items() if v > 0}\n\n @staticmethod\n def get_nodes_number_difference(\n onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]\n ) -> int:\n \"\"\"\n Compute the difference in the number of nodes between the original and the optimized model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n onnx_optimized_model_path (`Union[str, os.PathLike]`):\n Path of the optimized ONNX model.\n\n Returns:\n The difference in the number of nodes between the original and the optimized model.\n \"\"\"\n onnx_model = BertOnnxModel(load_model(onnx_model_path))\n onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))\n\n # Information in the number of nodes decrease resulting from optimization\n nodes_number_onnx_model = len(onnx_model.nodes())\n nodes_number_onnx_optimized_model = len(onnx_optimized_model.nodes())\n difference_nodes_number = nodes_number_onnx_model - nodes_number_onnx_optimized_model\n LOGGER.info(\n f\"There are {nodes_number_onnx_model} nodes before optimization and {nodes_number_onnx_optimized_model}\"\n f\"nodes after. 
The number of nodes removed is {difference_nodes_number}\"\n )\n return difference_nodes_number\n\n @staticmethod\n def get_operators_difference(\n onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]\n ) -> Dict[str, int]:\n \"\"\"\n Compute the dictionary mapping the operators name to the difference in the number of corresponding nodes between\n the original and the optimized model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n onnx_optimized_model_path (`Union[str, os.PathLike]`):\n Path of the optimized ONNX model.\n\n Returns:\n The dictionary mapping the operators name to the difference in the number of corresponding nodes between the\n original and the optimized model.\n \"\"\"\n onnx_model = BertOnnxModel(load_model(onnx_model_path))\n onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))\n\n def nodes_difference_given_type(op_type):\n onnx_model_nodes_with_op_type = len(onnx_model.get_nodes_by_op_type(op_type))\n onnx_optimized_model_nodes_with_op_type = len(onnx_optimized_model.get_nodes_by_op_type(op_type))\n return onnx_model_nodes_with_op_type - onnx_optimized_model_nodes_with_op_type\n\n # Compute operators difference between the original and the optimized models\n op_types = set()\n for model in [onnx_model, onnx_optimized_model]:\n for node in model.nodes():\n op_types.add(node.op_type)\n\n operators_difference = dict(map(lambda op_type: (op_type, nodes_difference_given_type(op_type)), op_types))\n return {k: v for k, v in operators_difference.items() if v != 0}\n", "path": "optimum/onnxruntime/optimization.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main class for performing graph optimization with ONNX Runtime.\"\"\"\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Union\n\nfrom transformers.models.auto.configuration_auto import AutoConfig\n\nfrom onnx import load_model\nfrom onnxruntime.transformers.onnx_model_bert import BertOnnxModel\nfrom onnxruntime.transformers.optimizer import optimize_model\n\nfrom ..utils import CONFIG_NAME, NormalizedConfigManager\nfrom ..utils.save_utils import maybe_save_preprocessors\nfrom .configuration import OptimizationConfig, ORTConfig\nfrom .modeling_ort import ORTModel\nfrom .modeling_seq2seq import ORTModelForSeq2SeqLM\nfrom .utils import ONNX_WEIGHTS_NAME, ORTConfigManager\n\n\nif TYPE_CHECKING:\n from transformers import PretrainedConfig\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ORTOptimizer:\n \"\"\"\n Handles the ONNX Runtime optimization process for models shared on huggingface.co/models.\n \"\"\"\n\n def __init__(self, onnx_model_path: List[os.PathLike], config: \"PretrainedConfig\"):\n \"\"\"\n Args:\n onnx_model_path (`List[os.PathLike]`):\n The paths of the onnx models to optimize.\n config ([`~transformers.PretrainedConfig`]):\n An instance of the 
configuration associated to the model to optimize.\n \"\"\"\n super().__init__()\n self.onnx_model_path = onnx_model_path\n self.config = config\n self.model_type = self.config.model_type\n self.normalized_config = NormalizedConfigManager.get_normalized_config_class(self.model_type)(self.config)\n\n @classmethod\n def from_pretrained(\n cls, model_or_path: Union[str, os.PathLike, ORTModel], file_names: Optional[List[str]] = None\n ) -> \"ORTOptimizer\":\n \"\"\"\n Args:\n model_or_path (`Union[str, os.PathLike, ORTModel]`):\n The path to a local directory hosting the model to optimize or an instance of an `ORTModel` to quantize.\n Can be either:\n - A path to a local *directory* containing the model to optimize.\n - An instance of [`~optimum.onnxruntime.ORTModel`].\n file_names(`Optional[List[str]]`, *optional*):\n The list of file names of the models to optimize.\n \"\"\"\n onnx_model_path = []\n config = None\n if isinstance(model_or_path, ORTModel):\n if isinstance(model_or_path, ORTModelForSeq2SeqLM):\n onnx_model_path += [\n model_or_path.encoder_model_path,\n model_or_path.decoder_model_path,\n ]\n # Add the decoder with past key/values if present\n if model_or_path.use_cache:\n onnx_model_path.append(model_or_path.decoder_with_past_model_path)\n else:\n onnx_model_path.append(model_or_path.model_path)\n config = model_or_path.config\n elif os.path.isdir(model_or_path):\n file_names = [ONNX_WEIGHTS_NAME] if file_names is None else file_names\n model_or_path = Path(model_or_path)\n if CONFIG_NAME not in os.listdir(model_or_path):\n raise ValueError(f\"The local directory does not contain the configuration file {CONFIG_NAME}.\")\n config = AutoConfig.from_pretrained(model_or_path)\n for file_name in file_names:\n onnx_model_path.append(model_or_path.joinpath(file_name))\n else:\n raise ValueError(f\"Unable to load the model from {model_or_path}.\")\n return cls(onnx_model_path, config=config)\n\n def optimize(\n self,\n optimization_config: OptimizationConfig,\n save_dir: Union[str, os.PathLike],\n file_suffix: Optional[str] = \"optimized\",\n use_external_data_format: bool = False,\n one_external_file: bool = True,\n ):\n \"\"\"\n Optimizes a model given the optimization specifications defined in `optimization_config`.\n\n Args:\n optimization_config ([`~optimum.onnxruntime.OptimizationConfig`]):\n The configuration containing the parameters related to optimization.\n save_dir (`Union[str, os.PathLike]`):\n The path used to save the optimized model.\n file_suffix (`str`, *optional*, defaults to `\"optimized\"`):\n The file suffix used to save the optimized model.\n use_external_data_format (`bool`, *optional*, defaults to `False`):\n Whether to use external data format to store model of size >= 2Gb.\n one_external_file (`bool`, defaults to `True`):\n When `use_external_data_format=True`, whether to save all tensors to one external file.\n If false, save each tensor to a file named with the tensor name.\n\n \"\"\"\n save_dir = Path(save_dir)\n save_dir.mkdir(parents=True, exist_ok=True)\n ORTConfigManager.check_optimization_supported_model(self.model_type)\n\n self.config.save_pretrained(save_dir)\n maybe_save_preprocessors(self.onnx_model_path[0].parent, save_dir)\n\n # Create and save the configuration summarizing all the parameters related to optimization\n ort_config = ORTConfig(\n optimization=optimization_config,\n use_external_data_format=use_external_data_format,\n one_external_file=one_external_file,\n )\n\n model_type = 
ORTConfigManager.get_model_ort_type(self.config.model_type)\n optimization_options = optimization_config.create_fusion_options(model_type)\n\n LOGGER.info(\"Optimizing model...\")\n\n for model_path in self.onnx_model_path:\n try:\n optimizer = optimize_model(\n model_path.as_posix(),\n model_type,\n self.normalized_config.num_attention_heads,\n self.normalized_config.hidden_size,\n opt_level=optimization_config.optimization_level,\n optimization_options=optimization_options,\n use_gpu=optimization_config.optimize_for_gpu,\n only_onnxruntime=not optimization_config.enable_transformers_specific_optimizations,\n )\n\n if optimization_config.fp16:\n # keep_io_types to keep inputs/outputs as float32\n optimizer.convert_float_to_float16(\n use_symbolic_shape_infer=not optimization_config.disable_shape_inference, keep_io_types=True\n )\n except Exception as e:\n if \"Incomplete symbolic shape inference\" in str(e):\n err = RuntimeError(\n f\"{str(e)}. Try to set `disable_shape_inference=True` in your optimization configuration.\"\n )\n raise err from e\n raise\n\n suffix = f\"_{file_suffix}\" if file_suffix else \"\"\n output_path = save_dir.joinpath(f\"{model_path.stem}{suffix}\").with_suffix(model_path.suffix)\n optimizer.save_model_to_file(output_path.as_posix(), use_external_data_format, one_external_file)\n\n # Save the model configuration\n self.config.save_pretrained(save_dir)\n ort_config.save_pretrained(save_dir)\n\n LOGGER.info(\n f\"Optimized model saved at: {save_dir} (external data format: \"\n f\"{use_external_data_format}; saved all tensor to one file: \"\n f\"{one_external_file})\"\n )\n\n return Path(save_dir)\n\n @staticmethod\n def get_fused_operators(onnx_model_path: Union[str, os.PathLike]) -> Dict[str, int]:\n \"\"\"\n Computes the dictionary mapping the name of the fused operators to their number of apparition in the model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n\n Returns:\n The dictionary mapping the name of the fused operators to their number of apparition in the model.\n \"\"\"\n onnx_optimized_model = BertOnnxModel(load_model(onnx_model_path))\n fused_operator = onnx_optimized_model.get_fused_operator_statistics()\n LOGGER.info(\n f\"The following operators were fused : { ', '.join([k for k,v in fused_operator.items() if v > 0])}\"\n )\n return {k: v for k, v in fused_operator.items() if v > 0}\n\n @staticmethod\n def get_nodes_number_difference(\n onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]\n ) -> int:\n \"\"\"\n Compute the difference in the number of nodes between the original and the optimized model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n onnx_optimized_model_path (`Union[str, os.PathLike]`):\n Path of the optimized ONNX model.\n\n Returns:\n The difference in the number of nodes between the original and the optimized model.\n \"\"\"\n onnx_model = BertOnnxModel(load_model(onnx_model_path))\n onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))\n\n # Information in the number of nodes decrease resulting from optimization\n nodes_number_onnx_model = len(onnx_model.nodes())\n nodes_number_onnx_optimized_model = len(onnx_optimized_model.nodes())\n difference_nodes_number = nodes_number_onnx_model - nodes_number_onnx_optimized_model\n LOGGER.info(\n f\"There are {nodes_number_onnx_model} nodes before optimization and {nodes_number_onnx_optimized_model}\"\n f\"nodes after. 
The number of nodes removed is {difference_nodes_number}\"\n )\n return difference_nodes_number\n\n @staticmethod\n def get_operators_difference(\n onnx_model_path: Union[str, os.PathLike], onnx_optimized_model_path: Union[str, os.PathLike]\n ) -> Dict[str, int]:\n \"\"\"\n Compute the dictionary mapping the operators name to the difference in the number of corresponding nodes between\n the original and the optimized model.\n\n Args:\n onnx_model_path (`Union[str, os.PathLike]`):\n Path of the ONNX model.\n onnx_optimized_model_path (`Union[str, os.PathLike]`):\n Path of the optimized ONNX model.\n\n Returns:\n The dictionary mapping the operators name to the difference in the number of corresponding nodes between the\n original and the optimized model.\n \"\"\"\n onnx_model = BertOnnxModel(load_model(onnx_model_path))\n onnx_optimized_model = BertOnnxModel(load_model(onnx_optimized_model_path))\n\n def nodes_difference_given_type(op_type):\n onnx_model_nodes_with_op_type = len(onnx_model.get_nodes_by_op_type(op_type))\n onnx_optimized_model_nodes_with_op_type = len(onnx_optimized_model.get_nodes_by_op_type(op_type))\n return onnx_model_nodes_with_op_type - onnx_optimized_model_nodes_with_op_type\n\n # Compute operators difference between the original and the optimized models\n op_types = set()\n for model in [onnx_model, onnx_optimized_model]:\n for node in model.nodes():\n op_types.add(node.op_type)\n\n operators_difference = dict(map(lambda op_type: (op_type, nodes_difference_given_type(op_type)), op_types))\n return {k: v for k, v in operators_difference.items() if v != 0}\n", "path": "optimum/onnxruntime/optimization.py"}]}
| 3,906 | 473 |
| gh_patches_debug_4708 | rasdani/github-patches | git_diff | biolab__orange3-text-360 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TheGuardianCredentials returns wrong valid property
##### Text version
0.3.0
##### Orange version
3.14
##### Expected behavior
``` python
credentials = TheGuardianCredentials('<your-api-key>')
print(credentials.valid)
```
Should correctly return if key is valid
##### Actual behavior
If the given key exceeds the API limit `credentials.valid` still returns True
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/guardian.py`
Content:
```
1 """ This module fetches data from The Guardian API.
2
3 To use first create :class:`TheGuardianCredentials`:
4
5 >>> from orangecontrib.text.guardian import TheGuardianCredentials
6 >>> credentials = TheGuardianCredentials('<your-api-key>')
7
8 Then create :class:`TheGuardianAPI` object and use it for searching:
9
10 >>> from orangecontrib.text.guardian import TheGuardianAPI
11 >>> api = TheGuardianAPI(credentials)
12 >>> corpus = api.search('Slovenia', max_documents=10)
13 >>> len(corpus)
14 10
15
16 """
17
18 import requests
19 import math
20 import json
21 import os
22
23 from Orange import data
24
25 from orangecontrib.text.corpus import Corpus
26
27
28 BASE_URL = 'http://content.guardianapis.com/search'
29 ARTICLES_PER_PAGE = 10
30
31
32 class TheGuardianCredentials:
33 """ The Guardian API credentials. """
34 def __init__(self, key):
35 """
36 Args:
37 key (str): The Guardian API key. Use `test` for testing purposes.
38 """
39 self.key = key
40
41 @property
42 def valid(self):
43 """ Check if given API key is valid. """
44 response = requests.get(BASE_URL, {'api-key': self.key})
45 return response.status_code != 403 # 403 == Forbidden
46
47 def __eq__(self, other):
48 return self.key == other.key
49
50
51 class TheGuardianAPI:
52 attributes = []
53
54 class_vars = [
55 (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),
56 ]
57
58 tv = data.TimeVariable('Publication Date')
59 metas = [
60 (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),
61 (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),
62 (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),
63 (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),
64 (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),
65 (data.DiscreteVariable('Type'), lambda doc: doc['type']),
66 (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),
67 (data.StringVariable('Tags'),
68 lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),
69 (data.StringVariable('URL'), lambda doc: doc['webUrl']),
70 (data.ContinuousVariable('Word Count', number_of_decimals=0),
71 lambda doc: doc['fields']['wordcount']),
72 ]
73
74 text_features = [metas[0][0], metas[1][0]] # Headline + Content
75 title_indices = [-1] # Headline
76
77 def __init__(self, credentials, on_progress=None, should_break=None):
78 """
79 Args:
80 credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.
81 on_progress (callable): Function for progress reporting.
82 should_break (callable): Function for early stopping.
83 """
84 self.per_page = ARTICLES_PER_PAGE
85 self.pages = 0
86 self.credentials = credentials
87 self.on_progress = on_progress or (lambda x, y: None)
88 self.should_break = should_break or (lambda: False)
89
90 self.results = []
91
92 def _search(self, query, from_date, to_date, page=1):
93 data = self._build_query(query, from_date, to_date, page)
94
95 response = requests.get(BASE_URL, data)
96 parsed = json.loads(response.text)
97
98 if page == 1: # store number of pages
99 self.pages = parsed['response']['pages']
100
101 self.results.extend(parsed['response']['results'])
102
103 def _build_query(self, query, from_date=None, to_date=None, page=1):
104 data = {
105 'q': query,
106 'api-key': self.credentials.key,
107 'page': str(page),
108 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',
109 'show-tags': 'all',
110 }
111 if from_date is not None:
112 data['from-date'] = from_date
113 if to_date is not None:
114 data['to-date'] = to_date
115
116 return data
117
118 def search(self, query, from_date=None, to_date=None, max_documents=None,
119 accumulate=False):
120 """
121 Search The Guardian API for articles.
122
123 Args:
124 query (str): A query for searching the articles by
125 from_date (str): Search only articles newer than the date provided.
126 Date should be in ISO format; e.g. '2016-12-31'.
127 to_date (str): Search only articles older than the date provided.
128 Date should be in ISO format; e.g. '2016-12-31'.
129 max_documents (int): Maximum number of documents to retrieve.
130 When not given, retrieve all documents.
131 accumulate (bool): A flag indicating whether to accumulate results
132 of multiple consequent search calls.
133
134 Returns:
135 :ref:`Corpus`
136 """
137 if not accumulate:
138 self.results = []
139
140 self._search(query, from_date, to_date)
141
142 pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages
143 self.on_progress(self.per_page, pages * self.per_page)
144
145 for p in range(2, pages+1): # to one based
146 if self.should_break():
147 break
148 self._search(query, from_date, to_date, p)
149 self.on_progress(p*self.per_page, pages * self.per_page)
150
151 c = Corpus.from_documents(
152 self.results, 'The Guardian', self.attributes, self.class_vars,
153 self.metas, title_indices=self.title_indices)
154 c.text_features = self.text_features
155 return c
156
157
158 if __name__ == '__main__':
159 key = os.getenv('THE_GUARDIAN_API_KEY', 'test')
160 credentials = TheGuardianCredentials(key)
161 print(credentials.valid)
162 api = TheGuardianAPI(credentials=credentials)
163 c = api.search('refugees', max_documents=10)
164 print(c)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py
--- a/orangecontrib/text/guardian.py
+++ b/orangecontrib/text/guardian.py
@@ -42,7 +42,7 @@
def valid(self):
""" Check if given API key is valid. """
response = requests.get(BASE_URL, {'api-key': self.key})
- return response.status_code != 403 # 403 == Forbidden
+ return response.status_code == 200
def __eq__(self, other):
return self.key == other.key
|
{"golden_diff": "diff --git a/orangecontrib/text/guardian.py b/orangecontrib/text/guardian.py\n--- a/orangecontrib/text/guardian.py\n+++ b/orangecontrib/text/guardian.py\n@@ -42,7 +42,7 @@\n def valid(self):\n \"\"\" Check if given API key is valid. \"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n- return response.status_code != 403 # 403 == Forbidden\n+ return response.status_code == 200\n \n def __eq__(self, other):\n return self.key == other.key\n", "issue": "TheGuardianCredentials returns wrong valid property\n##### Text version\r\n0.3.0\r\n##### Orange version\r\n3.14\r\n##### Expected behavior\r\n``` python\r\ncredentials = TheGuardianCredentials('<your-api-key>')\r\nprint(credentials.valid)\r\n```\r\nShould correctly return if key is valid\r\n\r\n##### Actual behavior\r\nIf the given key exceeds the API limit `credentials.valid` still returns True\r\n\r\n\n", "before_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\nimport os\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code != 403 # 403 == Forbidden\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n key = os.getenv('THE_GUARDIAN_API_KEY', 'test')\n credentials = TheGuardianCredentials(key)\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}], "after_files": [{"content": "\"\"\" This module fetches data from The Guardian API.\n\nTo use first create :class:`TheGuardianCredentials`:\n\n >>> from orangecontrib.text.guardian import TheGuardianCredentials\n >>> credentials = TheGuardianCredentials('<your-api-key>')\n\nThen create :class:`TheGuardianAPI` object and use it for searching:\n\n >>> from orangecontrib.text.guardian import TheGuardianAPI\n >>> api = TheGuardianAPI(credentials)\n >>> corpus = api.search('Slovenia', max_documents=10)\n >>> len(corpus)\n 10\n\n\"\"\"\n\nimport requests\nimport math\nimport json\nimport os\n\nfrom Orange import data\n\nfrom orangecontrib.text.corpus import Corpus\n\n\nBASE_URL = 'http://content.guardianapis.com/search'\nARTICLES_PER_PAGE = 10\n\n\nclass TheGuardianCredentials:\n \"\"\" The Guardian API credentials. \"\"\"\n def __init__(self, key):\n \"\"\"\n Args:\n key (str): The Guardian API key. Use `test` for testing purposes.\n \"\"\"\n self.key = key\n\n @property\n def valid(self):\n \"\"\" Check if given API key is valid. 
\"\"\"\n response = requests.get(BASE_URL, {'api-key': self.key})\n return response.status_code == 200\n\n def __eq__(self, other):\n return self.key == other.key\n\n\nclass TheGuardianAPI:\n attributes = []\n\n class_vars = [\n (data.DiscreteVariable('Section'), lambda doc: doc['sectionName']),\n ]\n\n tv = data.TimeVariable('Publication Date')\n metas = [\n (data.StringVariable('Headline'), lambda doc: doc['fields']['headline']),\n (data.StringVariable('Content'), lambda doc: doc['fields']['bodyText']),\n (data.StringVariable('Trail Text'), lambda doc: doc['fields']['trailText']),\n (data.StringVariable('HTML'), lambda doc: doc['fields']['body']),\n (tv, lambda doc: TheGuardianAPI.tv.parse(doc['webPublicationDate'])),\n (data.DiscreteVariable('Type'), lambda doc: doc['type']),\n (data.DiscreteVariable('Language'), lambda doc: doc['fields']['lang']),\n (data.StringVariable('Tags'),\n lambda doc: ', '.join(tag['webTitle'] for tag in doc['tags'])),\n (data.StringVariable('URL'), lambda doc: doc['webUrl']),\n (data.ContinuousVariable('Word Count', number_of_decimals=0),\n lambda doc: doc['fields']['wordcount']),\n ]\n\n text_features = [metas[0][0], metas[1][0]] # Headline + Content\n title_indices = [-1] # Headline\n\n def __init__(self, credentials, on_progress=None, should_break=None):\n \"\"\"\n Args:\n credentials (:class:`TheGuardianCredentials`): The Guardian Creentials.\n on_progress (callable): Function for progress reporting.\n should_break (callable): Function for early stopping.\n \"\"\"\n self.per_page = ARTICLES_PER_PAGE\n self.pages = 0\n self.credentials = credentials\n self.on_progress = on_progress or (lambda x, y: None)\n self.should_break = should_break or (lambda: False)\n\n self.results = []\n\n def _search(self, query, from_date, to_date, page=1):\n data = self._build_query(query, from_date, to_date, page)\n\n response = requests.get(BASE_URL, data)\n parsed = json.loads(response.text)\n\n if page == 1: # store number of pages\n self.pages = parsed['response']['pages']\n\n self.results.extend(parsed['response']['results'])\n\n def _build_query(self, query, from_date=None, to_date=None, page=1):\n data = {\n 'q': query,\n 'api-key': self.credentials.key,\n 'page': str(page),\n 'show-fields': 'headline,trailText,body,bodyText,lang,wordcount',\n 'show-tags': 'all',\n }\n if from_date is not None:\n data['from-date'] = from_date\n if to_date is not None:\n data['to-date'] = to_date\n\n return data\n\n def search(self, query, from_date=None, to_date=None, max_documents=None,\n accumulate=False):\n \"\"\"\n Search The Guardian API for articles.\n\n Args:\n query (str): A query for searching the articles by\n from_date (str): Search only articles newer than the date provided.\n Date should be in ISO format; e.g. '2016-12-31'.\n to_date (str): Search only articles older than the date provided.\n Date should be in ISO format; e.g. 
'2016-12-31'.\n max_documents (int): Maximum number of documents to retrieve.\n When not given, retrieve all documents.\n accumulate (bool): A flag indicating whether to accumulate results\n of multiple consequent search calls.\n\n Returns:\n :ref:`Corpus`\n \"\"\"\n if not accumulate:\n self.results = []\n\n self._search(query, from_date, to_date)\n\n pages = math.ceil(max_documents/self.per_page) if max_documents else self.pages\n self.on_progress(self.per_page, pages * self.per_page)\n\n for p in range(2, pages+1): # to one based\n if self.should_break():\n break\n self._search(query, from_date, to_date, p)\n self.on_progress(p*self.per_page, pages * self.per_page)\n\n c = Corpus.from_documents(\n self.results, 'The Guardian', self.attributes, self.class_vars,\n self.metas, title_indices=self.title_indices)\n c.text_features = self.text_features\n return c\n\n\nif __name__ == '__main__':\n key = os.getenv('THE_GUARDIAN_API_KEY', 'test')\n credentials = TheGuardianCredentials(key)\n print(credentials.valid)\n api = TheGuardianAPI(credentials=credentials)\n c = api.search('refugees', max_documents=10)\n print(c)\n", "path": "orangecontrib/text/guardian.py"}]}
| 2,077 | 136 |
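A quick illustration of why the original `!= 403` test in the record above lets a rate-limited key through while the patched `== 200` check does not. The status codes are assumed examples (403 for a bad key, 429 for an exhausted quota), not captured from the Guardian API:

```python
# Sketch of the validity check before and after the patch, over assumed status codes.
for status in (200, 403, 429):
    old_valid = status != 403   # original check: anything but Forbidden counts as valid
    new_valid = status == 200   # patched check: only a successful response counts
    print(status, old_valid, new_valid)
# 200 -> True True, 403 -> False False, 429 -> True False:
# the rate-limited key no longer passes with the patched check.
```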
| gh_patches_debug_7540 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3544 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request Feature: Request for cloud Trail events for Lambda
Hello ,
The following policy is giving me the error :2018-12-18 14:24:39,580: custodian.commands:ERROR Policy: lambda-tag-compliance is invalid: event shortcut not defined: CreateFunction
policy ---
```
- name: lambda-tag-compliance
resource: lambda
mode:
type: cloudtrail #### cloud trail not possible
role: arn:aws:iam::acctnumber:role/acctname
events:
- CreateFunction
filters:
- "tag:custodian": absent
```
i changed the policy to the following based on the cc docs and the got the following error
```
- name: lambda-tag-compliance
resource: lambda
mode:
type: cloudtrail #### cloud trail not possible
role: arn:aws:iam::acctnum:role/acctname
event: CreateFunction
ids: "requestParameters.functionName"
filters:
- "tag:custodian": absent
```
error ---- 2018-12-18 14:33:41,697: custodian.commands:ERROR Configuration invalid: Policy.yml
2018-12-18 14:33:41,704: custodian.commands:ERROR {'type': 'cloudtrail', 'role': 'arn:aws:iam::acctnum:role/acctname', 'event': 'CreateFunction', 'ids': 'requestParameters.functionName'} is not valid under any of the given schemas
can you add the shortcut to cloud-custodian/c7n/cwe.py ... or is there anything else that we can do resolve this ?
Thank you
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/cwe.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import jmespath
17 import six
18
19
20 class CloudWatchEvents(object):
21 """A mapping of events to resource types."""
22
23 # **These are just shortcuts**, you can use the policy definition to
24 # subscribe to any arbitrary cloud trail event that corresponds to
25 # a custodian resource.
26
27 # For common events that we want to match, just keep a short mapping.
28 # Users can specify arbitrary cloud watch events by specifying these
29 # values in their config, but keep the common case simple.
30
31 trail_events = {
32 # event source, resource type as keys, mapping to api call and
33 # jmespath expression
34 'ConsoleLogin': {
35 'ids': 'userIdentity.arn',
36 'source': 'signin.amazonaws.com'},
37
38 'CreateAutoScalingGroup': {
39 'ids': 'requestParameters.autoScalingGroupName',
40 'source': 'autoscaling.amazonaws.com'},
41
42 'UpdateAutoScalingGroup': {
43 'ids': 'requestParameters.autoScalingGroupName',
44 'source': 'autoscaling.amazonaws.com'},
45
46 'CreateBucket': {
47 'ids': 'requestParameters.bucketName',
48 'source': 's3.amazonaws.com'},
49
50 'CreateCluster': {
51 'ids': 'requestParameters.clusterIdentifier',
52 'source': 'redshift.amazonaws.com'},
53
54 'CreateLoadBalancer': {
55 'ids': 'requestParameters.loadBalancerName',
56 'source': 'elasticloadbalancing.amazonaws.com'},
57
58 'CreateLoadBalancerPolicy': {
59 'ids': 'requestParameters.loadBalancerName',
60 'source': 'elasticloadbalancing.amazonaws.com'},
61
62 'CreateDBInstance': {
63 'ids': 'requestParameters.dBInstanceIdentifier',
64 'source': 'rds.amazonaws.com'},
65
66 'CreateVolume': {
67 'ids': 'responseElements.volumeId',
68 'source': 'ec2.amazonaws.com'},
69
70 'SetLoadBalancerPoliciesOfListener': {
71 'ids': 'requestParameters.loadBalancerName',
72 'source': 'elasticloadbalancing.amazonaws.com'},
73
74 'CreateElasticsearchDomain': {
75 'ids': 'requestParameters.domainName',
76 'source': 'es.amazonaws.com'},
77
78 'CreateTable': {
79 'ids': 'requestParameters.tableName',
80 'source': 'dynamodb.amazonaws.com'},
81
82 'RunInstances': {
83 'ids': 'responseElements.instancesSet.items[].instanceId',
84 'source': 'ec2.amazonaws.com'}}
85
86 @classmethod
87 def get(cls, event_name):
88 return cls.trail_events.get(event_name)
89
90 @classmethod
91 def match(cls, event):
92 """Match a given cwe event as cloudtrail with an api call
93
94 That has its information filled out.
95 """
96 if 'detail' not in event:
97 return False
98 if 'eventName' not in event['detail']:
99 return False
100 k = event['detail']['eventName']
101
102 # We want callers to use a compiled expression, but want to avoid
103 # initialization cost of doing it without cause. Not thread safe,
104 # but usage context is lambda entry.
105 if k in cls.trail_events:
106 v = dict(cls.trail_events[k])
107 if isinstance(v['ids'], six.string_types):
108 v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])
109 cls.trail_events[k]['ids'] = e
110 return v
111
112 return False
113
114 @classmethod
115 def get_trail_ids(cls, event, mode):
116 """extract resources ids from a cloud trail event."""
117 resource_ids = ()
118 event_name = event['detail']['eventName']
119 event_source = event['detail']['eventSource']
120 for e in mode.get('events', []):
121 if not isinstance(e, dict):
122 # Check if we have a short cut / alias
123 info = CloudWatchEvents.match(event)
124 if info:
125 return info['ids'].search(event)
126 continue
127 if event_name != e.get('event'):
128 continue
129 if event_source != e.get('source'):
130 continue
131
132 id_query = e.get('ids')
133 if not id_query:
134 raise ValueError("No id query configured")
135 evt = event
136 # be forgiving for users specifying with details or without
137 if not id_query.startswith('detail.'):
138 evt = event.get('detail', {})
139 resource_ids = jmespath.search(id_query, evt)
140 if resource_ids:
141 break
142 return resource_ids
143
144 @classmethod
145 def get_ids(cls, event, mode):
146 mode_type = mode.get('type')
147 if mode_type == 'ec2-instance-state':
148 resource_ids = [event.get('detail', {}).get('instance-id')]
149 elif mode_type == 'asg-instance-state':
150 resource_ids = [event.get('detail', {}).get('AutoScalingGroupName')]
151 elif mode_type != 'cloudtrail':
152 return None
153 else:
154 resource_ids = cls.get_trail_ids(event, mode)
155
156 if not isinstance(resource_ids, (tuple, list)):
157 resource_ids = [resource_ids]
158
159 return list(filter(None, resource_ids))
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/cwe.py b/c7n/cwe.py
--- a/c7n/cwe.py
+++ b/c7n/cwe.py
@@ -79,6 +79,11 @@
'ids': 'requestParameters.tableName',
'source': 'dynamodb.amazonaws.com'},
+ 'CreateFunction': {
+ 'event': 'CreateFunction20150331',
+ 'source': 'lambda.amazonaws.com',
+ 'ids': 'requestParameters.functionName'},
+
'RunInstances': {
'ids': 'responseElements.instancesSet.items[].instanceId',
'source': 'ec2.amazonaws.com'}}
|
{"golden_diff": "diff --git a/c7n/cwe.py b/c7n/cwe.py\n--- a/c7n/cwe.py\n+++ b/c7n/cwe.py\n@@ -79,6 +79,11 @@\n 'ids': 'requestParameters.tableName',\n 'source': 'dynamodb.amazonaws.com'},\n \n+ 'CreateFunction': {\n+ 'event': 'CreateFunction20150331',\n+ 'source': 'lambda.amazonaws.com',\n+ 'ids': 'requestParameters.functionName'},\n+\n 'RunInstances': {\n 'ids': 'responseElements.instancesSet.items[].instanceId',\n 'source': 'ec2.amazonaws.com'}}\n", "issue": "Request Feature: Request for cloud Trail events for Lambda\nHello , \r\nThe following policy is giving me the error :2018-12-18 14:24:39,580: custodian.commands:ERROR Policy: lambda-tag-compliance is invalid: event shortcut not defined: CreateFunction\r\n policy ---\r\n```\r\n- name: lambda-tag-compliance\r\n resource: lambda\r\n mode:\r\n type: cloudtrail #### cloud trail not possible\r\n role: arn:aws:iam::acctnumber:role/acctname\r\n events: \r\n - CreateFunction\r\n filters:\r\n - \"tag:custodian\": absent\r\n```\r\ni changed the policy to the following based on the cc docs and the got the following error \r\n```\r\n- name: lambda-tag-compliance\r\n resource: lambda\r\n mode:\r\n type: cloudtrail #### cloud trail not possible\r\n role: arn:aws:iam::acctnum:role/acctname\r\n event: CreateFunction\r\n ids: \"requestParameters.functionName\"\r\n filters:\r\n - \"tag:custodian\": absent\r\n```\r\nerror ---- 2018-12-18 14:33:41,697: custodian.commands:ERROR Configuration invalid: Policy.yml\r\n2018-12-18 14:33:41,704: custodian.commands:ERROR {'type': 'cloudtrail', 'role': 'arn:aws:iam::acctnum:role/acctname', 'event': 'CreateFunction', 'ids': 'requestParameters.functionName'} is not valid under any of the given schemas\r\n\r\ncan you add the shortcut to cloud-custodian/c7n/cwe.py ... or is there anything else that we can do resolve this ?\r\n\r\nThank you \n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport jmespath\nimport six\n\n\nclass CloudWatchEvents(object):\n \"\"\"A mapping of events to resource types.\"\"\"\n\n # **These are just shortcuts**, you can use the policy definition to\n # subscribe to any arbitrary cloud trail event that corresponds to\n # a custodian resource.\n\n # For common events that we want to match, just keep a short mapping.\n # Users can specify arbitrary cloud watch events by specifying these\n # values in their config, but keep the common case simple.\n\n trail_events = {\n # event source, resource type as keys, mapping to api call and\n # jmespath expression\n 'ConsoleLogin': {\n 'ids': 'userIdentity.arn',\n 'source': 'signin.amazonaws.com'},\n\n 'CreateAutoScalingGroup': {\n 'ids': 'requestParameters.autoScalingGroupName',\n 'source': 'autoscaling.amazonaws.com'},\n\n 'UpdateAutoScalingGroup': {\n 'ids': 'requestParameters.autoScalingGroupName',\n 'source': 'autoscaling.amazonaws.com'},\n\n 'CreateBucket': {\n 'ids': 
'requestParameters.bucketName',\n 'source': 's3.amazonaws.com'},\n\n 'CreateCluster': {\n 'ids': 'requestParameters.clusterIdentifier',\n 'source': 'redshift.amazonaws.com'},\n\n 'CreateLoadBalancer': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateLoadBalancerPolicy': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateDBInstance': {\n 'ids': 'requestParameters.dBInstanceIdentifier',\n 'source': 'rds.amazonaws.com'},\n\n 'CreateVolume': {\n 'ids': 'responseElements.volumeId',\n 'source': 'ec2.amazonaws.com'},\n\n 'SetLoadBalancerPoliciesOfListener': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateElasticsearchDomain': {\n 'ids': 'requestParameters.domainName',\n 'source': 'es.amazonaws.com'},\n\n 'CreateTable': {\n 'ids': 'requestParameters.tableName',\n 'source': 'dynamodb.amazonaws.com'},\n\n 'RunInstances': {\n 'ids': 'responseElements.instancesSet.items[].instanceId',\n 'source': 'ec2.amazonaws.com'}}\n\n @classmethod\n def get(cls, event_name):\n return cls.trail_events.get(event_name)\n\n @classmethod\n def match(cls, event):\n \"\"\"Match a given cwe event as cloudtrail with an api call\n\n That has its information filled out.\n \"\"\"\n if 'detail' not in event:\n return False\n if 'eventName' not in event['detail']:\n return False\n k = event['detail']['eventName']\n\n # We want callers to use a compiled expression, but want to avoid\n # initialization cost of doing it without cause. Not thread safe,\n # but usage context is lambda entry.\n if k in cls.trail_events:\n v = dict(cls.trail_events[k])\n if isinstance(v['ids'], six.string_types):\n v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])\n cls.trail_events[k]['ids'] = e\n return v\n\n return False\n\n @classmethod\n def get_trail_ids(cls, event, mode):\n \"\"\"extract resources ids from a cloud trail event.\"\"\"\n resource_ids = ()\n event_name = event['detail']['eventName']\n event_source = event['detail']['eventSource']\n for e in mode.get('events', []):\n if not isinstance(e, dict):\n # Check if we have a short cut / alias\n info = CloudWatchEvents.match(event)\n if info:\n return info['ids'].search(event)\n continue\n if event_name != e.get('event'):\n continue\n if event_source != e.get('source'):\n continue\n\n id_query = e.get('ids')\n if not id_query:\n raise ValueError(\"No id query configured\")\n evt = event\n # be forgiving for users specifying with details or without\n if not id_query.startswith('detail.'):\n evt = event.get('detail', {})\n resource_ids = jmespath.search(id_query, evt)\n if resource_ids:\n break\n return resource_ids\n\n @classmethod\n def get_ids(cls, event, mode):\n mode_type = mode.get('type')\n if mode_type == 'ec2-instance-state':\n resource_ids = [event.get('detail', {}).get('instance-id')]\n elif mode_type == 'asg-instance-state':\n resource_ids = [event.get('detail', {}).get('AutoScalingGroupName')]\n elif mode_type != 'cloudtrail':\n return None\n else:\n resource_ids = cls.get_trail_ids(event, mode)\n\n if not isinstance(resource_ids, (tuple, list)):\n resource_ids = [resource_ids]\n\n return list(filter(None, resource_ids))\n", "path": "c7n/cwe.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the 
License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport jmespath\nimport six\n\n\nclass CloudWatchEvents(object):\n \"\"\"A mapping of events to resource types.\"\"\"\n\n # **These are just shortcuts**, you can use the policy definition to\n # subscribe to any arbitrary cloud trail event that corresponds to\n # a custodian resource.\n\n # For common events that we want to match, just keep a short mapping.\n # Users can specify arbitrary cloud watch events by specifying these\n # values in their config, but keep the common case simple.\n\n trail_events = {\n # event source, resource type as keys, mapping to api call and\n # jmespath expression\n 'ConsoleLogin': {\n 'ids': 'userIdentity.arn',\n 'source': 'signin.amazonaws.com'},\n\n 'CreateAutoScalingGroup': {\n 'ids': 'requestParameters.autoScalingGroupName',\n 'source': 'autoscaling.amazonaws.com'},\n\n 'UpdateAutoScalingGroup': {\n 'ids': 'requestParameters.autoScalingGroupName',\n 'source': 'autoscaling.amazonaws.com'},\n\n 'CreateBucket': {\n 'ids': 'requestParameters.bucketName',\n 'source': 's3.amazonaws.com'},\n\n 'CreateCluster': {\n 'ids': 'requestParameters.clusterIdentifier',\n 'source': 'redshift.amazonaws.com'},\n\n 'CreateLoadBalancer': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateLoadBalancerPolicy': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateDBInstance': {\n 'ids': 'requestParameters.dBInstanceIdentifier',\n 'source': 'rds.amazonaws.com'},\n\n 'CreateVolume': {\n 'ids': 'responseElements.volumeId',\n 'source': 'ec2.amazonaws.com'},\n\n 'SetLoadBalancerPoliciesOfListener': {\n 'ids': 'requestParameters.loadBalancerName',\n 'source': 'elasticloadbalancing.amazonaws.com'},\n\n 'CreateElasticsearchDomain': {\n 'ids': 'requestParameters.domainName',\n 'source': 'es.amazonaws.com'},\n\n 'CreateTable': {\n 'ids': 'requestParameters.tableName',\n 'source': 'dynamodb.amazonaws.com'},\n\n 'CreateFunction': {\n 'event': 'CreateFunction20150331',\n 'source': 'lambda.amazonaws.com',\n 'ids': 'requestParameters.functionName'},\n\n 'RunInstances': {\n 'ids': 'responseElements.instancesSet.items[].instanceId',\n 'source': 'ec2.amazonaws.com'}}\n\n @classmethod\n def get(cls, event_name):\n return cls.trail_events.get(event_name)\n\n @classmethod\n def match(cls, event):\n \"\"\"Match a given cwe event as cloudtrail with an api call\n\n That has its information filled out.\n \"\"\"\n if 'detail' not in event:\n return False\n if 'eventName' not in event['detail']:\n return False\n k = event['detail']['eventName']\n\n # We want callers to use a compiled expression, but want to avoid\n # initialization cost of doing it without cause. 
Not thread safe,\n # but usage context is lambda entry.\n if k in cls.trail_events:\n v = dict(cls.trail_events[k])\n if isinstance(v['ids'], six.string_types):\n v['ids'] = e = jmespath.compile('detail.%s' % v['ids'])\n cls.trail_events[k]['ids'] = e\n return v\n\n return False\n\n @classmethod\n def get_trail_ids(cls, event, mode):\n \"\"\"extract resources ids from a cloud trail event.\"\"\"\n resource_ids = ()\n event_name = event['detail']['eventName']\n event_source = event['detail']['eventSource']\n for e in mode.get('events', []):\n if not isinstance(e, dict):\n # Check if we have a short cut / alias\n info = CloudWatchEvents.match(event)\n if info:\n return info['ids'].search(event)\n continue\n if event_name != e.get('event'):\n continue\n if event_source != e.get('source'):\n continue\n\n id_query = e.get('ids')\n if not id_query:\n raise ValueError(\"No id query configured\")\n evt = event\n # be forgiving for users specifying with details or without\n if not id_query.startswith('detail.'):\n evt = event.get('detail', {})\n resource_ids = jmespath.search(id_query, evt)\n if resource_ids:\n break\n return resource_ids\n\n @classmethod\n def get_ids(cls, event, mode):\n mode_type = mode.get('type')\n if mode_type == 'ec2-instance-state':\n resource_ids = [event.get('detail', {}).get('instance-id')]\n elif mode_type == 'asg-instance-state':\n resource_ids = [event.get('detail', {}).get('AutoScalingGroupName')]\n elif mode_type != 'cloudtrail':\n return None\n else:\n resource_ids = cls.get_trail_ids(event, mode)\n\n if not isinstance(resource_ids, (tuple, list)):\n resource_ids = [resource_ids]\n\n return list(filter(None, resource_ids))\n", "path": "c7n/cwe.py"}]}
| 2,270 | 146 |
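To make the new `CreateFunction` shortcut concrete, here is a small sketch of how its `ids` expression pulls the function name out of an event. The event body is a hand-written stand-in rather than a real CloudTrail payload, and it assumes the `jmespath` package already imported by `c7n/cwe.py`:

```python
import jmespath

# The mapping added by the golden diff above.
shortcut = {
    'event': 'CreateFunction20150331',
    'source': 'lambda.amazonaws.com',
    'ids': 'requestParameters.functionName',
}

# Hand-written stand-in for a CloudWatch event wrapping the CloudTrail record.
event = {
    'detail': {
        'eventName': shortcut['event'],
        'eventSource': shortcut['source'],
        'requestParameters': {'functionName': 'my-lambda'},
    }
}

# CloudWatchEvents.match compiles 'detail.<ids>' and searches the event with it.
print(jmespath.compile('detail.%s' % shortcut['ids']).search(event))  # -> 'my-lambda'
```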
| gh_patches_debug_51565 | rasdani/github-patches | git_diff | ray-project__ray-1413 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Worker dies when passed pandas DataFrame.
### System information
- **Ray version**: 0.3.0
- **Python version**: 3.6.0
- **Exact command to reproduce**:
```python
import pandas as pd
import ray
pd.__version__ # '0.19.2'
ray.init()
df = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})
@ray.remote
def f(x):
pass
f.remote(df)
```
The last line causes the following error to be printed in the background.
```
A worker died or was killed while executing a task.
```
cc @devin-petersohn
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/dataframe/__init__.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 from .dataframe import DataFrame
6 from .dataframe import from_pandas
7 from .dataframe import to_pandas
8 from .series import Series
9 import ray
10 import pandas as pd
11
12 __all__ = ["DataFrame", "from_pandas", "to_pandas", "Series"]
13
14 ray.register_custom_serializer(pd.DataFrame, use_pickle=True)
15 ray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py
--- a/python/ray/dataframe/__init__.py
+++ b/python/ray/dataframe/__init__.py
@@ -6,10 +6,5 @@
from .dataframe import from_pandas
from .dataframe import to_pandas
from .series import Series
-import ray
-import pandas as pd
__all__ = ["DataFrame", "from_pandas", "to_pandas", "Series"]
-
-ray.register_custom_serializer(pd.DataFrame, use_pickle=True)
-ray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)
|
{"golden_diff": "diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py\n--- a/python/ray/dataframe/__init__.py\n+++ b/python/ray/dataframe/__init__.py\n@@ -6,10 +6,5 @@\n from .dataframe import from_pandas\n from .dataframe import to_pandas\n from .series import Series\n-import ray\n-import pandas as pd\n \n __all__ = [\"DataFrame\", \"from_pandas\", \"to_pandas\", \"Series\"]\n-\n-ray.register_custom_serializer(pd.DataFrame, use_pickle=True)\n-ray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)\n", "issue": "Worker dies when passed pandas DataFrame.\n### System information\r\n- **Ray version**: 0.3.0\r\n- **Python version**: 3.6.0\r\n- **Exact command to reproduce**:\r\n\r\n```python\r\nimport pandas as pd\r\nimport ray\r\n\r\npd.__version__ # '0.19.2'\r\n\r\nray.init()\r\n\r\ndf = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})\r\n\r\[email protected]\r\ndef f(x):\r\n pass\r\n\r\nf.remote(df)\r\n```\r\n\r\nThe last line causes the following error to be printed in the background.\r\n\r\n```\r\nA worker died or was killed while executing a task.\r\n```\r\n\r\ncc @devin-petersohn\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .dataframe import DataFrame\nfrom .dataframe import from_pandas\nfrom .dataframe import to_pandas\nfrom .series import Series\nimport ray\nimport pandas as pd\n\n__all__ = [\"DataFrame\", \"from_pandas\", \"to_pandas\", \"Series\"]\n\nray.register_custom_serializer(pd.DataFrame, use_pickle=True)\nray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)\n", "path": "python/ray/dataframe/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .dataframe import DataFrame\nfrom .dataframe import from_pandas\nfrom .dataframe import to_pandas\nfrom .series import Series\n\n__all__ = [\"DataFrame\", \"from_pandas\", \"to_pandas\", \"Series\"]\n", "path": "python/ray/dataframe/__init__.py"}]}
| 560 | 138 |
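A side note on the patch above: it simply drops the eager `register_custom_serializer` calls and falls back to Ray's default handling of pandas objects. The snippet below only checks the underlying assumption that DataFrames round-trip through plain pickle; reproducing the original worker crash would need a running Ray cluster:

```python
import pickle
import pandas as pd

# The DataFrame from the issue's reproduction script.
df = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': [3, 4, 5, 6]})

# Plain pickle already round-trips it, so no import-time custom serializer is required.
restored = pickle.loads(pickle.dumps(df))
assert restored.equals(df)
print(restored)
```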
| gh_patches_debug_549 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1412 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🪲 ARM test fails
~~~
ValueError: 'orso/bitarray/cbitarray.pyx' doesn't match any files
~~~
https://github.com/mabel-dev/opteryx/actions/runs/7535073365/job/20510453555
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opteryx/__version__.py`
Content:
```
1 __build__ = 244
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 12
31 _revision = 5
32 _status = VersionStatus.BETA
33
34 __version__ = f"{_major}.{_minor}.{_revision}" + (
35 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
36 )
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 244
+__build__ = 248
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
|
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 244\n+__build__ = 248\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n", "issue": "\ud83e\udeb2 ARM test fails \n\r\n~~~\r\nValueError: 'orso/bitarray/cbitarray.pyx' doesn't match any files\r\n~~~\r\n\r\nhttps://github.com/mabel-dev/opteryx/actions/runs/7535073365/job/20510453555\n", "before_files": [{"content": "__build__ = 244\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 12\n_revision = 5\n_status = VersionStatus.BETA\n\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "__build__ = 248\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 12\n_revision = 5\n_status = VersionStatus.BETA\n\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
| 674 | 101 |
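For context on what the one-line build bump changes, a stripped-down rerun of the version-string logic from `opteryx/__version__.py` as shown above:

```python
# Reconstructs the version string with the patched build number.
__build__ = 248
_major, _minor, _revision = 0, 12, 5
_status_value = "beta"  # VersionStatus.BETA in the original

__version__ = f"{_major}.{_minor}.{_revision}" + f"-{_status_value}.{__build__}"
print(__version__)  # 0.12.5-beta.248
```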
| gh_patches_debug_17327 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3950 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Primrose Schools
Is generating 1,221 errors. Adding a if statement for `content` should fix it. Could also be turned into a sitemap spider.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/primrose_schools.py`
Content:
```
1 import json
2
3 import scrapy
4
5 from locations.items import GeojsonPointItem
6
7
8 class PrimroseSchoolsSpider(scrapy.Spider):
9 name = "primrose_schools"
10 item_attributes = {"brand": "Primrose Schools", "brand_wikidata": "Q7243677"}
11 allowed_domains = ["primroseschools.com"]
12
13 start_urls = ["https://www.primroseschools.com/find-a-school/"]
14
15 def parse(self, response):
16 with open(
17 "./locations/searchable_points/us_centroids_50mile_radius.csv"
18 ) as points:
19 next(points)
20 for point in points:
21 row = point.replace("\n", "").split(",")
22 lati = row[1]
23 long = row[2]
24 searchurl = "https://www.primroseschools.com/find-a-school/?search_string=USA&latitude={la}&longitude={lo}".format(
25 la=lati, lo=long
26 )
27 yield scrapy.Request(
28 response.urljoin(searchurl), callback=self.parse_search
29 )
30
31 def parse_search(self, response):
32 content = response.xpath('//script[@type="application/json"]/text()').get()
33 schools = json.loads(content)
34 for i in schools:
35 if i["address_1"]:
36 properties = {
37 "name": i["name"],
38 "addr_full": i["address_1"] + " " + i["address_2"],
39 "city": i["city"],
40 "state": i["state"],
41 "postcode": i["zip_code"],
42 "phone": i["phone"],
43 "ref": i["id"],
44 "website": "https://www.primroseschools.com" + i["url"],
45 "lat": float(i["latitude"]),
46 "lon": float(i["longitude"]),
47 }
48 yield GeojsonPointItem(**properties)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/primrose_schools.py b/locations/spiders/primrose_schools.py
--- a/locations/spiders/primrose_schools.py
+++ b/locations/spiders/primrose_schools.py
@@ -30,12 +30,17 @@
def parse_search(self, response):
content = response.xpath('//script[@type="application/json"]/text()').get()
+ if content is None:
+ return
+
schools = json.loads(content)
for i in schools:
if i["address_1"]:
properties = {
"name": i["name"],
- "addr_full": i["address_1"] + " " + i["address_2"],
+ "street_address": ", ".join(
+ filter(None, [i["address_1"], i["address_2"]])
+ ),
"city": i["city"],
"state": i["state"],
"postcode": i["zip_code"],
|
{"golden_diff": "diff --git a/locations/spiders/primrose_schools.py b/locations/spiders/primrose_schools.py\n--- a/locations/spiders/primrose_schools.py\n+++ b/locations/spiders/primrose_schools.py\n@@ -30,12 +30,17 @@\n \n def parse_search(self, response):\n content = response.xpath('//script[@type=\"application/json\"]/text()').get()\n+ if content is None:\n+ return\n+\n schools = json.loads(content)\n for i in schools:\n if i[\"address_1\"]:\n properties = {\n \"name\": i[\"name\"],\n- \"addr_full\": i[\"address_1\"] + \" \" + i[\"address_2\"],\n+ \"street_address\": \", \".join(\n+ filter(None, [i[\"address_1\"], i[\"address_2\"]])\n+ ),\n \"city\": i[\"city\"],\n \"state\": i[\"state\"],\n \"postcode\": i[\"zip_code\"],\n", "issue": "Primrose Schools\nIs generating 1,221 errors. Adding a if statement for `content` should fix it. Could also be turned into a sitemap spider.\n", "before_files": [{"content": "import json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass PrimroseSchoolsSpider(scrapy.Spider):\n name = \"primrose_schools\"\n item_attributes = {\"brand\": \"Primrose Schools\", \"brand_wikidata\": \"Q7243677\"}\n allowed_domains = [\"primroseschools.com\"]\n\n start_urls = [\"https://www.primroseschools.com/find-a-school/\"]\n\n def parse(self, response):\n with open(\n \"./locations/searchable_points/us_centroids_50mile_radius.csv\"\n ) as points:\n next(points)\n for point in points:\n row = point.replace(\"\\n\", \"\").split(\",\")\n lati = row[1]\n long = row[2]\n searchurl = \"https://www.primroseschools.com/find-a-school/?search_string=USA&latitude={la}&longitude={lo}\".format(\n la=lati, lo=long\n )\n yield scrapy.Request(\n response.urljoin(searchurl), callback=self.parse_search\n )\n\n def parse_search(self, response):\n content = response.xpath('//script[@type=\"application/json\"]/text()').get()\n schools = json.loads(content)\n for i in schools:\n if i[\"address_1\"]:\n properties = {\n \"name\": i[\"name\"],\n \"addr_full\": i[\"address_1\"] + \" \" + i[\"address_2\"],\n \"city\": i[\"city\"],\n \"state\": i[\"state\"],\n \"postcode\": i[\"zip_code\"],\n \"phone\": i[\"phone\"],\n \"ref\": i[\"id\"],\n \"website\": \"https://www.primroseschools.com\" + i[\"url\"],\n \"lat\": float(i[\"latitude\"]),\n \"lon\": float(i[\"longitude\"]),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/primrose_schools.py"}], "after_files": [{"content": "import json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass PrimroseSchoolsSpider(scrapy.Spider):\n name = \"primrose_schools\"\n item_attributes = {\"brand\": \"Primrose Schools\", \"brand_wikidata\": \"Q7243677\"}\n allowed_domains = [\"primroseschools.com\"]\n\n start_urls = [\"https://www.primroseschools.com/find-a-school/\"]\n\n def parse(self, response):\n with open(\n \"./locations/searchable_points/us_centroids_50mile_radius.csv\"\n ) as points:\n next(points)\n for point in points:\n row = point.replace(\"\\n\", \"\").split(\",\")\n lati = row[1]\n long = row[2]\n searchurl = \"https://www.primroseschools.com/find-a-school/?search_string=USA&latitude={la}&longitude={lo}\".format(\n la=lati, lo=long\n )\n yield scrapy.Request(\n response.urljoin(searchurl), callback=self.parse_search\n )\n\n def parse_search(self, response):\n content = response.xpath('//script[@type=\"application/json\"]/text()').get()\n if content is None:\n return\n\n schools = json.loads(content)\n for i in schools:\n if i[\"address_1\"]:\n properties = {\n \"name\": 
i[\"name\"],\n \"street_address\": \", \".join(\n filter(None, [i[\"address_1\"], i[\"address_2\"]])\n ),\n \"city\": i[\"city\"],\n \"state\": i[\"state\"],\n \"postcode\": i[\"zip_code\"],\n \"phone\": i[\"phone\"],\n \"ref\": i[\"id\"],\n \"website\": \"https://www.primroseschools.com\" + i[\"url\"],\n \"lat\": float(i[\"latitude\"]),\n \"lon\": float(i[\"longitude\"]),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/primrose_schools.py"}]}
| 794 | 217 |
gh_patches_debug_9477
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1102
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bot.db.unalias_nick() behavior does not match docs
> ```
> Raises ValueError if there is not at least one other nick in the group.
> ```
The ValueError is never raised:
```
<dgw> .nickunmerge ThisIsATestNick
<Sopel> Removed ThisIsATestNick from nick group 1497.
<dgw> that's not supposed to work
<ThisIsATestNick> blerg
<dgw> .nickunmerge ThisIsATestNick
<Sopel> Removed ThisIsATestNick from nick group 1498.
```
I have traced this issue to a logic error, and will submit a proposed fix when I'm back in front of my laptop later tonight.
If it's more desirable to change the documentation than the behavior, I'd be happy to rewrite that instead—but I believe that the described behavior is correct, and the observed behavior is a bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/db.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals, absolute_import, print_function, division
3
4 import json
5 import os.path
6 import sys
7 import sqlite3
8
9 from sopel.tools import Identifier
10
11 if sys.version_info.major >= 3:
12 unicode = str
13 basestring = str
14
15
16 def _deserialize(value):
17 if value is None:
18 return None
19 # sqlite likes to return ints for strings that look like ints, even though
20 # the column type is string. That's how you do dynamic typing wrong.
21 value = unicode(value)
22 # Just in case someone's mucking with the DB in a way we can't account for,
23 # ignore json parsing errors
24 try:
25 value = json.loads(value)
26 except:
27 pass
28 return value
29
30
31 class SopelDB(object):
32 """*Availability: 5.0+*
33
34 This defines an interface for basic, common operations on a sqlite
35 database. It simplifies those common operations, and allows direct access
36 to the database, wherever the user has configured it to be.
37
38 When configured with a relative filename, it is assumed to be in the same
39 directory as the config."""
40
41 def __init__(self, config):
42 path = config.core.db_filename
43 config_dir, config_file = os.path.split(config.filename)
44 config_name, _ = os.path.splitext(config_file)
45 if path is None:
46 path = os.path.join(config_dir, config_name + '.db')
47 path = os.path.expanduser(path)
48 if not os.path.isabs(path):
49 path = os.path.normpath(os.path.join(config_dir, path))
50 self.filename = path
51 self._create()
52
53 def connect(self):
54 """Return a raw database connection object."""
55 return sqlite3.connect(self.filename)
56
57 def execute(self, *args, **kwargs):
58 """Execute an arbitrary SQL query against the database.
59
60 Returns a cursor object, on which things like `.fetchall()` can be
61 called per PEP 249."""
62 with self.connect() as conn:
63 cur = conn.cursor()
64 return cur.execute(*args, **kwargs)
65
66 def _create(self):
67 """Create the basic database structure."""
68 # Do nothing if the db already exists.
69 try:
70 self.execute('SELECT * FROM nick_ids;')
71 self.execute('SELECT * FROM nicknames;')
72 self.execute('SELECT * FROM nick_values;')
73 self.execute('SELECT * FROM channel_values;')
74 except:
75 pass
76 else:
77 return
78
79 self.execute(
80 'CREATE TABLE nick_ids (nick_id INTEGER PRIMARY KEY AUTOINCREMENT)'
81 )
82 self.execute(
83 'CREATE TABLE nicknames '
84 '(nick_id INTEGER REFERENCES nick_ids, '
85 'slug STRING PRIMARY KEY, canonical string)'
86 )
87 self.execute(
88 'CREATE TABLE nick_values '
89 '(nick_id INTEGER REFERENCES nick_ids(nick_id), '
90 'key STRING, value STRING, '
91 'PRIMARY KEY (nick_id, key))'
92 )
93 self.execute(
94 'CREATE TABLE channel_values '
95 '(channel STRING, key STRING, value STRING, '
96 'PRIMARY KEY (channel, key))'
97 )
98
99 def get_uri(self):
100 """Returns a URL for the database, usable to connect with SQLAlchemy.
101 """
102 return 'sqlite://{}'.format(self.filename)
103
104 # NICK FUNCTIONS
105
106 def get_nick_id(self, nick, create=True):
107 """Return the internal identifier for a given nick.
108
109 This identifier is unique to a user, and shared across all of that
110 user's aliases. If create is True, a new ID will be created if one does
111 not already exist"""
112 slug = nick.lower()
113 nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',
114 [slug]).fetchone()
115 if nick_id is None:
116 if not create:
117 raise ValueError('No ID exists for the given nick')
118 with self.connect() as conn:
119 cur = conn.cursor()
120 cur.execute('INSERT INTO nick_ids VALUES (NULL)')
121 nick_id = cur.execute('SELECT last_insert_rowid()').fetchone()[0]
122 cur.execute(
123 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES '
124 '(?, ?, ?)',
125 [nick_id, slug, nick]
126 )
127 nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',
128 [slug]).fetchone()
129 return nick_id[0]
130
131 def alias_nick(self, nick, alias):
132 """Create an alias for a nick.
133
134 Raises ValueError if the alias already exists. If nick does not already
135 exist, it will be added along with the alias."""
136 nick = Identifier(nick)
137 alias = Identifier(alias)
138 nick_id = self.get_nick_id(nick)
139 sql = 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES (?, ?, ?)'
140 values = [nick_id, alias.lower(), alias]
141 try:
142 self.execute(sql, values)
143 except sqlite3.IntegrityError:
144 raise ValueError('Alias already exists.')
145
146 def set_nick_value(self, nick, key, value):
147 """Sets the value for a given key to be associated with the nick."""
148 nick = Identifier(nick)
149 value = json.dumps(value, ensure_ascii=False)
150 nick_id = self.get_nick_id(nick)
151 self.execute('INSERT OR REPLACE INTO nick_values VALUES (?, ?, ?)',
152 [nick_id, key, value])
153
154 def get_nick_value(self, nick, key):
155 """Retrieves the value for a given key associated with a nick."""
156 nick = Identifier(nick)
157 result = self.execute(
158 'SELECT value FROM nicknames JOIN nick_values '
159 'ON nicknames.nick_id = nick_values.nick_id '
160 'WHERE slug = ? AND key = ?',
161 [nick.lower(), key]
162 ).fetchone()
163 if result is not None:
164 result = result[0]
165 return _deserialize(result)
166
167 def unalias_nick(self, alias):
168 """Removes an alias.
169
170 Raises ValueError if there is not at least one other nick in the group.
171 To delete an entire group, use `delete_group`.
172 """
173 alias = Identifier(alias)
174 nick_id = self.get_nick_id(alias, False)
175 count = self.execute('SELECT COUNT(*) FROM nicknames WHERE nick_id = ?',
176 [nick_id]).fetchone()[0]
177 if count == 0:
178 raise ValueError('Given alias is the only entry in its group.')
179 self.execute('DELETE FROM nicknames WHERE slug = ?', [alias.lower()])
180
181 def delete_nick_group(self, nick):
182 """Removes a nickname, and all associated aliases and settings.
183 """
184 nick = Identifier(nick)
185 nick_id = self.get_nick_id(nick, False)
186 self.execute('DELETE FROM nicknames WHERE nick_id = ?', [nick_id])
187 self.execute('DELETE FROM nick_values WHERE nick_id = ?', [nick_id])
188
189 def merge_nick_groups(self, first_nick, second_nick):
190 """Merges the nick groups for the specified nicks.
191
192 Takes two nicks, which may or may not be registered. Unregistered
193 nicks will be registered. Keys which are set for only one of the given
194 nicks will be preserved. Where multiple nicks have values for a given
195 key, the value set for the first nick will be used.
196
197 Note that merging of data only applies to the native key-value store.
198 If modules define their own tables which rely on the nick table, they
199 will need to have their merging done separately."""
200 first_id = self.get_nick_id(Identifier(first_nick))
201 second_id = self.get_nick_id(Identifier(second_nick))
202 self.execute(
203 'UPDATE OR IGNORE nick_values SET nick_id = ? WHERE nick_id = ?',
204 [first_id, second_id])
205 self.execute('DELETE FROM nick_values WHERE nick_id = ?', [second_id])
206 self.execute('UPDATE nicknames SET nick_id = ? WHERE nick_id = ?',
207 [first_id, second_id])
208
209 # CHANNEL FUNCTIONS
210
211 def set_channel_value(self, channel, key, value):
212 channel = Identifier(channel).lower()
213 value = json.dumps(value, ensure_ascii=False)
214 self.execute('INSERT OR REPLACE INTO channel_values VALUES (?, ?, ?)',
215 [channel, key, value])
216
217 def get_channel_value(self, channel, key):
218 """Retrieves the value for a given key associated with a channel."""
219 channel = Identifier(channel).lower()
220 result = self.execute(
221 'SELECT value FROM channel_values WHERE channel = ? AND key = ?',
222 [channel, key]
223 ).fetchone()
224 if result is not None:
225 result = result[0]
226 return _deserialize(result)
227
228 # NICK AND CHANNEL FUNCTIONS
229
230 def get_nick_or_channel_value(self, name, key):
231 """Gets the value `key` associated to the nick or channel `name`.
232 """
233 name = Identifier(name)
234 if name.is_nick():
235 return self.get_nick_value(name, key)
236 else:
237 return self.get_channel_value(name, key)
238
239 def get_preferred_value(self, names, key):
240 """Gets the value for the first name which has it set.
241
242 `names` is a list of channel and/or user names. Returns None if none of
243 the names have the key set."""
244 for name in names:
245 value = self.get_nick_or_channel_value(name, key)
246 if value is not None:
247 return value
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/db.py b/sopel/db.py
--- a/sopel/db.py
+++ b/sopel/db.py
@@ -174,7 +174,7 @@
nick_id = self.get_nick_id(alias, False)
count = self.execute('SELECT COUNT(*) FROM nicknames WHERE nick_id = ?',
[nick_id]).fetchone()[0]
- if count == 0:
+ if count <= 1:
raise ValueError('Given alias is the only entry in its group.')
self.execute('DELETE FROM nicknames WHERE slug = ?', [alias.lower()])
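The guard in `unalias_nick` failed because an alias that is the only member of its group still counts itself, so `COUNT(*)` returns 1 and the old `count == 0` test never fired. A self-contained sketch against an in-memory SQLite table (a simplified stand-in for Sopel's `nicknames` schema, not the real `SopelDB` API) shows the corrected check raising as the docstring promises:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE nicknames (nick_id INTEGER, slug TEXT PRIMARY KEY)")
conn.execute("INSERT INTO nicknames VALUES (1, 'thisisatestnick')")

def unalias(conn, slug):
    nick_id = conn.execute(
        "SELECT nick_id FROM nicknames WHERE slug = ?", [slug]).fetchone()[0]
    count = conn.execute(
        "SELECT COUNT(*) FROM nicknames WHERE nick_id = ?", [nick_id]).fetchone()[0]
    # A lone nick yields count == 1, so the previous `count == 0` test could never fire.
    if count <= 1:
        raise ValueError("Given alias is the only entry in its group.")
    conn.execute("DELETE FROM nicknames WHERE slug = ?", [slug])

try:
    unalias(conn, "thisisatestnick")  # sole member of its group
except ValueError as err:
    print(err)  # raises, matching the documented behaviour
```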
|
{"golden_diff": "diff --git a/sopel/db.py b/sopel/db.py\n--- a/sopel/db.py\n+++ b/sopel/db.py\n@@ -174,7 +174,7 @@\n nick_id = self.get_nick_id(alias, False)\n count = self.execute('SELECT COUNT(*) FROM nicknames WHERE nick_id = ?',\n [nick_id]).fetchone()[0]\n- if count == 0:\n+ if count <= 1:\n raise ValueError('Given alias is the only entry in its group.')\n self.execute('DELETE FROM nicknames WHERE slug = ?', [alias.lower()])\n", "issue": "bot.db.unalias_nick() behavior does not match docs\n> ```\n> Raises ValueError if there is not at least one other nick in the group.\n> ```\n\nThe ValueError is never raised:\n\n```\n<dgw> .nickunmerge ThisIsATestNick\n<Sopel> Removed ThisIsATestNick from nick group 1497.\n<dgw> that's not supposed to work\n<ThisIsATestNick> blerg\n<dgw> .nickunmerge ThisIsATestNick\n<Sopel> Removed ThisIsATestNick from nick group 1498.\n```\n\nI have traced this issue to a logic error, and will submit a proposed fix when I'm back in front of my laptop later tonight.\n\nIf it's more desirable to change the documentation than the behavior, I'd be happy to rewrite that instead\u2014but I believe that the described behavior is correct, and the observed behavior is a bug.\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport os.path\nimport sys\nimport sqlite3\n\nfrom sopel.tools import Identifier\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n\n\ndef _deserialize(value):\n if value is None:\n return None\n # sqlite likes to return ints for strings that look like ints, even though\n # the column type is string. That's how you do dynamic typing wrong.\n value = unicode(value)\n # Just in case someone's mucking with the DB in a way we can't account for,\n # ignore json parsing errors\n try:\n value = json.loads(value)\n except:\n pass\n return value\n\n\nclass SopelDB(object):\n \"\"\"*Availability: 5.0+*\n\n This defines an interface for basic, common operations on a sqlite\n database. 
It simplifies those common operations, and allows direct access\n to the database, wherever the user has configured it to be.\n\n When configured with a relative filename, it is assumed to be in the same\n directory as the config.\"\"\"\n\n def __init__(self, config):\n path = config.core.db_filename\n config_dir, config_file = os.path.split(config.filename)\n config_name, _ = os.path.splitext(config_file)\n if path is None:\n path = os.path.join(config_dir, config_name + '.db')\n path = os.path.expanduser(path)\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(config_dir, path))\n self.filename = path\n self._create()\n\n def connect(self):\n \"\"\"Return a raw database connection object.\"\"\"\n return sqlite3.connect(self.filename)\n\n def execute(self, *args, **kwargs):\n \"\"\"Execute an arbitrary SQL query against the database.\n\n Returns a cursor object, on which things like `.fetchall()` can be\n called per PEP 249.\"\"\"\n with self.connect() as conn:\n cur = conn.cursor()\n return cur.execute(*args, **kwargs)\n\n def _create(self):\n \"\"\"Create the basic database structure.\"\"\"\n # Do nothing if the db already exists.\n try:\n self.execute('SELECT * FROM nick_ids;')\n self.execute('SELECT * FROM nicknames;')\n self.execute('SELECT * FROM nick_values;')\n self.execute('SELECT * FROM channel_values;')\n except:\n pass\n else:\n return\n\n self.execute(\n 'CREATE TABLE nick_ids (nick_id INTEGER PRIMARY KEY AUTOINCREMENT)'\n )\n self.execute(\n 'CREATE TABLE nicknames '\n '(nick_id INTEGER REFERENCES nick_ids, '\n 'slug STRING PRIMARY KEY, canonical string)'\n )\n self.execute(\n 'CREATE TABLE nick_values '\n '(nick_id INTEGER REFERENCES nick_ids(nick_id), '\n 'key STRING, value STRING, '\n 'PRIMARY KEY (nick_id, key))'\n )\n self.execute(\n 'CREATE TABLE channel_values '\n '(channel STRING, key STRING, value STRING, '\n 'PRIMARY KEY (channel, key))'\n )\n\n def get_uri(self):\n \"\"\"Returns a URL for the database, usable to connect with SQLAlchemy.\n \"\"\"\n return 'sqlite://{}'.format(self.filename)\n\n # NICK FUNCTIONS\n\n def get_nick_id(self, nick, create=True):\n \"\"\"Return the internal identifier for a given nick.\n\n This identifier is unique to a user, and shared across all of that\n user's aliases. If create is True, a new ID will be created if one does\n not already exist\"\"\"\n slug = nick.lower()\n nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',\n [slug]).fetchone()\n if nick_id is None:\n if not create:\n raise ValueError('No ID exists for the given nick')\n with self.connect() as conn:\n cur = conn.cursor()\n cur.execute('INSERT INTO nick_ids VALUES (NULL)')\n nick_id = cur.execute('SELECT last_insert_rowid()').fetchone()[0]\n cur.execute(\n 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES '\n '(?, ?, ?)',\n [nick_id, slug, nick]\n )\n nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',\n [slug]).fetchone()\n return nick_id[0]\n\n def alias_nick(self, nick, alias):\n \"\"\"Create an alias for a nick.\n\n Raises ValueError if the alias already exists. 
If nick does not already\n exist, it will be added along with the alias.\"\"\"\n nick = Identifier(nick)\n alias = Identifier(alias)\n nick_id = self.get_nick_id(nick)\n sql = 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES (?, ?, ?)'\n values = [nick_id, alias.lower(), alias]\n try:\n self.execute(sql, values)\n except sqlite3.IntegrityError:\n raise ValueError('Alias already exists.')\n\n def set_nick_value(self, nick, key, value):\n \"\"\"Sets the value for a given key to be associated with the nick.\"\"\"\n nick = Identifier(nick)\n value = json.dumps(value, ensure_ascii=False)\n nick_id = self.get_nick_id(nick)\n self.execute('INSERT OR REPLACE INTO nick_values VALUES (?, ?, ?)',\n [nick_id, key, value])\n\n def get_nick_value(self, nick, key):\n \"\"\"Retrieves the value for a given key associated with a nick.\"\"\"\n nick = Identifier(nick)\n result = self.execute(\n 'SELECT value FROM nicknames JOIN nick_values '\n 'ON nicknames.nick_id = nick_values.nick_id '\n 'WHERE slug = ? AND key = ?',\n [nick.lower(), key]\n ).fetchone()\n if result is not None:\n result = result[0]\n return _deserialize(result)\n\n def unalias_nick(self, alias):\n \"\"\"Removes an alias.\n\n Raises ValueError if there is not at least one other nick in the group.\n To delete an entire group, use `delete_group`.\n \"\"\"\n alias = Identifier(alias)\n nick_id = self.get_nick_id(alias, False)\n count = self.execute('SELECT COUNT(*) FROM nicknames WHERE nick_id = ?',\n [nick_id]).fetchone()[0]\n if count == 0:\n raise ValueError('Given alias is the only entry in its group.')\n self.execute('DELETE FROM nicknames WHERE slug = ?', [alias.lower()])\n\n def delete_nick_group(self, nick):\n \"\"\"Removes a nickname, and all associated aliases and settings.\n \"\"\"\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n self.execute('DELETE FROM nicknames WHERE nick_id = ?', [nick_id])\n self.execute('DELETE FROM nick_values WHERE nick_id = ?', [nick_id])\n\n def merge_nick_groups(self, first_nick, second_nick):\n \"\"\"Merges the nick groups for the specified nicks.\n\n Takes two nicks, which may or may not be registered. Unregistered\n nicks will be registered. Keys which are set for only one of the given\n nicks will be preserved. Where multiple nicks have values for a given\n key, the value set for the first nick will be used.\n\n Note that merging of data only applies to the native key-value store.\n If modules define their own tables which rely on the nick table, they\n will need to have their merging done separately.\"\"\"\n first_id = self.get_nick_id(Identifier(first_nick))\n second_id = self.get_nick_id(Identifier(second_nick))\n self.execute(\n 'UPDATE OR IGNORE nick_values SET nick_id = ? WHERE nick_id = ?',\n [first_id, second_id])\n self.execute('DELETE FROM nick_values WHERE nick_id = ?', [second_id])\n self.execute('UPDATE nicknames SET nick_id = ? WHERE nick_id = ?',\n [first_id, second_id])\n\n # CHANNEL FUNCTIONS\n\n def set_channel_value(self, channel, key, value):\n channel = Identifier(channel).lower()\n value = json.dumps(value, ensure_ascii=False)\n self.execute('INSERT OR REPLACE INTO channel_values VALUES (?, ?, ?)',\n [channel, key, value])\n\n def get_channel_value(self, channel, key):\n \"\"\"Retrieves the value for a given key associated with a channel.\"\"\"\n channel = Identifier(channel).lower()\n result = self.execute(\n 'SELECT value FROM channel_values WHERE channel = ? 
AND key = ?',\n [channel, key]\n ).fetchone()\n if result is not None:\n result = result[0]\n return _deserialize(result)\n\n # NICK AND CHANNEL FUNCTIONS\n\n def get_nick_or_channel_value(self, name, key):\n \"\"\"Gets the value `key` associated to the nick or channel `name`.\n \"\"\"\n name = Identifier(name)\n if name.is_nick():\n return self.get_nick_value(name, key)\n else:\n return self.get_channel_value(name, key)\n\n def get_preferred_value(self, names, key):\n \"\"\"Gets the value for the first name which has it set.\n\n `names` is a list of channel and/or user names. Returns None if none of\n the names have the key set.\"\"\"\n for name in names:\n value = self.get_nick_or_channel_value(name, key)\n if value is not None:\n return value\n", "path": "sopel/db.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport os.path\nimport sys\nimport sqlite3\n\nfrom sopel.tools import Identifier\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = str\n\n\ndef _deserialize(value):\n if value is None:\n return None\n # sqlite likes to return ints for strings that look like ints, even though\n # the column type is string. That's how you do dynamic typing wrong.\n value = unicode(value)\n # Just in case someone's mucking with the DB in a way we can't account for,\n # ignore json parsing errors\n try:\n value = json.loads(value)\n except:\n pass\n return value\n\n\nclass SopelDB(object):\n \"\"\"*Availability: 5.0+*\n\n This defines an interface for basic, common operations on a sqlite\n database. It simplifies those common operations, and allows direct access\n to the database, wherever the user has configured it to be.\n\n When configured with a relative filename, it is assumed to be in the same\n directory as the config.\"\"\"\n\n def __init__(self, config):\n path = config.core.db_filename\n config_dir, config_file = os.path.split(config.filename)\n config_name, _ = os.path.splitext(config_file)\n if path is None:\n path = os.path.join(config_dir, config_name + '.db')\n path = os.path.expanduser(path)\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.join(config_dir, path))\n self.filename = path\n self._create()\n\n def connect(self):\n \"\"\"Return a raw database connection object.\"\"\"\n return sqlite3.connect(self.filename)\n\n def execute(self, *args, **kwargs):\n \"\"\"Execute an arbitrary SQL query against the database.\n\n Returns a cursor object, on which things like `.fetchall()` can be\n called per PEP 249.\"\"\"\n with self.connect() as conn:\n cur = conn.cursor()\n return cur.execute(*args, **kwargs)\n\n def _create(self):\n \"\"\"Create the basic database structure.\"\"\"\n # Do nothing if the db already exists.\n try:\n self.execute('SELECT * FROM nick_ids;')\n self.execute('SELECT * FROM nicknames;')\n self.execute('SELECT * FROM nick_values;')\n self.execute('SELECT * FROM channel_values;')\n except:\n pass\n else:\n return\n\n self.execute(\n 'CREATE TABLE nick_ids (nick_id INTEGER PRIMARY KEY AUTOINCREMENT)'\n )\n self.execute(\n 'CREATE TABLE nicknames '\n '(nick_id INTEGER REFERENCES nick_ids, '\n 'slug STRING PRIMARY KEY, canonical string)'\n )\n self.execute(\n 'CREATE TABLE nick_values '\n '(nick_id INTEGER REFERENCES nick_ids(nick_id), '\n 'key STRING, value STRING, '\n 'PRIMARY KEY (nick_id, key))'\n )\n self.execute(\n 'CREATE TABLE channel_values '\n '(channel STRING, key STRING, value STRING, '\n 'PRIMARY KEY (channel, key))'\n )\n\n 
def get_uri(self):\n \"\"\"Returns a URL for the database, usable to connect with SQLAlchemy.\n \"\"\"\n return 'sqlite://{}'.format(self.filename)\n\n # NICK FUNCTIONS\n\n def get_nick_id(self, nick, create=True):\n \"\"\"Return the internal identifier for a given nick.\n\n This identifier is unique to a user, and shared across all of that\n user's aliases. If create is True, a new ID will be created if one does\n not already exist\"\"\"\n slug = nick.lower()\n nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',\n [slug]).fetchone()\n if nick_id is None:\n if not create:\n raise ValueError('No ID exists for the given nick')\n with self.connect() as conn:\n cur = conn.cursor()\n cur.execute('INSERT INTO nick_ids VALUES (NULL)')\n nick_id = cur.execute('SELECT last_insert_rowid()').fetchone()[0]\n cur.execute(\n 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES '\n '(?, ?, ?)',\n [nick_id, slug, nick]\n )\n nick_id = self.execute('SELECT nick_id from nicknames where slug = ?',\n [slug]).fetchone()\n return nick_id[0]\n\n def alias_nick(self, nick, alias):\n \"\"\"Create an alias for a nick.\n\n Raises ValueError if the alias already exists. If nick does not already\n exist, it will be added along with the alias.\"\"\"\n nick = Identifier(nick)\n alias = Identifier(alias)\n nick_id = self.get_nick_id(nick)\n sql = 'INSERT INTO nicknames (nick_id, slug, canonical) VALUES (?, ?, ?)'\n values = [nick_id, alias.lower(), alias]\n try:\n self.execute(sql, values)\n except sqlite3.IntegrityError:\n raise ValueError('Alias already exists.')\n\n def set_nick_value(self, nick, key, value):\n \"\"\"Sets the value for a given key to be associated with the nick.\"\"\"\n nick = Identifier(nick)\n value = json.dumps(value, ensure_ascii=False)\n nick_id = self.get_nick_id(nick)\n self.execute('INSERT OR REPLACE INTO nick_values VALUES (?, ?, ?)',\n [nick_id, key, value])\n\n def get_nick_value(self, nick, key):\n \"\"\"Retrieves the value for a given key associated with a nick.\"\"\"\n nick = Identifier(nick)\n result = self.execute(\n 'SELECT value FROM nicknames JOIN nick_values '\n 'ON nicknames.nick_id = nick_values.nick_id '\n 'WHERE slug = ? AND key = ?',\n [nick.lower(), key]\n ).fetchone()\n if result is not None:\n result = result[0]\n return _deserialize(result)\n\n def unalias_nick(self, alias):\n \"\"\"Removes an alias.\n\n Raises ValueError if there is not at least one other nick in the group.\n To delete an entire group, use `delete_group`.\n \"\"\"\n alias = Identifier(alias)\n nick_id = self.get_nick_id(alias, False)\n count = self.execute('SELECT COUNT(*) FROM nicknames WHERE nick_id = ?',\n [nick_id]).fetchone()[0]\n if count <= 1:\n raise ValueError('Given alias is the only entry in its group.')\n self.execute('DELETE FROM nicknames WHERE slug = ?', [alias.lower()])\n\n def delete_nick_group(self, nick):\n \"\"\"Removes a nickname, and all associated aliases and settings.\n \"\"\"\n nick = Identifier(nick)\n nick_id = self.get_nick_id(nick, False)\n self.execute('DELETE FROM nicknames WHERE nick_id = ?', [nick_id])\n self.execute('DELETE FROM nick_values WHERE nick_id = ?', [nick_id])\n\n def merge_nick_groups(self, first_nick, second_nick):\n \"\"\"Merges the nick groups for the specified nicks.\n\n Takes two nicks, which may or may not be registered. Unregistered\n nicks will be registered. Keys which are set for only one of the given\n nicks will be preserved. 
Where multiple nicks have values for a given\n key, the value set for the first nick will be used.\n\n Note that merging of data only applies to the native key-value store.\n If modules define their own tables which rely on the nick table, they\n will need to have their merging done separately.\"\"\"\n first_id = self.get_nick_id(Identifier(first_nick))\n second_id = self.get_nick_id(Identifier(second_nick))\n self.execute(\n 'UPDATE OR IGNORE nick_values SET nick_id = ? WHERE nick_id = ?',\n [first_id, second_id])\n self.execute('DELETE FROM nick_values WHERE nick_id = ?', [second_id])\n self.execute('UPDATE nicknames SET nick_id = ? WHERE nick_id = ?',\n [first_id, second_id])\n\n # CHANNEL FUNCTIONS\n\n def set_channel_value(self, channel, key, value):\n channel = Identifier(channel).lower()\n value = json.dumps(value, ensure_ascii=False)\n self.execute('INSERT OR REPLACE INTO channel_values VALUES (?, ?, ?)',\n [channel, key, value])\n\n def get_channel_value(self, channel, key):\n \"\"\"Retrieves the value for a given key associated with a channel.\"\"\"\n channel = Identifier(channel).lower()\n result = self.execute(\n 'SELECT value FROM channel_values WHERE channel = ? AND key = ?',\n [channel, key]\n ).fetchone()\n if result is not None:\n result = result[0]\n return _deserialize(result)\n\n # NICK AND CHANNEL FUNCTIONS\n\n def get_nick_or_channel_value(self, name, key):\n \"\"\"Gets the value `key` associated to the nick or channel `name`.\n \"\"\"\n name = Identifier(name)\n if name.is_nick():\n return self.get_nick_value(name, key)\n else:\n return self.get_channel_value(name, key)\n\n def get_preferred_value(self, names, key):\n \"\"\"Gets the value for the first name which has it set.\n\n `names` is a list of channel and/or user names. Returns None if none of\n the names have the key set.\"\"\"\n for name in names:\n value = self.get_nick_or_channel_value(name, key)\n if value is not None:\n return value\n", "path": "sopel/db.py"}]}
| 3,138 | 132 |
gh_patches_debug_16325
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-670
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rio stack output empty
`rio stack`ing one or more rasters without an explicit band index results in a raster with all nulls
```
$ rio info --tell-me-more tests/data/RGB.byte.tif | jq .stats[0].max
255
$ rio stack tests/data/RGB.byte.tif /tmp/test.tif && \
rio info --tell-me-more /tmp/test.tif | jq .stats[0].max
null
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/stack.py`
Content:
```
1 """Commands for operating on bands of datasets."""
2 import logging
3
4 import click
5 from cligj import files_inout_arg, format_opt
6
7 from .helpers import resolve_inout
8 from . import options
9 import rasterio
10 from rasterio.five import zip_longest
11
12
13 # Stack command.
14 @click.command(short_help="Stack a number of bands into a multiband dataset.")
15 @files_inout_arg
16 @options.output_opt
17 @format_opt
18 @options.bidx_mult_opt
19 @options.rgb_opt
20 @options.force_overwrite_opt
21 @options.creation_options
22 @click.pass_context
23 def stack(ctx, files, output, driver, bidx, photometric, force_overwrite,
24 creation_options):
25 """Stack a number of bands from one or more input files into a
26 multiband dataset.
27
28 Input datasets must be of a kind: same data type, dimensions, etc. The
29 output is cloned from the first input.
30
31 By default, rio-stack will take all bands from each input and write them
32 in same order to the output. Optionally, bands for each input may be
33 specified using a simple syntax:
34
35 --bidx N takes the Nth band from the input (first band is 1).
36
37 --bidx M,N,0 takes bands M, N, and O.
38
39 --bidx M..O takes bands M-O, inclusive.
40
41 --bidx ..N takes all bands up to and including N.
42
43 --bidx N.. takes all bands from N to the end.
44
45 Examples, using the Rasterio testing dataset, which produce a copy.
46
47 rio stack RGB.byte.tif -o stacked.tif
48
49 rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif
50
51 rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif
52
53 rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif
54
55 """
56
57 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2
58 logger = logging.getLogger('rio')
59 try:
60 with rasterio.drivers(CPL_DEBUG=verbosity>2):
61 output, files = resolve_inout(files=files, output=output,
62 force_overwrite=force_overwrite)
63 output_count = 0
64 indexes = []
65 for path, item in zip_longest(files, bidx, fillvalue=None):
66 with rasterio.open(path) as src:
67 src_indexes = src.indexes
68 if item is None:
69 indexes.append(src_indexes)
70 output_count += len(src_indexes)
71 elif '..' in item:
72 start, stop = map(
73 lambda x: int(x) if x else None, item.split('..'))
74 if start is None:
75 start = 1
76 indexes.append(src_indexes[slice(start-1, stop)])
77 output_count += len(src_indexes[slice(start-1, stop)])
78 else:
79 parts = list(map(int, item.split(',')))
80 if len(parts) == 1:
81 indexes.append(parts[0])
82 output_count += 1
83 else:
84 parts = list(parts)
85 indexes.append(parts)
86 output_count += len(parts)
87
88 with rasterio.open(files[0]) as first:
89 kwargs = first.meta
90 kwargs.update(**creation_options)
91 kwargs['transform'] = kwargs.pop('affine')
92
93 kwargs.update(
94 driver=driver,
95 count=output_count)
96
97 if photometric:
98 kwargs['photometric'] = photometric
99
100 with rasterio.open(output, 'w', **kwargs) as dst:
101 dst_idx = 1
102 for path, index in zip(files, indexes):
103 with rasterio.open(path) as src:
104 if isinstance(index, int):
105 data = src.read(index)
106 dst.write(data, dst_idx)
107 dst_idx += 1
108 elif isinstance(index, list):
109 data = src.read(index)
110 dst.write(data, range(dst_idx, dst_idx+len(index)))
111 dst_idx += len(index)
112
113 except Exception:
114 logger.exception("Exception caught during processing")
115 raise click.Abort()
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/rio/stack.py b/rasterio/rio/stack.py
--- a/rasterio/rio/stack.py
+++ b/rasterio/rio/stack.py
@@ -1,4 +1,5 @@
"""Commands for operating on bands of datasets."""
+import collections
import logging
import click
@@ -105,7 +106,7 @@
data = src.read(index)
dst.write(data, dst_idx)
dst_idx += 1
- elif isinstance(index, list):
+ elif isinstance(index, collections.Iterable):
data = src.read(index)
dst.write(data, range(dst_idx, dst_idx+len(index)))
dst_idx += len(index)
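The empty output traces back to the default (no `--bidx`) path: it stores `src.indexes`, which is not a plain Python `list`, so neither `isinstance` branch matched and no bands were ever written. A standalone check of the type test (independent of rasterio; newer Python exposes the ABC as `collections.abc.Iterable`) shows why widening it fixes the command:

```python
import collections.abc

default_indexes = (1, 2, 3)  # tuple-like band indexes, as stored when no --bidx is given

print(isinstance(default_indexes, list))                      # False: old branch skipped, nothing written
print(isinstance(default_indexes, collections.abc.Iterable))  # True: patched branch writes every band
```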
|
{"golden_diff": "diff --git a/rasterio/rio/stack.py b/rasterio/rio/stack.py\n--- a/rasterio/rio/stack.py\n+++ b/rasterio/rio/stack.py\n@@ -1,4 +1,5 @@\n \"\"\"Commands for operating on bands of datasets.\"\"\"\n+import collections\n import logging\n \n import click\n@@ -105,7 +106,7 @@\n data = src.read(index)\n dst.write(data, dst_idx)\n dst_idx += 1\n- elif isinstance(index, list):\n+ elif isinstance(index, collections.Iterable):\n data = src.read(index)\n dst.write(data, range(dst_idx, dst_idx+len(index)))\n dst_idx += len(index)\n", "issue": "rio stack output empty\n`rio stack`ing one or more rasters without an explicit band index results in a raster with all nulls\n\n```\n$ rio info --tell-me-more tests/data/RGB.byte.tif | jq .stats[0].max\n255\n$ rio stack tests/data/RGB.byte.tif /tmp/test.tif && \\\n rio info --tell-me-more /tmp/test.tif | jq .stats[0].max\nnull\n```\n\n", "before_files": [{"content": "\"\"\"Commands for operating on bands of datasets.\"\"\"\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\nfrom rasterio.five import zip_longest\n\n\n# Stack command.\[email protected](short_help=\"Stack a number of bands into a multiband dataset.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_mult_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]_options\[email protected]_context\ndef stack(ctx, files, output, driver, bidx, photometric, force_overwrite,\n creation_options):\n \"\"\"Stack a number of bands from one or more input files into a\n multiband dataset.\n\n Input datasets must be of a kind: same data type, dimensions, etc. The\n output is cloned from the first input.\n\n By default, rio-stack will take all bands from each input and write them\n in same order to the output. Optionally, bands for each input may be\n specified using a simple syntax:\n\n --bidx N takes the Nth band from the input (first band is 1).\n\n --bidx M,N,0 takes bands M, N, and O.\n\n --bidx M..O takes bands M-O, inclusive.\n\n --bidx ..N takes all bands up to and including N.\n\n --bidx N.. takes all bands from N to the end.\n\n Examples, using the Rasterio testing dataset, which produce a copy.\n\n rio stack RGB.byte.tif -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif\n\n \"\"\"\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2\n logger = logging.getLogger('rio')\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity>2):\n output, files = resolve_inout(files=files, output=output,\n force_overwrite=force_overwrite)\n output_count = 0\n indexes = []\n for path, item in zip_longest(files, bidx, fillvalue=None):\n with rasterio.open(path) as src:\n src_indexes = src.indexes\n if item is None:\n indexes.append(src_indexes)\n output_count += len(src_indexes)\n elif '..' 
in item:\n start, stop = map(\n lambda x: int(x) if x else None, item.split('..'))\n if start is None:\n start = 1\n indexes.append(src_indexes[slice(start-1, stop)])\n output_count += len(src_indexes[slice(start-1, stop)])\n else:\n parts = list(map(int, item.split(',')))\n if len(parts) == 1:\n indexes.append(parts[0])\n output_count += 1\n else:\n parts = list(parts)\n indexes.append(parts)\n output_count += len(parts)\n\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs.update(**creation_options)\n kwargs['transform'] = kwargs.pop('affine')\n\n kwargs.update(\n driver=driver,\n count=output_count)\n\n if photometric:\n kwargs['photometric'] = photometric\n\n with rasterio.open(output, 'w', **kwargs) as dst:\n dst_idx = 1\n for path, index in zip(files, indexes):\n with rasterio.open(path) as src:\n if isinstance(index, int):\n data = src.read(index)\n dst.write(data, dst_idx)\n dst_idx += 1\n elif isinstance(index, list):\n data = src.read(index)\n dst.write(data, range(dst_idx, dst_idx+len(index)))\n dst_idx += len(index)\n\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()\n", "path": "rasterio/rio/stack.py"}], "after_files": [{"content": "\"\"\"Commands for operating on bands of datasets.\"\"\"\nimport collections\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\nfrom rasterio.five import zip_longest\n\n\n# Stack command.\[email protected](short_help=\"Stack a number of bands into a multiband dataset.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_mult_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]_options\[email protected]_context\ndef stack(ctx, files, output, driver, bidx, photometric, force_overwrite,\n creation_options):\n \"\"\"Stack a number of bands from one or more input files into a\n multiband dataset.\n\n Input datasets must be of a kind: same data type, dimensions, etc. The\n output is cloned from the first input.\n\n By default, rio-stack will take all bands from each input and write them\n in same order to the output. Optionally, bands for each input may be\n specified using a simple syntax:\n\n --bidx N takes the Nth band from the input (first band is 1).\n\n --bidx M,N,0 takes bands M, N, and O.\n\n --bidx M..O takes bands M-O, inclusive.\n\n --bidx ..N takes all bands up to and including N.\n\n --bidx N.. takes all bands from N to the end.\n\n Examples, using the Rasterio testing dataset, which produce a copy.\n\n rio stack RGB.byte.tif -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif\n\n rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif\n\n \"\"\"\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2\n logger = logging.getLogger('rio')\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity>2):\n output, files = resolve_inout(files=files, output=output,\n force_overwrite=force_overwrite)\n output_count = 0\n indexes = []\n for path, item in zip_longest(files, bidx, fillvalue=None):\n with rasterio.open(path) as src:\n src_indexes = src.indexes\n if item is None:\n indexes.append(src_indexes)\n output_count += len(src_indexes)\n elif '..' 
in item:\n start, stop = map(\n lambda x: int(x) if x else None, item.split('..'))\n if start is None:\n start = 1\n indexes.append(src_indexes[slice(start-1, stop)])\n output_count += len(src_indexes[slice(start-1, stop)])\n else:\n parts = list(map(int, item.split(',')))\n if len(parts) == 1:\n indexes.append(parts[0])\n output_count += 1\n else:\n parts = list(parts)\n indexes.append(parts)\n output_count += len(parts)\n\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs.update(**creation_options)\n kwargs['transform'] = kwargs.pop('affine')\n\n kwargs.update(\n driver=driver,\n count=output_count)\n\n if photometric:\n kwargs['photometric'] = photometric\n\n with rasterio.open(output, 'w', **kwargs) as dst:\n dst_idx = 1\n for path, index in zip(files, indexes):\n with rasterio.open(path) as src:\n if isinstance(index, int):\n data = src.read(index)\n dst.write(data, dst_idx)\n dst_idx += 1\n elif isinstance(index, collections.Iterable):\n data = src.read(index)\n dst.write(data, range(dst_idx, dst_idx+len(index)))\n dst_idx += len(index)\n\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()\n", "path": "rasterio/rio/stack.py"}]}
| 1,483 | 155 |
gh_patches_debug_25922
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-5834
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restoring tarball to Focal shows error for v2
## Description
When restoring a v2-only Xenial backup tarball to a v3-only Focal instance, the restore action fails. It fails even if the admin explicitly requests that the tor config be preserved as-is.
## Steps to Reproduce
I used libvirt-based VMs for testing, and performed all admin actions from a virtualized Tails v4.16 VM.
1. Create a v2-only backup tarball from a Xenial host.
2. Perform a clean install of Focal, with v3-only vars.
3. Attempt to restore the backup: `./securedrop-admin --force restore --preserve-tor-config ~/Persistent/backups/xenial-v2-only/sd-backup-2021-02-26--15-57-06.tar.gz`
## Expected Behavior
Restore action completes, old URLs are restored, and I can proceed with regenerating new v3 URL and finalizing the Xenial -> Focal migration.
## Actual Behavior
Restore action fails. Even when I include the `--preserve-tor-config` flag, it still fails.
## Comments
On one hand, the failure is expected, since Focal is v3-only, but in the context of a migration from Xenial, it's likely we're going to have admins migrating to Focal from a recently created backup, so I recommend we defer the fail-closed behavior to a subsequent release. That'd have bearing on WIP docs changes in e.g. https://github.com/freedomofpress/securedrop-docs/pull/133
The above is a policy question, but this ticket is also pointing out some bugs that should be fixed. For one, `--preserve-tor-config` is not honored, and it should be.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install_files/ansible-base/roles/restore/files/compare_torrc.py`
Content:
```
1 #!/usr/bin/env python
2
3 #
4 # Compares Tor configurations on the app server and from a backup. If
5 # restoring the backup would alter the server's Tor configuration,
6 # print a warning and exit.
7 #
8
9 from __future__ import print_function
10
11 import os
12 import re
13 import sys
14
15
16 def get_tor_versions(path):
17 """
18 Determine which service versions are offered in the given torrc.
19 """
20 service_re = re.compile(r"HiddenServiceDir\s+(?:.*)/(.*)")
21 versions = set([])
22 with open(path) as f:
23 for line in f:
24 m = service_re.match(line)
25 if m:
26 service = m.group(1)
27 if "v3" in service:
28 versions.add(3)
29 else:
30 versions.add(2)
31
32 return versions
33
34
35 def strset(s):
36 """
37 Sort the given set and join members with "and".
38 """
39 return " and ".join(str(v) for v in sorted(s))
40
41
42 if __name__ == "__main__":
43 tempdir = sys.argv[1]
44
45 server_versions = get_tor_versions(os.path.join(tempdir, "app/etc/tor/torrc"))
46 backup_versions = get_tor_versions(os.path.join(tempdir, "backup/etc/tor/torrc"))
47
48 if server_versions == backup_versions:
49 print("The Tor configuration in the backup matches the server.")
50 sys.exit(0)
51
52 if (3 in server_versions) and (3 in backup_versions):
53 print("V3 services detected in backup and server - proceeding with v3-only restore")
54 sys.exit(0)
55
56 print(
57 "The Tor configuration on the app server offers version {} services.".format(
58 strset(server_versions)
59 )
60 )
61
62 print(
63 "The Tor configuration in this backup offers version {} services.".format(
64 strset(backup_versions)
65 )
66 )
67
68 print("\nRestoring a backup with a different Tor configuration than the server ")
69 print("is currently unsupported. If you require technical assistance, please ")
70 print("contact the SecureDrop team via the support portal or at ")
71 print("[email protected].")
72
73 sys.exit(1)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/install_files/ansible-base/roles/restore/files/compare_torrc.py b/install_files/ansible-base/roles/restore/files/compare_torrc.py
--- a/install_files/ansible-base/roles/restore/files/compare_torrc.py
+++ b/install_files/ansible-base/roles/restore/files/compare_torrc.py
@@ -46,11 +46,11 @@
backup_versions = get_tor_versions(os.path.join(tempdir, "backup/etc/tor/torrc"))
if server_versions == backup_versions:
- print("The Tor configuration in the backup matches the server.")
+ print("Valid configuration: the Tor configuration in the backup matches the server.")
sys.exit(0)
if (3 in server_versions) and (3 in backup_versions):
- print("V3 services detected in backup and server - proceeding with v3-only restore")
+ print("Valid configuration: V3 services only`")
sys.exit(0)
print(
@@ -65,9 +65,11 @@
)
)
- print("\nRestoring a backup with a different Tor configuration than the server ")
- print("is currently unsupported. If you require technical assistance, please ")
- print("contact the SecureDrop team via the support portal or at ")
+ print("\nIncompatible configuration: Restoring a backup including a different ")
+ print("Tor configuration than the server Tor configuration is unsupported. ")
+ print("Optionally, use --preserve-tor-config to apply a data-only backup.")
+ print("If you require technical assistance, please contact the ")
+ print("SecureDrop team via the support portal or at ")
print("[email protected].")
sys.exit(1)
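The compatibility rule enforced by this script reduces to a small predicate over the two sets of offered versions; the patch only rewords the surrounding messages and points admins at `--preserve-tor-config`, a flag handled by the wider restore tooling rather than by this comparison. A standalone sketch of the predicate (plain sets standing in for parsed torrc files) shows which combinations pass:

```python
def compatible(server_versions, backup_versions):
    # Identical offerings, or v3 present on both sides, are accepted;
    # anything else (e.g. a v2-only backup against a v3-only server) is rejected.
    if server_versions == backup_versions:
        return True
    return 3 in server_versions and 3 in backup_versions

print(compatible({3}, {2}))     # False: v2-only Xenial backup onto a v3-only Focal server
print(compatible({2, 3}, {3}))  # True:  both sides offer v3
print(compatible({2}, {2}))     # True:  identical configurations
```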
|
{"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/compare_torrc.py b/install_files/ansible-base/roles/restore/files/compare_torrc.py\n--- a/install_files/ansible-base/roles/restore/files/compare_torrc.py\n+++ b/install_files/ansible-base/roles/restore/files/compare_torrc.py\n@@ -46,11 +46,11 @@\n backup_versions = get_tor_versions(os.path.join(tempdir, \"backup/etc/tor/torrc\"))\n \n if server_versions == backup_versions:\n- print(\"The Tor configuration in the backup matches the server.\")\n+ print(\"Valid configuration: the Tor configuration in the backup matches the server.\")\n sys.exit(0)\n \n if (3 in server_versions) and (3 in backup_versions):\n- print(\"V3 services detected in backup and server - proceeding with v3-only restore\")\n+ print(\"Valid configuration: V3 services only`\")\n sys.exit(0)\n \n print(\n@@ -65,9 +65,11 @@\n )\n )\n \n- print(\"\\nRestoring a backup with a different Tor configuration than the server \")\n- print(\"is currently unsupported. If you require technical assistance, please \")\n- print(\"contact the SecureDrop team via the support portal or at \")\n+ print(\"\\nIncompatible configuration: Restoring a backup including a different \")\n+ print(\"Tor configuration than the server Tor configuration is unsupported. \")\n+ print(\"Optionally, use --preserve-tor-config to apply a data-only backup.\")\n+ print(\"If you require technical assistance, please contact the \")\n+ print(\"SecureDrop team via the support portal or at \")\n print(\"[email protected].\")\n \n sys.exit(1)\n", "issue": "Restoring tarball to Focal shows error for v2\n## Description\r\n\r\nWhen restoring a v2-only Xenial backup tarball to a v3-only Focal instance, the restore action fails. It fails even if the admin explicitly requests that the tor config be preserved as-is. \r\n\r\n## Steps to Reproduce\r\nI used libvirt-based VMs for testing, and performed all admin actions from a virtualized Tails v4.16 VM.\r\n\r\n1. Create a v2-only backup tarball from a Xenial host.\r\n2. Perform a clean install of Focal, with v3-only vars.\r\n3. Attempt to restore the backup: `./securedrop-admin --force restore --preserve-tor-config ~/Persistent/backups/xenial-v2-only/sd-backup-2021-02-26--15-57-06.tar.gz`\r\n\r\n## Expected Behavior\r\n\r\nRestore action completes, old URLs are restored, and I can proceed with regenerating new v3 URL and finalizing the Xenial -> Focal migration. \r\n\r\n\r\n## Actual Behavior\r\n\r\nRestore action fails. Even when I include the `--preserve-tor-config` flag, it still fails. \r\n\r\n## Comments\r\nOn one hand, the failure is expected, since Focal is v3-only, but in the context of a migration from Xenial, it's likely we're going to have admins migrating to Focal from a recently created backup, so I recommend we defer the fail-closed behavior to a subsequent release. That'd have bearing on WIP docs changes in e..g. https://github.com/freedomofpress/securedrop-docs/pull/133\r\n\r\nThe above is a policy question, but this ticket is also pointing out some bugs that should be fixed. For one, `--preserve-tor-config` is not honored, and it should be.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Compares Tor configurations on the app server and from a backup. 
If\n# restoring the backup would alter the server's Tor configuration,\n# print a warning and exit.\n#\n\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\n\ndef get_tor_versions(path):\n \"\"\"\n Determine which service versions are offered in the given torrc.\n \"\"\"\n service_re = re.compile(r\"HiddenServiceDir\\s+(?:.*)/(.*)\")\n versions = set([])\n with open(path) as f:\n for line in f:\n m = service_re.match(line)\n if m:\n service = m.group(1)\n if \"v3\" in service:\n versions.add(3)\n else:\n versions.add(2)\n\n return versions\n\n\ndef strset(s):\n \"\"\"\n Sort the given set and join members with \"and\".\n \"\"\"\n return \" and \".join(str(v) for v in sorted(s))\n\n\nif __name__ == \"__main__\":\n tempdir = sys.argv[1]\n\n server_versions = get_tor_versions(os.path.join(tempdir, \"app/etc/tor/torrc\"))\n backup_versions = get_tor_versions(os.path.join(tempdir, \"backup/etc/tor/torrc\"))\n\n if server_versions == backup_versions:\n print(\"The Tor configuration in the backup matches the server.\")\n sys.exit(0)\n\n if (3 in server_versions) and (3 in backup_versions):\n print(\"V3 services detected in backup and server - proceeding with v3-only restore\")\n sys.exit(0)\n\n print(\n \"The Tor configuration on the app server offers version {} services.\".format(\n strset(server_versions)\n )\n )\n\n print(\n \"The Tor configuration in this backup offers version {} services.\".format(\n strset(backup_versions)\n )\n )\n\n print(\"\\nRestoring a backup with a different Tor configuration than the server \")\n print(\"is currently unsupported. If you require technical assistance, please \")\n print(\"contact the SecureDrop team via the support portal or at \")\n print(\"[email protected].\")\n\n sys.exit(1)\n", "path": "install_files/ansible-base/roles/restore/files/compare_torrc.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n#\n# Compares Tor configurations on the app server and from a backup. 
If\n# restoring the backup would alter the server's Tor configuration,\n# print a warning and exit.\n#\n\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\n\ndef get_tor_versions(path):\n \"\"\"\n Determine which service versions are offered in the given torrc.\n \"\"\"\n service_re = re.compile(r\"HiddenServiceDir\\s+(?:.*)/(.*)\")\n versions = set([])\n with open(path) as f:\n for line in f:\n m = service_re.match(line)\n if m:\n service = m.group(1)\n if \"v3\" in service:\n versions.add(3)\n else:\n versions.add(2)\n\n return versions\n\n\ndef strset(s):\n \"\"\"\n Sort the given set and join members with \"and\".\n \"\"\"\n return \" and \".join(str(v) for v in sorted(s))\n\n\nif __name__ == \"__main__\":\n tempdir = sys.argv[1]\n\n server_versions = get_tor_versions(os.path.join(tempdir, \"app/etc/tor/torrc\"))\n backup_versions = get_tor_versions(os.path.join(tempdir, \"backup/etc/tor/torrc\"))\n\n if server_versions == backup_versions:\n print(\"Valid configuration: the Tor configuration in the backup matches the server.\")\n sys.exit(0)\n\n if (3 in server_versions) and (3 in backup_versions):\n print(\"Valid configuration: V3 services only`\")\n sys.exit(0)\n\n print(\n \"The Tor configuration on the app server offers version {} services.\".format(\n strset(server_versions)\n )\n )\n\n print(\n \"The Tor configuration in this backup offers version {} services.\".format(\n strset(backup_versions)\n )\n )\n\n print(\"\\nIncompatible configuration: Restoring a backup including a different \")\n print(\"Tor configuration than the server Tor configuration is unsupported. \")\n print(\"Optionally, use --preserve-tor-config to apply a data-only backup.\")\n print(\"If you require technical assistance, please contact the \")\n print(\"SecureDrop team via the support portal or at \")\n print(\"[email protected].\")\n\n sys.exit(1)\n", "path": "install_files/ansible-base/roles/restore/files/compare_torrc.py"}]}
| 1,273 | 383 |
gh_patches_debug_20393
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-405
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add doc page on authors and credits
It would be really helpful to have a page in our `docs` directory that lists the Coordinating Committee members and a full list of authors of the code, along with other credits. Some examples are Astropy's [Authors and Credits page](http://docs.astropy.org/en/stable/credits.html), and SunPy's [The Project](http://sunpy.org/team.html). The list of code contributors can already be accessed from our GitHub repository and the commit log; however, this often does not include full names. We might be able to find a way to automate this, though that's low priority. We should do this prior to our 0.1 release.
To help with the organization, we should probably create an `about` subdirectory that will include pages about the PlasmaPy project as a whole, including this one. The `docs/stability.rst` page could go in this directory too.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/constants/__init__.py`
Content:
```
1 """Physical and mathematical constants."""
2
3 from numpy import pi
4
5 from astropy.constants.si import (
6 e,
7 mu0,
8 eps0,
9 k_B,
10 c,
11 G,
12 h,
13 hbar,
14 m_p,
15 m_n,
16 m_e,
17 u,
18 sigma_sb,
19 N_A,
20 R,
21 Ryd,
22 a0,
23 muB,
24 sigma_T,
25 au,
26 pc,
27 kpc,
28 g0,
29 L_sun,
30 M_sun,
31 R_sun,
32 M_earth,
33 R_earth,
34 )
35
36 from astropy.constants import atm
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/constants/__init__.py b/plasmapy/constants/__init__.py
--- a/plasmapy/constants/__init__.py
+++ b/plasmapy/constants/__init__.py
@@ -1,4 +1,8 @@
-"""Physical and mathematical constants."""
+"""
+Contains physical and mathematical constants commonly used in plasma
+physics.
+
+"""
from numpy import pi
@@ -34,3 +38,26 @@
)
from astropy.constants import atm
+
+# The following code is modified from astropy.constants to produce a
+# table containing information on the constants contained with PlasmaPy.
+# Mathematical constants can be just entered.
+
+_lines = [
+ 'The following constants are available:\n',
+ '========== ================= ================ ============================================',
+ 'Name Value Units Description',
+ '========== ================= ================ ============================================',
+ " pi 3.141592653589793 Ratio of circumference to diameter of circle",
+]
+
+_constants = [eval(item) for item in dir() if item[0] != '_' and item != 'pi']
+for _const in _constants:
+ _lines.append('{0:^10} {1:^17.12g} {2:^16} {3}'
+ .format(_const.abbrev, _const.value, _const._unit_string, _const.name))
+
+_lines.append(_lines[1])
+
+__doc__ += '\n'.join(_lines)
+
+del _lines, _const, _constants
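As a hedged usage sketch (not part of the original record), the effect of the docstring-table code in the diff above can be checked by importing the patched module, assuming a local install of the patched `plasmapy` package; the table is appended to the module docstring at import time.

```python
# Hedged sketch: confirm the generated constants table after applying the patch.
# Assumes the patched plasmapy package is importable in the current environment.
import plasmapy.constants as const

print(const.__doc__)   # now ends with the Name / Value / Units / Description table
print(const.k_B)       # the individual astropy constants are still exported as before
```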
|
{"golden_diff": "diff --git a/plasmapy/constants/__init__.py b/plasmapy/constants/__init__.py\n--- a/plasmapy/constants/__init__.py\n+++ b/plasmapy/constants/__init__.py\n@@ -1,4 +1,8 @@\n-\"\"\"Physical and mathematical constants.\"\"\"\n+\"\"\"\n+Contains physical and mathematical constants commonly used in plasma\n+physics.\n+\n+\"\"\"\n \n from numpy import pi\n \n@@ -34,3 +38,26 @@\n )\n \n from astropy.constants import atm\n+\n+# The following code is modified from astropy.constants to produce a\n+# table containing information on the constants contained with PlasmaPy.\n+# Mathematical constants can be just entered.\n+\n+_lines = [\n+ 'The following constants are available:\\n',\n+ '========== ================= ================ ============================================',\n+ 'Name Value Units Description',\n+ '========== ================= ================ ============================================',\n+ \" pi 3.141592653589793 Ratio of circumference to diameter of circle\",\n+]\n+\n+_constants = [eval(item) for item in dir() if item[0] != '_' and item != 'pi']\n+for _const in _constants:\n+ _lines.append('{0:^10} {1:^17.12g} {2:^16} {3}'\n+ .format(_const.abbrev, _const.value, _const._unit_string, _const.name))\n+\n+_lines.append(_lines[1])\n+\n+__doc__ += '\\n'.join(_lines)\n+\n+del _lines, _const, _constants\n", "issue": "Add doc page on authors and credits\nIt would be really helpful to have a page in our `docs` directory that lists the Coordinating Committee members and a full list of authors of the code, along with other credits. Some examples are Astropy's [Authors and Credits page](http://docs.astropy.org/en/stable/credits.html), and SunPy's [The Project](http://sunpy.org/team.html). The list of code contributors can already be accessed from our GitHub repository and the commit log; however, this often does not include full names. We might be able to find a way to automate this, though that's low priority. We should do this prior to our 0.1 release.\r\n\r\nTo help with the organization, we should probably create an `about` subdirectory that will include pages about the PlasmaPy project as a whole, including this one. 
The `docs/stability.rst` page could go in this directory too.\n", "before_files": [{"content": "\"\"\"Physical and mathematical constants.\"\"\"\n\nfrom numpy import pi\n\nfrom astropy.constants.si import (\n e,\n mu0,\n eps0,\n k_B,\n c,\n G,\n h,\n hbar,\n m_p,\n m_n,\n m_e,\n u,\n sigma_sb,\n N_A,\n R,\n Ryd,\n a0,\n muB,\n sigma_T,\n au,\n pc,\n kpc,\n g0,\n L_sun,\n M_sun,\n R_sun,\n M_earth,\n R_earth,\n)\n\nfrom astropy.constants import atm\n", "path": "plasmapy/constants/__init__.py"}], "after_files": [{"content": "\"\"\"\nContains physical and mathematical constants commonly used in plasma\nphysics.\n\n\"\"\"\n\nfrom numpy import pi\n\nfrom astropy.constants.si import (\n e,\n mu0,\n eps0,\n k_B,\n c,\n G,\n h,\n hbar,\n m_p,\n m_n,\n m_e,\n u,\n sigma_sb,\n N_A,\n R,\n Ryd,\n a0,\n muB,\n sigma_T,\n au,\n pc,\n kpc,\n g0,\n L_sun,\n M_sun,\n R_sun,\n M_earth,\n R_earth,\n)\n\nfrom astropy.constants import atm\n\n# The following code is modified from astropy.constants to produce a\n# table containing information on the constants contained with PlasmaPy.\n# Mathematical constants can be just entered.\n\n_lines = [\n 'The following constants are available:\\n',\n '========== ================= ================ ============================================',\n 'Name Value Units Description',\n '========== ================= ================ ============================================',\n \" pi 3.141592653589793 Ratio of circumference to diameter of circle\",\n]\n\n_constants = [eval(item) for item in dir() if item[0] != '_' and item != 'pi']\nfor _const in _constants:\n _lines.append('{0:^10} {1:^17.12g} {2:^16} {3}'\n .format(_const.abbrev, _const.value, _const._unit_string, _const.name))\n\n_lines.append(_lines[1])\n\n__doc__ += '\\n'.join(_lines)\n\ndel _lines, _const, _constants\n", "path": "plasmapy/constants/__init__.py"}]}
| 663 | 350 |
gh_patches_debug_20348
|
rasdani/github-patches
|
git_diff
|
google__personfinder-397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal server error on multiview.py with invalid record ID
multiview.py returns Internal server error when one of the specified IDs is invalid. It should return 404 or something instead.
```
AttributeError: 'NoneType' object has no attribute 'person_record_id'
at get (multiview.py:47)
at serve (main.py:622)
at get (main.py:647)
```
--- END ISSUE ---
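As a hedged illustration (not taken from the original issue): the traceback is the usual symptom of attribute access on `None`; `Person.get()` returns `None` for an unknown record ID, and line 47 of `multiview.py` shown below then calls `getattr(p, prop)` on it, with `person_record_id` presumably the first field iterated.

```python
# Standalone illustration of the failure mode only; no app context is needed.
p = None  # what the lookup yields for an invalid record ID
try:
    getattr(p, 'person_record_id')
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'person_record_id'
```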
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/multiview.py`
Content:
```
1 #!/usr/bin/python2.7
2 # Copyright 2010 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from model import *
17 from utils import *
18 import pfif
19 import reveal
20 import subscribe
21 import view
22
23 from django.utils.translation import ugettext as _
24
25 # Fields to show for side-by-side comparison.
26 COMPARE_FIELDS = pfif.PFIF_1_4.fields['person'] + ['primary_full_name']
27
28
29 class Handler(BaseHandler):
30 def get(self):
31 # To handle multiple persons, we create a single object where
32 # each property is a list of values, one for each person.
33 # This makes page rendering easier.
34 person = dict([(prop, []) for prop in COMPARE_FIELDS])
35 any_person = dict([(prop, None) for prop in COMPARE_FIELDS])
36
37 # Get all persons from db.
38 # TODO: Can later optimize to use fewer DB calls.
39 for i in [1, 2, 3]:
40 id = self.request.get('id%d' % i)
41 if not id:
42 break
43 p = Person.get(self.repo, id)
44 sanitize_urls(p)
45
46 for prop in COMPARE_FIELDS:
47 val = getattr(p, prop)
48 if prop == 'sex': # convert enum value to localized text
49 val = get_person_sex_text(p)
50 person[prop].append(val)
51 any_person[prop] = any_person[prop] or val
52
53 # Compute the local times for the date fields on the person and format.
54 person['source_datetime_local_string'] = map(
55 self.to_formatted_local_datetime, person['source_date'])
56
57 # Check if private info should be revealed.
58 content_id = 'multiview:' + ','.join(person['person_record_id'])
59 reveal_url = reveal.make_reveal_url(self, content_id)
60 show_private_info = reveal.verify(content_id, self.params.signature)
61
62 standalone = self.request.get('standalone')
63
64 # TODO: Handle no persons found.
65
66 person['profile_pages'] = [view.get_profile_pages(profile_urls, self)
67 for profile_urls in person['profile_urls']]
68 any_person['profile_pages'] = any(person['profile_pages'])
69
70 # Note: we're not showing notes and linked persons information
71 # here at the moment.
72 self.render('multiview.html',
73 person=person, any=any_person, standalone=standalone,
74 cols=len(person['full_name']) + 1,
75 onload_function='view_page_loaded()', markdup=True,
76 show_private_info=show_private_info, reveal_url=reveal_url)
77
78 def post(self):
79 if not self.params.text:
80 return self.error(
81 200, _('Message is required. Please go back and try again.'))
82
83 if not self.params.author_name:
84 return self.error(
85 200, _('Your name is required in the "About you" section. Please go back and try again.'))
86
87 # TODO: To reduce possible abuse, we currently limit to 3 person
88 # match. We could guard using e.g. an XSRF token, which I don't know how
89 # to build in GAE.
90
91 ids = set()
92 for i in [1, 2, 3]:
93 id = getattr(self.params, 'id%d' % i)
94 if not id:
95 break
96 ids.add(id)
97
98 if len(ids) > 1:
99 notes = []
100 for person_id in ids:
101 person = Person.get(self.repo, person_id)
102 person_notes = []
103 for other_id in ids - set([person_id]):
104 note = Note.create_original(
105 self.repo,
106 entry_date=get_utcnow(),
107 person_record_id=person_id,
108 linked_person_record_id=other_id,
109 text=self.params.text,
110 author_name=self.params.author_name,
111 author_phone=self.params.author_phone,
112 author_email=self.params.author_email,
113 source_date=get_utcnow())
114 person_notes.append(note)
115 # Notify person's subscribers of all new duplicates. We do not
116 # follow links since each Person record in the ids list gets its
117 # own note. However, 1) when > 2 records are marked as
118 # duplicates, subscribers will still receive multiple
119 # notifications, and 2) subscribers to already-linked Persons
120 # will not be notified of the new link.
121 subscribe.send_notifications(self, person, person_notes, False)
122 notes += person_notes
123 # Write all notes to store
124 db.put(notes)
125 self.redirect('/view', id=self.params.id1)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/multiview.py b/app/multiview.py
--- a/app/multiview.py
+++ b/app/multiview.py
@@ -41,6 +41,11 @@
if not id:
break
p = Person.get(self.repo, id)
+ if not p:
+ return self.error(
+ 404,
+ _("This person's entry does not exist or has been "
+ "deleted."))
sanitize_urls(p)
for prop in COMPARE_FIELDS:
@@ -103,7 +108,7 @@
for other_id in ids - set([person_id]):
note = Note.create_original(
self.repo,
- entry_date=get_utcnow(),
+ entry_date=get_utcnow(),
person_record_id=person_id,
linked_person_record_id=other_id,
text=self.params.text,
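A note on the design choice in the patch above: guarding the `Person.get()` result and returning `self.error(404, ...)` turns the former 500 into the 404 the reporter asked for, and it bails out before `sanitize_urls()` or the `COMPARE_FIELDS` loop can touch a `None` record; the second hunk only removes trailing whitespace.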
|
{"golden_diff": "diff --git a/app/multiview.py b/app/multiview.py\n--- a/app/multiview.py\n+++ b/app/multiview.py\n@@ -41,6 +41,11 @@\n if not id:\n break\n p = Person.get(self.repo, id)\n+ if not p:\n+ return self.error(\n+ 404,\n+ _(\"This person's entry does not exist or has been \"\n+ \"deleted.\"))\n sanitize_urls(p)\n \n for prop in COMPARE_FIELDS:\n@@ -103,7 +108,7 @@\n for other_id in ids - set([person_id]):\n note = Note.create_original(\n self.repo,\n- entry_date=get_utcnow(), \n+ entry_date=get_utcnow(),\n person_record_id=person_id,\n linked_person_record_id=other_id,\n text=self.params.text,\n", "issue": "Internal server error on multiview.py with invalid record ID\nmultiview.py returns Internal server error when one of the specified IDs is invalid. It should return 404 or something instead.\r\n\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'person_record_id'\r\nat get (multiview.py:47)\r\nat serve (main.py:622)\r\nat get (main.py:647)\r\n```\n", "before_files": [{"content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom model import *\nfrom utils import *\nimport pfif\nimport reveal\nimport subscribe\nimport view\n\nfrom django.utils.translation import ugettext as _\n\n# Fields to show for side-by-side comparison.\nCOMPARE_FIELDS = pfif.PFIF_1_4.fields['person'] + ['primary_full_name']\n\n\nclass Handler(BaseHandler):\n def get(self):\n # To handle multiple persons, we create a single object where\n # each property is a list of values, one for each person.\n # This makes page rendering easier.\n person = dict([(prop, []) for prop in COMPARE_FIELDS])\n any_person = dict([(prop, None) for prop in COMPARE_FIELDS])\n\n # Get all persons from db.\n # TODO: Can later optimize to use fewer DB calls.\n for i in [1, 2, 3]:\n id = self.request.get('id%d' % i)\n if not id:\n break\n p = Person.get(self.repo, id)\n sanitize_urls(p)\n\n for prop in COMPARE_FIELDS:\n val = getattr(p, prop)\n if prop == 'sex': # convert enum value to localized text\n val = get_person_sex_text(p)\n person[prop].append(val)\n any_person[prop] = any_person[prop] or val\n\n # Compute the local times for the date fields on the person and format.\n person['source_datetime_local_string'] = map(\n self.to_formatted_local_datetime, person['source_date'])\n\n # Check if private info should be revealed.\n content_id = 'multiview:' + ','.join(person['person_record_id'])\n reveal_url = reveal.make_reveal_url(self, content_id)\n show_private_info = reveal.verify(content_id, self.params.signature)\n\n standalone = self.request.get('standalone')\n\n # TODO: Handle no persons found.\n\n person['profile_pages'] = [view.get_profile_pages(profile_urls, self)\n for profile_urls in person['profile_urls']]\n any_person['profile_pages'] = any(person['profile_pages'])\n\n # Note: we're not showing notes and linked persons information\n # here at the moment.\n self.render('multiview.html',\n person=person, any=any_person, standalone=standalone,\n 
cols=len(person['full_name']) + 1,\n onload_function='view_page_loaded()', markdup=True,\n show_private_info=show_private_info, reveal_url=reveal_url)\n\n def post(self):\n if not self.params.text:\n return self.error(\n 200, _('Message is required. Please go back and try again.'))\n\n if not self.params.author_name:\n return self.error(\n 200, _('Your name is required in the \"About you\" section. Please go back and try again.'))\n\n # TODO: To reduce possible abuse, we currently limit to 3 person\n # match. We could guard using e.g. an XSRF token, which I don't know how\n # to build in GAE.\n\n ids = set()\n for i in [1, 2, 3]:\n id = getattr(self.params, 'id%d' % i)\n if not id:\n break\n ids.add(id)\n\n if len(ids) > 1:\n notes = []\n for person_id in ids:\n person = Person.get(self.repo, person_id)\n person_notes = []\n for other_id in ids - set([person_id]):\n note = Note.create_original(\n self.repo,\n entry_date=get_utcnow(), \n person_record_id=person_id,\n linked_person_record_id=other_id,\n text=self.params.text,\n author_name=self.params.author_name,\n author_phone=self.params.author_phone,\n author_email=self.params.author_email,\n source_date=get_utcnow())\n person_notes.append(note)\n # Notify person's subscribers of all new duplicates. We do not\n # follow links since each Person record in the ids list gets its\n # own note. However, 1) when > 2 records are marked as\n # duplicates, subscribers will still receive multiple\n # notifications, and 2) subscribers to already-linked Persons\n # will not be notified of the new link.\n subscribe.send_notifications(self, person, person_notes, False)\n notes += person_notes\n # Write all notes to store\n db.put(notes)\n self.redirect('/view', id=self.params.id1)\n", "path": "app/multiview.py"}], "after_files": [{"content": "#!/usr/bin/python2.7\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom model import *\nfrom utils import *\nimport pfif\nimport reveal\nimport subscribe\nimport view\n\nfrom django.utils.translation import ugettext as _\n\n# Fields to show for side-by-side comparison.\nCOMPARE_FIELDS = pfif.PFIF_1_4.fields['person'] + ['primary_full_name']\n\n\nclass Handler(BaseHandler):\n def get(self):\n # To handle multiple persons, we create a single object where\n # each property is a list of values, one for each person.\n # This makes page rendering easier.\n person = dict([(prop, []) for prop in COMPARE_FIELDS])\n any_person = dict([(prop, None) for prop in COMPARE_FIELDS])\n\n # Get all persons from db.\n # TODO: Can later optimize to use fewer DB calls.\n for i in [1, 2, 3]:\n id = self.request.get('id%d' % i)\n if not id:\n break\n p = Person.get(self.repo, id)\n if not p:\n return self.error(\n 404,\n _(\"This person's entry does not exist or has been \"\n \"deleted.\"))\n sanitize_urls(p)\n\n for prop in COMPARE_FIELDS:\n val = getattr(p, prop)\n if prop == 'sex': # convert enum value to localized text\n val = get_person_sex_text(p)\n person[prop].append(val)\n any_person[prop] = 
any_person[prop] or val\n\n # Compute the local times for the date fields on the person and format.\n person['source_datetime_local_string'] = map(\n self.to_formatted_local_datetime, person['source_date'])\n\n # Check if private info should be revealed.\n content_id = 'multiview:' + ','.join(person['person_record_id'])\n reveal_url = reveal.make_reveal_url(self, content_id)\n show_private_info = reveal.verify(content_id, self.params.signature)\n\n standalone = self.request.get('standalone')\n\n # TODO: Handle no persons found.\n\n person['profile_pages'] = [view.get_profile_pages(profile_urls, self)\n for profile_urls in person['profile_urls']]\n any_person['profile_pages'] = any(person['profile_pages'])\n\n # Note: we're not showing notes and linked persons information\n # here at the moment.\n self.render('multiview.html',\n person=person, any=any_person, standalone=standalone,\n cols=len(person['full_name']) + 1,\n onload_function='view_page_loaded()', markdup=True,\n show_private_info=show_private_info, reveal_url=reveal_url)\n\n def post(self):\n if not self.params.text:\n return self.error(\n 200, _('Message is required. Please go back and try again.'))\n\n if not self.params.author_name:\n return self.error(\n 200, _('Your name is required in the \"About you\" section. Please go back and try again.'))\n\n # TODO: To reduce possible abuse, we currently limit to 3 person\n # match. We could guard using e.g. an XSRF token, which I don't know how\n # to build in GAE.\n\n ids = set()\n for i in [1, 2, 3]:\n id = getattr(self.params, 'id%d' % i)\n if not id:\n break\n ids.add(id)\n\n if len(ids) > 1:\n notes = []\n for person_id in ids:\n person = Person.get(self.repo, person_id)\n person_notes = []\n for other_id in ids - set([person_id]):\n note = Note.create_original(\n self.repo,\n entry_date=get_utcnow(),\n person_record_id=person_id,\n linked_person_record_id=other_id,\n text=self.params.text,\n author_name=self.params.author_name,\n author_phone=self.params.author_phone,\n author_email=self.params.author_email,\n source_date=get_utcnow())\n person_notes.append(note)\n # Notify person's subscribers of all new duplicates. We do not\n # follow links since each Person record in the ids list gets its\n # own note. However, 1) when > 2 records are marked as\n # duplicates, subscribers will still receive multiple\n # notifications, and 2) subscribers to already-linked Persons\n # will not be notified of the new link.\n subscribe.send_notifications(self, person, person_notes, False)\n notes += person_notes\n # Write all notes to store\n db.put(notes)\n self.redirect('/view', id=self.params.id1)\n", "path": "app/multiview.py"}]}
| 1,718 | 197 |
gh_patches_debug_22455
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-5250
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
500 errors for "Ajouter en Une" (add to the front page)
On some content, "Ajouter en Une" returns a 500 error
Topic: https://zestedesavoir.com/forums/sujet/11676/ajouter-en-une/#p195045
*Sent from Zeste de Savoir*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/featured/views.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib import messages
4 from django.contrib.auth.decorators import login_required, permission_required
5 from django.urls import reverse
6 from django.db import transaction
7 from django.shortcuts import redirect
8 from django.utils.decorators import method_decorator
9 from django.utils.translation import ugettext as _
10 from django.views.generic import CreateView, RedirectView, UpdateView, FormView, DeleteView
11 from django.views.generic.list import MultipleObjectMixin
12
13 from django.conf import settings
14 from zds.featured.forms import FeaturedResourceForm, FeaturedMessageForm
15 from zds.featured.models import FeaturedResource, FeaturedMessage
16 from zds.forum.models import Topic
17 from zds.tutorialv2.models.database import PublishedContent
18 from zds.utils.paginator import ZdSPagingListView
19
20
21 class FeaturedResourceList(ZdSPagingListView):
22 """
23 Displays the list of featured resources.
24 """
25
26 context_object_name = 'featured_resource_list'
27 paginate_by = settings.ZDS_APP['featured_resource']['featured_per_page']
28 queryset = FeaturedResource.objects.all().order_by('-pubdate')
29 template_name = 'featured/index.html'
30
31 @method_decorator(login_required)
32 @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))
33 def dispatch(self, request, *args, **kwargs):
34 return super(FeaturedResourceList, self).dispatch(request, *args, **kwargs)
35
36
37 class FeaturedResourceCreate(CreateView):
38 """
39 Creates a new featured resource.
40 """
41
42 form_class = FeaturedResourceForm
43 template_name = 'featured/resource/create.html'
44 context_object_name = 'featured_resource'
45 initial_error_message = _('Le contenu est introuvable')
46 displayed_content_type = {'TUTORIAL': _('Un tutoriel'),
47 'ARTICLE': _('Un article'),
48 'OPINION': _('Un billet'),
49 'TOPIC': _('Un sujet')}
50
51 @method_decorator(login_required)
52 @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))
53 def dispatch(self, request, *args, **kwargs):
54 return super(FeaturedResourceCreate, self).dispatch(request, *args, **kwargs)
55
56 def get_initial_topic_data(self, topic_id):
57 try:
58 content = Topic.objects.get(pk=int(topic_id))
59 except (Topic.DoesNotExist, ValueError):
60 messages.error(self.request, self.initial_error_message)
61 return {}
62 return {'title': content.title,
63 'type': self.displayed_content_type['TOPIC'],
64 'authors': str(content.author),
65 'url': self.request.build_absolute_uri(content.get_absolute_url())}
66
67 def get_initial_content_data(self, content_id):
68 try:
69 content = PublishedContent.objects.get(content__pk=int(content_id))
70 except (PublishedContent.DoesNotExist, ValueError):
71 messages.error(self.request, self.initial_error_message)
72 return {}
73 displayed_authors = ', '.join([str(x) for x in content.authors.all()])
74 if content.content.image:
75 image_url = self.request.build_absolute_uri(content.content.image.physical['featured'].url)
76 else:
77 image_url = None
78 return {'title': content.title(),
79 'type': self.displayed_content_type[content.content_type],
80 'authors': displayed_authors,
81 'url': self.request.build_absolute_uri(content.content.get_absolute_url_online()),
82 'image_url': image_url}
83
84 def get_initial(self):
85 initial = super(FeaturedResourceCreate, self).get_initial()
86 content_type = self.request.GET.get('content_type', None)
87 content_id = self.request.GET.get('content_id', None)
88 if content_type == 'topic' and content_id:
89 initial.update(**self.get_initial_topic_data(content_id))
90 elif content_type == 'published_content' and content_id:
91 initial.update(**self.get_initial_content_data(content_id))
92 return initial
93
94 def get_form_kwargs(self):
95 kw = super(FeaturedResourceCreate, self).get_form_kwargs()
96 kw['hide_major_update_field'] = True
97 return kw
98
99 def form_valid(self, form):
100 featured_resource = FeaturedResource()
101 featured_resource.title = form.cleaned_data.get('title')
102 featured_resource.type = form.cleaned_data.get('type')
103 featured_resource.authors = form.cleaned_data.get('authors')
104 featured_resource.image_url = form.cleaned_data.get('image_url')
105 featured_resource.url = form.cleaned_data.get('url')
106
107 if form.cleaned_data.get('major_update', False):
108 featured_resource.pubdate = datetime.now()
109 else:
110 featured_resource.pubdate = form.cleaned_data.get('pubdate')
111
112 featured_resource.save()
113
114 messages.success(self.request, _('La une a été créée.'))
115 return redirect(reverse('featured-resource-list'))
116
117
118 class FeaturedResourceUpdate(UpdateView):
119 """
120 Updates a featured resource.
121 """
122
123 form_class = FeaturedResourceForm
124 template_name = 'featured/resource/update.html'
125 queryset = FeaturedResource.objects.all()
126 context_object_name = 'featured_resource'
127
128 @method_decorator(login_required)
129 @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))
130 def dispatch(self, request, *args, **kwargs):
131 return super(FeaturedResourceUpdate, self).dispatch(request, *args, **kwargs)
132
133 def get_initial(self):
134 initial = super(FeaturedResourceUpdate, self).get_initial()
135 initial.update({
136 'title': self.object.title,
137 'type': self.object.type,
138 'authors': self.object.authors,
139 'image_url': self.object.image_url,
140 'url': self.object.url,
141 'pubdate': self.object.pubdate,
142 })
143
144 return initial
145
146 def form_valid(self, form):
147
148 self.object.title = form.cleaned_data.get('title')
149 self.object.type = form.cleaned_data.get('type')
150 self.object.authors = form.cleaned_data.get('authors')
151 self.object.image_url = form.cleaned_data.get('image_url')
152 self.object.url = form.cleaned_data.get('url')
153 if form.cleaned_data.get('major_update', False):
154 self.object.pubdate = datetime.now()
155 else:
156 self.object.pubdate = form.cleaned_data.get('pubdate')
157
158 messages.success(self.request, _('La une a été mise à jour.'))
159 self.success_url = reverse('featured-resource-list')
160 return super(FeaturedResourceUpdate, self).form_valid(form)
161
162 def get_form(self, form_class=None):
163 form = super(FeaturedResourceUpdate, self).get_form(form_class)
164 form.helper.form_action = reverse('featured-resource-update', args=[self.object.pk])
165 return form
166
167
168 class FeaturedResourceDeleteDetail(DeleteView):
169 """
170 Deletes a featured resource.
171 """
172
173 model = FeaturedResource
174
175 @method_decorator(login_required)
176 @method_decorator(transaction.atomic)
177 @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))
178 def dispatch(self, request, *args, **kwargs):
179 self.success_url = reverse('featured-resource-list')
180 return super(FeaturedResourceDeleteDetail, self).dispatch(request, *args, **kwargs)
181
182 def post(self, request, *args, **kwargs):
183 r = super(FeaturedResourceDeleteDetail, self).post(request, *args, **kwargs)
184 messages.success(request, _('La une a été supprimée avec succès.'))
185 return r
186
187
188 class FeaturedResourceDeleteList(MultipleObjectMixin, RedirectView):
189 """
190 Deletes a list of featured resources.
191 """
192 permanent = False
193
194 @method_decorator(login_required)
195 @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))
196 def dispatch(self, request, *args, **kwargs):
197 return super(FeaturedResourceDeleteList, self).dispatch(request, *args, **kwargs)
198
199 def get_queryset(self):
200 items_list = self.request.POST.getlist('items')
201 return FeaturedResource.objects.filter(pk__in=items_list)
202
203 def post(self, request, *args, **kwargs):
204 for featured_resource in self.get_queryset():
205 featured_resource.delete()
206
207 messages.success(request, _('Les unes ont été supprimées avec succès.'))
208
209 return redirect(reverse('featured-resource-list'))
210
211
212 class FeaturedMessageCreateUpdate(FormView):
213 """
214 Creates or updates the featured message.
215 """
216
217 form_class = FeaturedMessageForm
218 template_name = 'featured/message/create.html'
219 last_message = None
220
221 @method_decorator(login_required)
222 @method_decorator(permission_required('featured.change_featuredmessage', raise_exception=True))
223 def dispatch(self, request, *args, **kwargs):
224 self.last_message = FeaturedMessage.objects.get_last_message()
225 return super(FeaturedMessageCreateUpdate, self).dispatch(request, *args, **kwargs)
226
227 def get_initial(self):
228 init = super(FeaturedMessageCreateUpdate, self).get_initial()
229
230 if self.last_message is not None:
231 init.update({
232 'hook': self.last_message.hook,
233 'message': self.last_message.message,
234 'url': self.last_message.url,
235 })
236
237 return init
238
239 def form_valid(self, form):
240 if self.last_message:
241 self.last_message.delete()
242
243 featured_message = FeaturedMessage()
244 featured_message.hook = form.data.get('hook')
245 featured_message.message = form.data.get('message')
246 featured_message.url = form.data.get('url')
247 featured_message.save()
248
249 messages.success(self.request, _('Le message a été changé'))
250 return redirect(reverse('featured-resource-list'))
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/featured/views.py b/zds/featured/views.py
--- a/zds/featured/views.py
+++ b/zds/featured/views.py
@@ -66,7 +66,7 @@
def get_initial_content_data(self, content_id):
try:
- content = PublishedContent.objects.get(content__pk=int(content_id))
+ content = PublishedContent.objects.filter(must_redirect=False, content__pk=int(content_id)).first()
except (PublishedContent.DoesNotExist, ValueError):
messages.error(self.request, self.initial_error_message)
return {}
@@ -79,7 +79,7 @@
'type': self.displayed_content_type[content.content_type],
'authors': displayed_authors,
'url': self.request.build_absolute_uri(content.content.get_absolute_url_online()),
- 'image_url': image_url}
+ 'image_url': self.request.build_absolute_uri(image_url)}
def get_initial(self):
initial = super(FeaturedResourceCreate, self).get_initial()
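A note on the design choice in the patch above: `QuerySet.get()` raises for both zero and multiple matches, and a re-published content keeps its older `PublishedContent` rows (flagged `must_redirect=True`), which is the likely source of the 500; `filter(...).first()` pins the lookup to the live version and yields `None` instead of raising, while the second hunk wraps the image URL in `build_absolute_uri()` so the featured image link is absolute. A minimal sketch of the corrected lookup, using only names visible in the files above:

```python
# Hedged sketch mirroring the diff above: fetch the currently served version only.
from zds.tutorialv2.models.database import PublishedContent

def current_public_version(content_pk):
    # Several rows can share the same content pk (one per published version);
    # must_redirect=False marks the live one, and first() degrades to None.
    return (PublishedContent.objects
            .filter(must_redirect=False, content__pk=int(content_pk))
            .first())
```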
|
{"golden_diff": "diff --git a/zds/featured/views.py b/zds/featured/views.py\n--- a/zds/featured/views.py\n+++ b/zds/featured/views.py\n@@ -66,7 +66,7 @@\n \n def get_initial_content_data(self, content_id):\n try:\n- content = PublishedContent.objects.get(content__pk=int(content_id))\n+ content = PublishedContent.objects.filter(must_redirect=False, content__pk=int(content_id)).first()\n except (PublishedContent.DoesNotExist, ValueError):\n messages.error(self.request, self.initial_error_message)\n return {}\n@@ -79,7 +79,7 @@\n 'type': self.displayed_content_type[content.content_type],\n 'authors': displayed_authors,\n 'url': self.request.build_absolute_uri(content.content.get_absolute_url_online()),\n- 'image_url': image_url}\n+ 'image_url': self.request.build_absolute_uri(image_url)}\n \n def get_initial(self):\n initial = super(FeaturedResourceCreate, self).get_initial()\n", "issue": "Erreurs 500 pour \"Ajouter en Une\"\nSur certains contenus, \"Ajouter en Une\" renvoie une erreur 500\r\n\r\nSujet : https://zestedesavoir.com/forums/sujet/11676/ajouter-en-une/#p195045\r\n*Envoy\u00e9 depuis Zeste de Savoir*\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import CreateView, RedirectView, UpdateView, FormView, DeleteView\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom django.conf import settings\nfrom zds.featured.forms import FeaturedResourceForm, FeaturedMessageForm\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\nfrom zds.forum.models import Topic\nfrom zds.tutorialv2.models.database import PublishedContent\nfrom zds.utils.paginator import ZdSPagingListView\n\n\nclass FeaturedResourceList(ZdSPagingListView):\n \"\"\"\n Displays the list of featured resources.\n \"\"\"\n\n context_object_name = 'featured_resource_list'\n paginate_by = settings.ZDS_APP['featured_resource']['featured_per_page']\n queryset = FeaturedResource.objects.all().order_by('-pubdate')\n template_name = 'featured/index.html'\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceList, self).dispatch(request, *args, **kwargs)\n\n\nclass FeaturedResourceCreate(CreateView):\n \"\"\"\n Creates a new featured resource.\n \"\"\"\n\n form_class = FeaturedResourceForm\n template_name = 'featured/resource/create.html'\n context_object_name = 'featured_resource'\n initial_error_message = _('Le contenu est introuvable')\n displayed_content_type = {'TUTORIAL': _('Un tutoriel'),\n 'ARTICLE': _('Un article'),\n 'OPINION': _('Un billet'),\n 'TOPIC': _('Un sujet')}\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceCreate, self).dispatch(request, *args, **kwargs)\n\n def get_initial_topic_data(self, topic_id):\n try:\n content = Topic.objects.get(pk=int(topic_id))\n except (Topic.DoesNotExist, ValueError):\n messages.error(self.request, self.initial_error_message)\n return {}\n return {'title': 
content.title,\n 'type': self.displayed_content_type['TOPIC'],\n 'authors': str(content.author),\n 'url': self.request.build_absolute_uri(content.get_absolute_url())}\n\n def get_initial_content_data(self, content_id):\n try:\n content = PublishedContent.objects.get(content__pk=int(content_id))\n except (PublishedContent.DoesNotExist, ValueError):\n messages.error(self.request, self.initial_error_message)\n return {}\n displayed_authors = ', '.join([str(x) for x in content.authors.all()])\n if content.content.image:\n image_url = self.request.build_absolute_uri(content.content.image.physical['featured'].url)\n else:\n image_url = None\n return {'title': content.title(),\n 'type': self.displayed_content_type[content.content_type],\n 'authors': displayed_authors,\n 'url': self.request.build_absolute_uri(content.content.get_absolute_url_online()),\n 'image_url': image_url}\n\n def get_initial(self):\n initial = super(FeaturedResourceCreate, self).get_initial()\n content_type = self.request.GET.get('content_type', None)\n content_id = self.request.GET.get('content_id', None)\n if content_type == 'topic' and content_id:\n initial.update(**self.get_initial_topic_data(content_id))\n elif content_type == 'published_content' and content_id:\n initial.update(**self.get_initial_content_data(content_id))\n return initial\n\n def get_form_kwargs(self):\n kw = super(FeaturedResourceCreate, self).get_form_kwargs()\n kw['hide_major_update_field'] = True\n return kw\n\n def form_valid(self, form):\n featured_resource = FeaturedResource()\n featured_resource.title = form.cleaned_data.get('title')\n featured_resource.type = form.cleaned_data.get('type')\n featured_resource.authors = form.cleaned_data.get('authors')\n featured_resource.image_url = form.cleaned_data.get('image_url')\n featured_resource.url = form.cleaned_data.get('url')\n\n if form.cleaned_data.get('major_update', False):\n featured_resource.pubdate = datetime.now()\n else:\n featured_resource.pubdate = form.cleaned_data.get('pubdate')\n\n featured_resource.save()\n\n messages.success(self.request, _('La une a \u00e9t\u00e9 cr\u00e9\u00e9e.'))\n return redirect(reverse('featured-resource-list'))\n\n\nclass FeaturedResourceUpdate(UpdateView):\n \"\"\"\n Updates a featured resource.\n \"\"\"\n\n form_class = FeaturedResourceForm\n template_name = 'featured/resource/update.html'\n queryset = FeaturedResource.objects.all()\n context_object_name = 'featured_resource'\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceUpdate, self).dispatch(request, *args, **kwargs)\n\n def get_initial(self):\n initial = super(FeaturedResourceUpdate, self).get_initial()\n initial.update({\n 'title': self.object.title,\n 'type': self.object.type,\n 'authors': self.object.authors,\n 'image_url': self.object.image_url,\n 'url': self.object.url,\n 'pubdate': self.object.pubdate,\n })\n\n return initial\n\n def form_valid(self, form):\n\n self.object.title = form.cleaned_data.get('title')\n self.object.type = form.cleaned_data.get('type')\n self.object.authors = form.cleaned_data.get('authors')\n self.object.image_url = form.cleaned_data.get('image_url')\n self.object.url = form.cleaned_data.get('url')\n if form.cleaned_data.get('major_update', False):\n self.object.pubdate = datetime.now()\n else:\n self.object.pubdate = form.cleaned_data.get('pubdate')\n\n messages.success(self.request, _('La une a 
\u00e9t\u00e9 mise \u00e0 jour.'))\n self.success_url = reverse('featured-resource-list')\n return super(FeaturedResourceUpdate, self).form_valid(form)\n\n def get_form(self, form_class=None):\n form = super(FeaturedResourceUpdate, self).get_form(form_class)\n form.helper.form_action = reverse('featured-resource-update', args=[self.object.pk])\n return form\n\n\nclass FeaturedResourceDeleteDetail(DeleteView):\n \"\"\"\n Deletes a featured resource.\n \"\"\"\n\n model = FeaturedResource\n\n @method_decorator(login_required)\n @method_decorator(transaction.atomic)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n self.success_url = reverse('featured-resource-list')\n return super(FeaturedResourceDeleteDetail, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n r = super(FeaturedResourceDeleteDetail, self).post(request, *args, **kwargs)\n messages.success(request, _('La une a \u00e9t\u00e9 supprim\u00e9e avec succ\u00e8s.'))\n return r\n\n\nclass FeaturedResourceDeleteList(MultipleObjectMixin, RedirectView):\n \"\"\"\n Deletes a list of featured resources.\n \"\"\"\n permanent = False\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceDeleteList, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n items_list = self.request.POST.getlist('items')\n return FeaturedResource.objects.filter(pk__in=items_list)\n\n def post(self, request, *args, **kwargs):\n for featured_resource in self.get_queryset():\n featured_resource.delete()\n\n messages.success(request, _('Les unes ont \u00e9t\u00e9 supprim\u00e9es avec succ\u00e8s.'))\n\n return redirect(reverse('featured-resource-list'))\n\n\nclass FeaturedMessageCreateUpdate(FormView):\n \"\"\"\n Creates or updates the featured message.\n \"\"\"\n\n form_class = FeaturedMessageForm\n template_name = 'featured/message/create.html'\n last_message = None\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredmessage', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n self.last_message = FeaturedMessage.objects.get_last_message()\n return super(FeaturedMessageCreateUpdate, self).dispatch(request, *args, **kwargs)\n\n def get_initial(self):\n init = super(FeaturedMessageCreateUpdate, self).get_initial()\n\n if self.last_message is not None:\n init.update({\n 'hook': self.last_message.hook,\n 'message': self.last_message.message,\n 'url': self.last_message.url,\n })\n\n return init\n\n def form_valid(self, form):\n if self.last_message:\n self.last_message.delete()\n\n featured_message = FeaturedMessage()\n featured_message.hook = form.data.get('hook')\n featured_message.message = form.data.get('message')\n featured_message.url = form.data.get('url')\n featured_message.save()\n\n messages.success(self.request, _('Le message a \u00e9t\u00e9 chang\u00e9'))\n return redirect(reverse('featured-resource-list'))\n", "path": "zds/featured/views.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.urls import reverse\nfrom django.db import transaction\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import 
method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import CreateView, RedirectView, UpdateView, FormView, DeleteView\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom django.conf import settings\nfrom zds.featured.forms import FeaturedResourceForm, FeaturedMessageForm\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\nfrom zds.forum.models import Topic\nfrom zds.tutorialv2.models.database import PublishedContent\nfrom zds.utils.paginator import ZdSPagingListView\n\n\nclass FeaturedResourceList(ZdSPagingListView):\n \"\"\"\n Displays the list of featured resources.\n \"\"\"\n\n context_object_name = 'featured_resource_list'\n paginate_by = settings.ZDS_APP['featured_resource']['featured_per_page']\n queryset = FeaturedResource.objects.all().order_by('-pubdate')\n template_name = 'featured/index.html'\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceList, self).dispatch(request, *args, **kwargs)\n\n\nclass FeaturedResourceCreate(CreateView):\n \"\"\"\n Creates a new featured resource.\n \"\"\"\n\n form_class = FeaturedResourceForm\n template_name = 'featured/resource/create.html'\n context_object_name = 'featured_resource'\n initial_error_message = _('Le contenu est introuvable')\n displayed_content_type = {'TUTORIAL': _('Un tutoriel'),\n 'ARTICLE': _('Un article'),\n 'OPINION': _('Un billet'),\n 'TOPIC': _('Un sujet')}\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceCreate, self).dispatch(request, *args, **kwargs)\n\n def get_initial_topic_data(self, topic_id):\n try:\n content = Topic.objects.get(pk=int(topic_id))\n except (Topic.DoesNotExist, ValueError):\n messages.error(self.request, self.initial_error_message)\n return {}\n return {'title': content.title,\n 'type': self.displayed_content_type['TOPIC'],\n 'authors': str(content.author),\n 'url': self.request.build_absolute_uri(content.get_absolute_url())}\n\n def get_initial_content_data(self, content_id):\n try:\n content = PublishedContent.objects.filter(must_redirect=False, content__pk=int(content_id)).first()\n except (PublishedContent.DoesNotExist, ValueError):\n messages.error(self.request, self.initial_error_message)\n return {}\n displayed_authors = ', '.join([str(x) for x in content.authors.all()])\n if content.content.image:\n image_url = self.request.build_absolute_uri(content.content.image.physical['featured'].url)\n else:\n image_url = None\n return {'title': content.title(),\n 'type': self.displayed_content_type[content.content_type],\n 'authors': displayed_authors,\n 'url': self.request.build_absolute_uri(content.content.get_absolute_url_online()),\n 'image_url': self.request.build_absolute_uri(image_url)}\n\n def get_initial(self):\n initial = super(FeaturedResourceCreate, self).get_initial()\n content_type = self.request.GET.get('content_type', None)\n content_id = self.request.GET.get('content_id', None)\n if content_type == 'topic' and content_id:\n initial.update(**self.get_initial_topic_data(content_id))\n elif content_type == 'published_content' and content_id:\n initial.update(**self.get_initial_content_data(content_id))\n return initial\n\n def get_form_kwargs(self):\n kw = super(FeaturedResourceCreate, 
self).get_form_kwargs()\n kw['hide_major_update_field'] = True\n return kw\n\n def form_valid(self, form):\n featured_resource = FeaturedResource()\n featured_resource.title = form.cleaned_data.get('title')\n featured_resource.type = form.cleaned_data.get('type')\n featured_resource.authors = form.cleaned_data.get('authors')\n featured_resource.image_url = form.cleaned_data.get('image_url')\n featured_resource.url = form.cleaned_data.get('url')\n\n if form.cleaned_data.get('major_update', False):\n featured_resource.pubdate = datetime.now()\n else:\n featured_resource.pubdate = form.cleaned_data.get('pubdate')\n\n featured_resource.save()\n\n messages.success(self.request, _('La une a \u00e9t\u00e9 cr\u00e9\u00e9e.'))\n return redirect(reverse('featured-resource-list'))\n\n\nclass FeaturedResourceUpdate(UpdateView):\n \"\"\"\n Updates a featured resource.\n \"\"\"\n\n form_class = FeaturedResourceForm\n template_name = 'featured/resource/update.html'\n queryset = FeaturedResource.objects.all()\n context_object_name = 'featured_resource'\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n return super(FeaturedResourceUpdate, self).dispatch(request, *args, **kwargs)\n\n def get_initial(self):\n initial = super(FeaturedResourceUpdate, self).get_initial()\n initial.update({\n 'title': self.object.title,\n 'type': self.object.type,\n 'authors': self.object.authors,\n 'image_url': self.object.image_url,\n 'url': self.object.url,\n 'pubdate': self.object.pubdate,\n })\n\n return initial\n\n def form_valid(self, form):\n\n self.object.title = form.cleaned_data.get('title')\n self.object.type = form.cleaned_data.get('type')\n self.object.authors = form.cleaned_data.get('authors')\n self.object.image_url = form.cleaned_data.get('image_url')\n self.object.url = form.cleaned_data.get('url')\n if form.cleaned_data.get('major_update', False):\n self.object.pubdate = datetime.now()\n else:\n self.object.pubdate = form.cleaned_data.get('pubdate')\n\n messages.success(self.request, _('La une a \u00e9t\u00e9 mise \u00e0 jour.'))\n self.success_url = reverse('featured-resource-list')\n return super(FeaturedResourceUpdate, self).form_valid(form)\n\n def get_form(self, form_class=None):\n form = super(FeaturedResourceUpdate, self).get_form(form_class)\n form.helper.form_action = reverse('featured-resource-update', args=[self.object.pk])\n return form\n\n\nclass FeaturedResourceDeleteDetail(DeleteView):\n \"\"\"\n Deletes a featured resource.\n \"\"\"\n\n model = FeaturedResource\n\n @method_decorator(login_required)\n @method_decorator(transaction.atomic)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n self.success_url = reverse('featured-resource-list')\n return super(FeaturedResourceDeleteDetail, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n r = super(FeaturedResourceDeleteDetail, self).post(request, *args, **kwargs)\n messages.success(request, _('La une a \u00e9t\u00e9 supprim\u00e9e avec succ\u00e8s.'))\n return r\n\n\nclass FeaturedResourceDeleteList(MultipleObjectMixin, RedirectView):\n \"\"\"\n Deletes a list of featured resources.\n \"\"\"\n permanent = False\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredresource', raise_exception=True))\n def dispatch(self, request, 
*args, **kwargs):\n return super(FeaturedResourceDeleteList, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n items_list = self.request.POST.getlist('items')\n return FeaturedResource.objects.filter(pk__in=items_list)\n\n def post(self, request, *args, **kwargs):\n for featured_resource in self.get_queryset():\n featured_resource.delete()\n\n messages.success(request, _('Les unes ont \u00e9t\u00e9 supprim\u00e9es avec succ\u00e8s.'))\n\n return redirect(reverse('featured-resource-list'))\n\n\nclass FeaturedMessageCreateUpdate(FormView):\n \"\"\"\n Creates or updates the featured message.\n \"\"\"\n\n form_class = FeaturedMessageForm\n template_name = 'featured/message/create.html'\n last_message = None\n\n @method_decorator(login_required)\n @method_decorator(permission_required('featured.change_featuredmessage', raise_exception=True))\n def dispatch(self, request, *args, **kwargs):\n self.last_message = FeaturedMessage.objects.get_last_message()\n return super(FeaturedMessageCreateUpdate, self).dispatch(request, *args, **kwargs)\n\n def get_initial(self):\n init = super(FeaturedMessageCreateUpdate, self).get_initial()\n\n if self.last_message is not None:\n init.update({\n 'hook': self.last_message.hook,\n 'message': self.last_message.message,\n 'url': self.last_message.url,\n })\n\n return init\n\n def form_valid(self, form):\n if self.last_message:\n self.last_message.delete()\n\n featured_message = FeaturedMessage()\n featured_message.hook = form.data.get('hook')\n featured_message.message = form.data.get('message')\n featured_message.url = form.data.get('url')\n featured_message.save()\n\n messages.success(self.request, _('Le message a \u00e9t\u00e9 chang\u00e9'))\n return redirect(reverse('featured-resource-list'))\n", "path": "zds/featured/views.py"}]}
| 2,991 | 217 |
gh_patches_debug_10947
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-2483
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PlotSpeedTest command line arguments ignored
### Short description
<!-- This should summarize the issue. -->
Since 108365ba45c1a1302df110dad5f9d960d4d903a9, PlotSpeedTest command arguments are no longer honored.
### Code to reproduce
<!-- Please provide a minimal working example that reproduces the issue in the code block below.
Ideally, this should be a full example someone else could run without additional setup. -->
```
PlotSpeedTest.py --nsamples=10000
```
### Expected behavior
<!-- What should happen? -->
The number of samples used should be 10000.
### Real behavior
<!-- What happens? -->
The number of samples used remains at the default of 5000.
### Tested environment(s)
* PyQtGraph version: master
* Qt Python binding: PyQt5 5.15.7, PySide6 6.3.2
* Python version: Python 3.8.10
* Operating system: Windows 10
* Installation method: pip install -e . <!-- e.g. pip, conda, system packages, ... -->
--- END ISSUE ---
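As a hedged reading of the report (not part of the original issue): in the `PlotSpeedTest.py` reproduced below, the interactor-decorated `makeData()` and `update()` carry hard-coded keyword defaults (`nsamples=5000`, and so on), and nothing feeds the parsed `args` namespace into them, which matches the reported behaviour. One possible wiring, not necessarily the upstream fix, with the decorator options and function body unchanged from the script below:

```python
# Sketch only: let the argparse values seed the parameter-tree defaults again.
@interactor.decorate(
    nest=True,
    nsamples={'limits': [0, None]},
    frames={'limits': [1, None]},
    fsample={'units': 'Hz'},
    frequency={'units': 'Hz'},
)
def makeData(noise=args.noise, nsamples=args.nsamples, frames=args.frames,
             fsample=args.fsample, frequency=args.frequency, amplitude=args.amplitude):
    ...  # same body as in the script below
```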
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/examples/PlotSpeedTest.py`
Content:
```
1 #!/usr/bin/python
2 """
3 Update a simple plot as rapidly as possible to measure speed.
4 """
5
6 import argparse
7 from collections import deque
8 from time import perf_counter
9
10 import numpy as np
11
12 import pyqtgraph as pg
13 import pyqtgraph.functions as fn
14 import pyqtgraph.parametertree as ptree
15 from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
16
17 # defaults here result in the same configuration as the original PlotSpeedTest
18 parser = argparse.ArgumentParser()
19 parser.add_argument('--noise', dest='noise', action='store_true')
20 parser.add_argument('--no-noise', dest='noise', action='store_false')
21 parser.set_defaults(noise=True)
22 parser.add_argument('--nsamples', default=5000, type=int)
23 parser.add_argument('--frames', default=50, type=int)
24 parser.add_argument('--fsample', default=1000, type=float)
25 parser.add_argument('--frequency', default=0, type=float)
26 parser.add_argument('--amplitude', default=5, type=float)
27 parser.add_argument('--opengl', dest='use_opengl', action='store_true')
28 parser.add_argument('--no-opengl', dest='use_opengl', action='store_false')
29 parser.set_defaults(use_opengl=None)
30 parser.add_argument('--allow-opengl-toggle', action='store_true',
31 help="""Allow on-the-fly change of OpenGL setting. This may cause unwanted side effects.
32 """)
33 args = parser.parse_args()
34
35 if args.use_opengl is not None:
36 pg.setConfigOption('useOpenGL', args.use_opengl)
37 pg.setConfigOption('enableExperimental', args.use_opengl)
38
39 # don't limit frame rate to vsync
40 sfmt = QtGui.QSurfaceFormat()
41 sfmt.setSwapInterval(0)
42 QtGui.QSurfaceFormat.setDefaultFormat(sfmt)
43
44
45 class MonkeyCurveItem(pg.PlotCurveItem):
46 def __init__(self, *args, **kwds):
47 super().__init__(*args, **kwds)
48 self.monkey_mode = ''
49
50 def setMethod(self, value):
51 self.monkey_mode = value
52
53 def paint(self, painter, opt, widget):
54 if self.monkey_mode not in ['drawPolyline']:
55 return super().paint(painter, opt, widget)
56
57 painter.setRenderHint(painter.RenderHint.Antialiasing, self.opts['antialias'])
58 painter.setPen(pg.mkPen(self.opts['pen']))
59
60 if self.monkey_mode == 'drawPolyline':
61 painter.drawPolyline(fn.arrayToQPolygonF(self.xData, self.yData))
62
63 app = pg.mkQApp("Plot Speed Test")
64
65 default_pen = pg.mkPen()
66
67 params = ptree.Parameter.create(name='Parameters', type='group')
68 pt = ptree.ParameterTree(showHeader=False)
69 pt.setParameters(params)
70 pw = pg.PlotWidget()
71 splitter = QtWidgets.QSplitter()
72 splitter.addWidget(pt)
73 splitter.addWidget(pw)
74 splitter.show()
75
76 interactor = ptree.Interactor(
77 parent=params, nest=False, runOptions=ptree.RunOptions.ON_CHANGED
78 )
79
80 pw.setWindowTitle('pyqtgraph example: PlotSpeedTest')
81 pw.setLabel('bottom', 'Index', units='B')
82 curve = MonkeyCurveItem(pen=default_pen, brush='b')
83 pw.addItem(curve)
84
85 rollingAverageSize = 1000
86 elapsed = deque(maxlen=rollingAverageSize)
87
88 def resetTimings(*args):
89 elapsed.clear()
90
91 @interactor.decorate(
92 nest=True,
93 nsamples={'limits': [0, None]},
94 frames={'limits': [1, None]},
95 fsample={'units': 'Hz'},
96 frequency={'units': 'Hz'}
97 )
98 def makeData(noise=True, nsamples=5000, frames=50, fsample=1000.0, frequency=0.0, amplitude=5.0):
99 global data, connect_array, ptr
100 ttt = np.arange(frames * nsamples, dtype=np.float64) / fsample
101 data = amplitude*np.sin(2*np.pi*frequency*ttt).reshape((frames, nsamples))
102 if noise:
103 data += np.random.normal(size=data.shape)
104 connect_array = np.ones(data.shape[-1], dtype=bool)
105 ptr = 0
106 pw.setRange(QtCore.QRectF(0, -10, nsamples, 20))
107
108 params.child('makeData').setOpts(title='Plot Options')
109
110 @interactor.decorate(
111 connect={'type': 'list', 'limits': ['all', 'pairs', 'finite', 'array']}
112 )
113 def update(
114 antialias=pg.getConfigOption('antialias'),
115 connect='all',
116 skipFiniteCheck=False
117 ):
118 global curve, data, ptr, elapsed, fpsLastUpdate
119
120 if connect == 'array':
121 connect = connect_array
122
123 # Measure
124 t_start = perf_counter()
125 curve.setData(data[ptr], antialias=antialias, connect=connect, skipFiniteCheck=skipFiniteCheck)
126 app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents)
127 t_end = perf_counter()
128 elapsed.append(t_end - t_start)
129 ptr = (ptr + 1) % data.shape[0]
130
131 # update fps at most once every 0.2 secs
132 if t_end - fpsLastUpdate > 0.2:
133 fpsLastUpdate = t_end
134 average = np.mean(elapsed)
135 fps = 1 / average
136 pw.setTitle('%0.2f fps - %0.1f ms avg' % (fps, average * 1_000))
137
138 @interactor.decorate(
139 useOpenGL={'readonly': not args.allow_opengl_toggle},
140 plotMethod={'limits': ['pyqtgraph', 'drawPolyline'], 'type': 'list'},
141 curvePen={'type': 'pen'}
142 )
143 def updateOptions(
144 curvePen=pg.mkPen(),
145 plotMethod='pyqtgraph',
146 fillLevel=False,
147 enableExperimental=False,
148 useOpenGL=False,
149 ):
150 pg.setConfigOption('enableExperimental', enableExperimental)
151 pg.setConfigOption('useOpenGL', useOpenGL)
152 curve.setPen(curvePen)
153 curve.setFillLevel(0.0 if fillLevel else None)
154 curve.setMethod(plotMethod)
155
156 params.sigTreeStateChanged.connect(resetTimings)
157
158 makeData()
159
160 fpsLastUpdate = perf_counter()
161
162 timer = QtCore.QTimer()
163 timer.timeout.connect(update)
164 timer.start(0)
165
166 if __name__ == '__main__':
167 # Splitter by default gives too small of a width to the parameter tree,
168 # so fix that right before the event loop
169 pt.setMinimumSize(225,0)
170 pg.exec()
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/examples/PlotSpeedTest.py b/pyqtgraph/examples/PlotSpeedTest.py
--- a/pyqtgraph/examples/PlotSpeedTest.py
+++ b/pyqtgraph/examples/PlotSpeedTest.py
@@ -95,7 +95,14 @@
fsample={'units': 'Hz'},
frequency={'units': 'Hz'}
)
-def makeData(noise=True, nsamples=5000, frames=50, fsample=1000.0, frequency=0.0, amplitude=5.0):
+def makeData(
+ noise=args.noise,
+ nsamples=args.nsamples,
+ frames=args.frames,
+ fsample=args.fsample,
+ frequency=args.frequency,
+ amplitude=args.amplitude,
+):
global data, connect_array, ptr
ttt = np.arange(frames * nsamples, dtype=np.float64) / fsample
data = amplitude*np.sin(2*np.pi*frequency*ttt).reshape((frames, nsamples))
|
{"golden_diff": "diff --git a/pyqtgraph/examples/PlotSpeedTest.py b/pyqtgraph/examples/PlotSpeedTest.py\n--- a/pyqtgraph/examples/PlotSpeedTest.py\n+++ b/pyqtgraph/examples/PlotSpeedTest.py\n@@ -95,7 +95,14 @@\n fsample={'units': 'Hz'},\n frequency={'units': 'Hz'}\n )\n-def makeData(noise=True, nsamples=5000, frames=50, fsample=1000.0, frequency=0.0, amplitude=5.0):\n+def makeData(\n+ noise=args.noise,\n+ nsamples=args.nsamples,\n+ frames=args.frames,\n+ fsample=args.fsample,\n+ frequency=args.frequency,\n+ amplitude=args.amplitude,\n+):\n global data, connect_array, ptr\n ttt = np.arange(frames * nsamples, dtype=np.float64) / fsample\n data = amplitude*np.sin(2*np.pi*frequency*ttt).reshape((frames, nsamples))\n", "issue": "PlotSpeedTest command line arguments ignored\n### Short description\r\n<!-- This should summarize the issue. -->\r\n\r\nSince 108365ba45c1a1302df110dad5f9d960d4d903a9, PlotSpeedTest command arguments are no longer honored.\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n\r\n```\r\nPlotSpeedTest.py --nsamples=10000\r\n```\r\n\r\n### Expected behavior\r\n<!-- What should happen? -->\r\n\r\nThe number of samples used should be 10000.\r\n\r\n### Real behavior\r\n<!-- What happens? -->\r\n\r\nThe number of samples used remains at the default of 5000.\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: master\r\n * Qt Python binding: PyQt5 5.15.7, PySide6 6.3.2\r\n * Python version: Python 3.8.10 \r\n * Operating system: Windows 10\r\n * Installation method: pip install -e . <!-- e.g. pip, conda, system packages, ... -->\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n\"\"\"\nUpdate a simple plot as rapidly as possible to measure speed.\n\"\"\"\n\nimport argparse\nfrom collections import deque\nfrom time import perf_counter\n\nimport numpy as np\n\nimport pyqtgraph as pg\nimport pyqtgraph.functions as fn\nimport pyqtgraph.parametertree as ptree\nfrom pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n\n# defaults here result in the same configuration as the original PlotSpeedTest\nparser = argparse.ArgumentParser()\nparser.add_argument('--noise', dest='noise', action='store_true')\nparser.add_argument('--no-noise', dest='noise', action='store_false')\nparser.set_defaults(noise=True)\nparser.add_argument('--nsamples', default=5000, type=int)\nparser.add_argument('--frames', default=50, type=int)\nparser.add_argument('--fsample', default=1000, type=float)\nparser.add_argument('--frequency', default=0, type=float)\nparser.add_argument('--amplitude', default=5, type=float)\nparser.add_argument('--opengl', dest='use_opengl', action='store_true')\nparser.add_argument('--no-opengl', dest='use_opengl', action='store_false')\nparser.set_defaults(use_opengl=None)\nparser.add_argument('--allow-opengl-toggle', action='store_true',\n help=\"\"\"Allow on-the-fly change of OpenGL setting. 
This may cause unwanted side effects.\n \"\"\")\nargs = parser.parse_args()\n\nif args.use_opengl is not None:\n pg.setConfigOption('useOpenGL', args.use_opengl)\n pg.setConfigOption('enableExperimental', args.use_opengl)\n\n# don't limit frame rate to vsync\nsfmt = QtGui.QSurfaceFormat()\nsfmt.setSwapInterval(0)\nQtGui.QSurfaceFormat.setDefaultFormat(sfmt)\n\n\nclass MonkeyCurveItem(pg.PlotCurveItem):\n def __init__(self, *args, **kwds):\n super().__init__(*args, **kwds)\n self.monkey_mode = ''\n\n def setMethod(self, value):\n self.monkey_mode = value\n\n def paint(self, painter, opt, widget):\n if self.monkey_mode not in ['drawPolyline']:\n return super().paint(painter, opt, widget)\n\n painter.setRenderHint(painter.RenderHint.Antialiasing, self.opts['antialias'])\n painter.setPen(pg.mkPen(self.opts['pen']))\n\n if self.monkey_mode == 'drawPolyline':\n painter.drawPolyline(fn.arrayToQPolygonF(self.xData, self.yData))\n\napp = pg.mkQApp(\"Plot Speed Test\")\n\ndefault_pen = pg.mkPen()\n\nparams = ptree.Parameter.create(name='Parameters', type='group')\npt = ptree.ParameterTree(showHeader=False)\npt.setParameters(params)\npw = pg.PlotWidget()\nsplitter = QtWidgets.QSplitter()\nsplitter.addWidget(pt)\nsplitter.addWidget(pw)\nsplitter.show()\n\ninteractor = ptree.Interactor(\n parent=params, nest=False, runOptions=ptree.RunOptions.ON_CHANGED\n)\n\npw.setWindowTitle('pyqtgraph example: PlotSpeedTest')\npw.setLabel('bottom', 'Index', units='B')\ncurve = MonkeyCurveItem(pen=default_pen, brush='b')\npw.addItem(curve)\n\nrollingAverageSize = 1000\nelapsed = deque(maxlen=rollingAverageSize)\n\ndef resetTimings(*args):\n elapsed.clear()\n\[email protected](\n nest=True,\n nsamples={'limits': [0, None]},\n frames={'limits': [1, None]},\n fsample={'units': 'Hz'},\n frequency={'units': 'Hz'}\n)\ndef makeData(noise=True, nsamples=5000, frames=50, fsample=1000.0, frequency=0.0, amplitude=5.0):\n global data, connect_array, ptr\n ttt = np.arange(frames * nsamples, dtype=np.float64) / fsample\n data = amplitude*np.sin(2*np.pi*frequency*ttt).reshape((frames, nsamples))\n if noise:\n data += np.random.normal(size=data.shape)\n connect_array = np.ones(data.shape[-1], dtype=bool)\n ptr = 0\n pw.setRange(QtCore.QRectF(0, -10, nsamples, 20))\n\nparams.child('makeData').setOpts(title='Plot Options')\n\[email protected](\n connect={'type': 'list', 'limits': ['all', 'pairs', 'finite', 'array']}\n)\ndef update(\n antialias=pg.getConfigOption('antialias'),\n connect='all',\n skipFiniteCheck=False\n):\n global curve, data, ptr, elapsed, fpsLastUpdate\n\n if connect == 'array':\n connect = connect_array\n\n # Measure\n t_start = perf_counter()\n curve.setData(data[ptr], antialias=antialias, connect=connect, skipFiniteCheck=skipFiniteCheck)\n app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents)\n t_end = perf_counter()\n elapsed.append(t_end - t_start)\n ptr = (ptr + 1) % data.shape[0]\n\n # update fps at most once every 0.2 secs\n if t_end - fpsLastUpdate > 0.2:\n fpsLastUpdate = t_end\n average = np.mean(elapsed)\n fps = 1 / average\n pw.setTitle('%0.2f fps - %0.1f ms avg' % (fps, average * 1_000))\n\[email protected](\n useOpenGL={'readonly': not args.allow_opengl_toggle},\n plotMethod={'limits': ['pyqtgraph', 'drawPolyline'], 'type': 'list'},\n curvePen={'type': 'pen'}\n)\ndef updateOptions(\n curvePen=pg.mkPen(),\n plotMethod='pyqtgraph',\n fillLevel=False,\n enableExperimental=False,\n useOpenGL=False,\n):\n pg.setConfigOption('enableExperimental', enableExperimental)\n 
pg.setConfigOption('useOpenGL', useOpenGL)\n curve.setPen(curvePen)\n curve.setFillLevel(0.0 if fillLevel else None)\n curve.setMethod(plotMethod)\n\nparams.sigTreeStateChanged.connect(resetTimings)\n\nmakeData()\n\nfpsLastUpdate = perf_counter()\n\ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n\nif __name__ == '__main__':\n # Splitter by default gives too small of a width to the parameter tree,\n # so fix that right before the event loop\n pt.setMinimumSize(225,0)\n pg.exec()\n", "path": "pyqtgraph/examples/PlotSpeedTest.py"}], "after_files": [{"content": "#!/usr/bin/python\n\"\"\"\nUpdate a simple plot as rapidly as possible to measure speed.\n\"\"\"\n\nimport argparse\nfrom collections import deque\nfrom time import perf_counter\n\nimport numpy as np\n\nimport pyqtgraph as pg\nimport pyqtgraph.functions as fn\nimport pyqtgraph.parametertree as ptree\nfrom pyqtgraph.Qt import QtCore, QtGui, QtWidgets\n\n# defaults here result in the same configuration as the original PlotSpeedTest\nparser = argparse.ArgumentParser()\nparser.add_argument('--noise', dest='noise', action='store_true')\nparser.add_argument('--no-noise', dest='noise', action='store_false')\nparser.set_defaults(noise=True)\nparser.add_argument('--nsamples', default=5000, type=int)\nparser.add_argument('--frames', default=50, type=int)\nparser.add_argument('--fsample', default=1000, type=float)\nparser.add_argument('--frequency', default=0, type=float)\nparser.add_argument('--amplitude', default=5, type=float)\nparser.add_argument('--opengl', dest='use_opengl', action='store_true')\nparser.add_argument('--no-opengl', dest='use_opengl', action='store_false')\nparser.set_defaults(use_opengl=None)\nparser.add_argument('--allow-opengl-toggle', action='store_true',\n help=\"\"\"Allow on-the-fly change of OpenGL setting. 
This may cause unwanted side effects.\n \"\"\")\nargs = parser.parse_args()\n\nif args.use_opengl is not None:\n pg.setConfigOption('useOpenGL', args.use_opengl)\n pg.setConfigOption('enableExperimental', args.use_opengl)\n\n# don't limit frame rate to vsync\nsfmt = QtGui.QSurfaceFormat()\nsfmt.setSwapInterval(0)\nQtGui.QSurfaceFormat.setDefaultFormat(sfmt)\n\n\nclass MonkeyCurveItem(pg.PlotCurveItem):\n def __init__(self, *args, **kwds):\n super().__init__(*args, **kwds)\n self.monkey_mode = ''\n\n def setMethod(self, value):\n self.monkey_mode = value\n\n def paint(self, painter, opt, widget):\n if self.monkey_mode not in ['drawPolyline']:\n return super().paint(painter, opt, widget)\n\n painter.setRenderHint(painter.RenderHint.Antialiasing, self.opts['antialias'])\n painter.setPen(pg.mkPen(self.opts['pen']))\n\n if self.monkey_mode == 'drawPolyline':\n painter.drawPolyline(fn.arrayToQPolygonF(self.xData, self.yData))\n\napp = pg.mkQApp(\"Plot Speed Test\")\n\ndefault_pen = pg.mkPen()\n\nparams = ptree.Parameter.create(name='Parameters', type='group')\npt = ptree.ParameterTree(showHeader=False)\npt.setParameters(params)\npw = pg.PlotWidget()\nsplitter = QtWidgets.QSplitter()\nsplitter.addWidget(pt)\nsplitter.addWidget(pw)\nsplitter.show()\n\ninteractor = ptree.Interactor(\n parent=params, nest=False, runOptions=ptree.RunOptions.ON_CHANGED\n)\n\npw.setWindowTitle('pyqtgraph example: PlotSpeedTest')\npw.setLabel('bottom', 'Index', units='B')\ncurve = MonkeyCurveItem(pen=default_pen, brush='b')\npw.addItem(curve)\n\nrollingAverageSize = 1000\nelapsed = deque(maxlen=rollingAverageSize)\n\ndef resetTimings(*args):\n elapsed.clear()\n\[email protected](\n nest=True,\n nsamples={'limits': [0, None]},\n frames={'limits': [1, None]},\n fsample={'units': 'Hz'},\n frequency={'units': 'Hz'}\n)\ndef makeData(\n noise=args.noise,\n nsamples=args.nsamples,\n frames=args.frames,\n fsample=args.fsample,\n frequency=args.frequency,\n amplitude=args.amplitude,\n):\n global data, connect_array, ptr\n ttt = np.arange(frames * nsamples, dtype=np.float64) / fsample\n data = amplitude*np.sin(2*np.pi*frequency*ttt).reshape((frames, nsamples))\n if noise:\n data += np.random.normal(size=data.shape)\n connect_array = np.ones(data.shape[-1], dtype=bool)\n ptr = 0\n pw.setRange(QtCore.QRectF(0, -10, nsamples, 20))\n\nparams.child('makeData').setOpts(title='Plot Options')\n\[email protected](\n connect={'type': 'list', 'limits': ['all', 'pairs', 'finite', 'array']}\n)\ndef update(\n antialias=pg.getConfigOption('antialias'),\n connect='all',\n skipFiniteCheck=False\n):\n global curve, data, ptr, elapsed, fpsLastUpdate\n\n if connect == 'array':\n connect = connect_array\n\n # Measure\n t_start = perf_counter()\n curve.setData(data[ptr], antialias=antialias, connect=connect, skipFiniteCheck=skipFiniteCheck)\n app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents)\n t_end = perf_counter()\n elapsed.append(t_end - t_start)\n ptr = (ptr + 1) % data.shape[0]\n\n # update fps at most once every 0.2 secs\n if t_end - fpsLastUpdate > 0.2:\n fpsLastUpdate = t_end\n average = np.mean(elapsed)\n fps = 1 / average\n pw.setTitle('%0.2f fps - %0.1f ms avg' % (fps, average * 1_000))\n\[email protected](\n useOpenGL={'readonly': not args.allow_opengl_toggle},\n plotMethod={'limits': ['pyqtgraph', 'drawPolyline'], 'type': 'list'},\n curvePen={'type': 'pen'}\n)\ndef updateOptions(\n curvePen=pg.mkPen(),\n plotMethod='pyqtgraph',\n fillLevel=False,\n enableExperimental=False,\n useOpenGL=False,\n):\n 
pg.setConfigOption('enableExperimental', enableExperimental)\n pg.setConfigOption('useOpenGL', useOpenGL)\n curve.setPen(curvePen)\n curve.setFillLevel(0.0 if fillLevel else None)\n curve.setMethod(plotMethod)\n\nparams.sigTreeStateChanged.connect(resetTimings)\n\nmakeData()\n\nfpsLastUpdate = perf_counter()\n\ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n\nif __name__ == '__main__':\n # Splitter by default gives too small of a width to the parameter tree,\n # so fix that right before the event loop\n pt.setMinimumSize(225,0)\n pg.exec()\n", "path": "pyqtgraph/examples/PlotSpeedTest.py"}]}
| num_tokens: 2,345 | num_tokens_diff: 223 |
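For context on why this one-signature change is sufficient: Python evaluates default parameter values once, at function-definition time, and in `PlotSpeedTest.py` the call to `parser.parse_args()` runs before `makeData` is defined, so the parsed options can safely be bound as defaults. Below is a minimal, self-contained sketch of the broken versus fixed pattern; the function names `make_data_broken` and `make_data_fixed` are illustrative only and do not appear in pyqtgraph.

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--nsamples', default=5000, type=int)
args = parser.parse_args()


# Broken pattern: a caller that invokes the function with no arguments
# (as the parameter-tree interactor does) always gets the literal 5000,
# so a parsed --nsamples value never reaches the function.
def make_data_broken(nsamples=5000):
    return list(range(nsamples))


# Fixed pattern, mirroring the golden diff: bind the parsed value as the
# default. Defaults are evaluated once at definition time, which is safe
# here because parse_args() has already run.
def make_data_fixed(nsamples=args.nsamples):
    return list(range(nsamples))


if __name__ == '__main__':
    # e.g. `python sketch.py --nsamples=10000` prints 5000 vs. 10000
    print(len(make_data_broken()), len(make_data_fixed()))
```

Assuming the parameter-tree interactor derives its initial values from the decorated function's signature and otherwise calls it with no arguments, hard-coded literals in that signature are where the command-line values were being lost, which matches the symptom reported in the issue.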