prompt
large_stringlengths 72
9.34k
| completion
large_stringlengths 0
7.61k
|
---|---|
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
<|fim_middle|>
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j)) |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
<|fim_middle|>
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
<|fim_middle|>
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j)) |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
<|fim_middle|>
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | s[i][j] = 0 |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
<|fim_middle|>
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | s[i][j] = 1 |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
<|fim_middle|>
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1]) |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
<|fim_middle|>
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | s[i][j] = 1 |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
<|fim_middle|>
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | g[i][j] = None |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
<|fim_middle|>
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1) |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def <|fim_middle|>(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def clear(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | process |
<|file_name|>C.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Problem: Making Chess Boards
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
from heapq import *
def process(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
if g[i][j] == None:
s[i][j] = 0
elif i == 0 or j == 0:
s[i][j] = 1
elif g[i-1][j] != g[i][j] and g[i][j-1] != g[i][j] and \
g[i-1][j-1] == g[i][j]:
s[i][j] = 1 + min(s[i-1][j], s[i][j-1], s[i-1][j-1])
else:
s[i][j] = 1
heappush(q, (-s[i][j], i, j))
def <|fim_middle|>(r1, r2, c1, c2):
for i in range(r1, r2):
for j in range(c1, c2):
if 0 <= i < m and 0 <= j < n:
g[i][j] = None
for case in range(int(raw_input())):
m, n = map(int, raw_input().split())
v = [eval('0x'+raw_input()) for i in range(m)]
g = map(lambda x: map(lambda y: (x>>y)%2, range(n)[::-1]), v)
s = [[1 for i in range(n)] for j in range(m)]
q = []
process(0, m, 0, n)
b = []
while q:
x, r, c = heappop(q)
if x != 0 and s[r][c] == -x:
b.append((-x, r, c))
clear(r+x+1, r+1, c+x+1, c+1)
process(r+x+1, r-x+1, c+x+1, c-x+1)
vs = sorted(list(set(map(lambda x: x[0], b))))[::-1]
print "Case #%d: %d" % (case+1, len(vs))
for k in vs:
print k, len(filter(lambda x: x[0] == k, b))
<|fim▁end|> | clear |
<|file_name|>weekend.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
"""
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
if day in (5, 6):
answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
else:<|fim▁hole|>
bot.reply(answer)<|fim▁end|> | str_day = datetime.strftime(current_date, "%A")
answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day) |
<|file_name|>weekend.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
<|fim_middle|>
<|fim▁end|> | """
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
if day in (5, 6):
answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
else:
str_day = datetime.strftime(current_date, "%A")
answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
bot.reply(answer)
|
<|file_name|>weekend.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
"""
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
if day in (5, 6):
<|fim_middle|>
else:
str_day = datetime.strftime(current_date, "%A")
answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
bot.reply(answer)
<|fim▁end|> | answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
|
<|file_name|>weekend.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
"""
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
if day in (5, 6):
answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
else:
s <|fim_middle|>
bot.reply(answer)
<|fim▁end|> | tr_day = datetime.strftime(current_date, "%A")
answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
|
<|file_name|>weekend.py<|end_file_name|><|fim▁begin|>from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def <|fim_middle|>(bot):
"""
Answer to timeless question - are we at .weekend, yet?
"""
current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
if day in (5, 6):
answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
else:
str_day = datetime.strftime(current_date, "%A")
answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
bot.reply(answer)
<|fim▁end|> | weekend |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
<|fim▁hole|><|fim▁end|> | print "Done look for results in testPanelResults.txt" |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
<|fim_middle|>
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
<|fim_middle|>
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'" |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
<|fim_middle|>
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | return "$$" + name + "$$" |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
<|fim_middle|>
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | return "'" + name + "'" |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
<|fim_middle|>
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n") |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def <|fim_middle|>( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def esc_char(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | get_split_names |
<|file_name|>testPanel.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
panel_file = open('panels.txt','r')
name_file = open('testName.txt','r')
sample_type_file = open("sampleType.txt")
test_panel_results = open("output/testPanelResults.txt", 'w')
panel = []
type = []
test_names = []
def get_split_names( name ):
split_name_list = name.split("/")
for i in range(0, len(split_name_list)):
split_name_list[i] = split_name_list[i].strip()
return split_name_list
def <|fim_middle|>(name):
if "'" in name:
return "$$" + name + "$$"
else:
return "'" + name + "'"
for line in panel_file:
panel.append(line.strip())
panel_file.close()
for line in sample_type_file:
type.append(line.strip())
sample_type_file.close()
for line in name_file:
test_names.append(line.strip())
name_file.close()
test_panel_results.write("Below should be pasted to TestPanel.csv\n\n")
for row in range(0, len(test_names)):
if len(panel[row]) > 1:
test_description = esc_char(test_names[row] + "(" + type[row] + ")")
test_panel_results.write("nextval( 'panel_item_seq' ) , (select id from panel where name = '" + panel[row] + "')")
test_panel_results.write(" , (select id from test where description = " + test_description + ") , null , now() \n")
test_panel_results.close()
print "Done look for results in testPanelResults.txt"<|fim▁end|> | esc_char |
<|file_name|>test_docs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Money doctests as unittest Suite
"""
# RADAR: Python2
from __future__ import absolute_import
import doctest
import unittest
# RADAR: Python2
import money.six
FILES = (
'../../README.rst',
)
def load_tests(loader, tests, pattern):<|fim▁hole|> # RADAR Python 2.x
if money.six.PY2:
# Doc tests are Python 3.x
return unittest.TestSuite()
return doctest.DocFileSuite(*FILES)<|fim▁end|> | |
<|file_name|>test_docs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Money doctests as unittest Suite
"""
# RADAR: Python2
from __future__ import absolute_import
import doctest
import unittest
# RADAR: Python2
import money.six
FILES = (
'../../README.rst',
)
def load_tests(loader, tests, pattern):
# RADAR Python 2.x
<|fim_middle|>
<|fim▁end|> | if money.six.PY2:
# Doc tests are Python 3.x
return unittest.TestSuite()
return doctest.DocFileSuite(*FILES) |
<|file_name|>test_docs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Money doctests as unittest Suite
"""
# RADAR: Python2
from __future__ import absolute_import
import doctest
import unittest
# RADAR: Python2
import money.six
FILES = (
'../../README.rst',
)
def load_tests(loader, tests, pattern):
# RADAR Python 2.x
if money.six.PY2:
# Doc tests are Python 3.x
<|fim_middle|>
return doctest.DocFileSuite(*FILES)
<|fim▁end|> | return unittest.TestSuite() |
<|file_name|>test_docs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Money doctests as unittest Suite
"""
# RADAR: Python2
from __future__ import absolute_import
import doctest
import unittest
# RADAR: Python2
import money.six
FILES = (
'../../README.rst',
)
def <|fim_middle|>(loader, tests, pattern):
# RADAR Python 2.x
if money.six.PY2:
# Doc tests are Python 3.x
return unittest.TestSuite()
return doctest.DocFileSuite(*FILES)
<|fim▁end|> | load_tests |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
<|fim▁hole|>if __name__ == "__main__":
unittest.main()<|fim▁end|> | |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
<|fim_middle|>
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg) |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
<|fim_middle|>
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type) |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
<|fim_middle|>
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two') |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
<|fim_middle|>
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal) |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
<|fim_middle|>
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg) |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
<|fim_middle|>
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | """Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg) |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
<|fim_middle|>
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
<|fim_middle|>
<|fim▁end|> | unittest.main() |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def <|fim_middle|>(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | test000Exists |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def <|fim_middle|>(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | testInstantiate |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def <|fim_middle|>(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | testNoticeInvalid |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def <|fim_middle|>(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | testParseValidQuery |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def <|fim_middle|>(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def suite():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | testParseValidResponse |
<|file_name|>testlibbind_ns_msg.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# testlibbind_ns_msg.py - Unit tests for the libbind ns_msg wrapper
#
# This file is part of Strangle.
#
# Strangle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Strangle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Strangle; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, testutils, random
import unittest
from Strangle import libbind
class ns_msgTestCase(unittest.TestCase):
"""Tests for the wrapper around the libbind ns_msg struct"""
def test000Exists(self):
"""Check that the ns_msg type object exists cleanly in the module"""
assert(libbind.ns_msg.__class__ is type)
def testInstantiate(self):
"""Check that the ns_msg type accepts the correct arguments"""
# Too few
self.assertRaises(TypeError, libbind.ns_msg)
# Too many
self.assertRaises(TypeError, libbind.ns_msg, 'one', 'two')
def testNoticeInvalid(self):
"""Test whether the ns_msg type can handle bad data"""
rng = testutils.rng
for testNum in range(0, 50):
packetLength = random.randrange(20, 80)
packetVal = rng.read(packetLength)
self.assertRaises(TypeError, libbind.ns_msg, packetVal)
def testParseValidQuery(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-query").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def testParseValidResponse(self):
"""Test whether ns_msg initialization parses valid NS queries"""
packetData = file("data/www.company.example-response").read()
n = libbind.ns_msg(packetData)
assert(type(n) is libbind.ns_msg)
def <|fim_middle|>():
s = unittest.TestSuite()
s.addTest( unittest.makeSuite(ns_msgTestCase, 'test') )
return s
if __name__ == "__main__":
unittest.main()
<|fim▁end|> | suite |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)<|fim▁hole|> version='.'.join(map(str, __VERSION__)))
return template.render(**context)<|fim▁end|> | context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
<|fim_middle|>
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
<|fim_middle|>
<|fim▁end|> | template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context) |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
<|fim_middle|>
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | return '%dh%dm%ds' % (hours, mins, secs) |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
<|fim_middle|>
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | return '%dm%ds' % (mins, secs) |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
<|fim_middle|>
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | return '%ds%dms' % (secs, msecs) |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
<|fim_middle|>
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | return '%.2fms' % msecs |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def <|fim_middle|>(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def render_template(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | friendly_time |
<|file_name|>template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Template shortcut & filters"""
import os
import datetime
from jinja2 import Environment, FileSystemLoader
from uwsgi_sloth.settings import ROOT
from uwsgi_sloth import settings, __VERSION__
template_path = os.path.join(ROOT, 'templates')
env = Environment(loader=FileSystemLoader(template_path))
# Template filters
def friendly_time(msecs):
secs, msecs = divmod(msecs, 1000)
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
if hours:
return '%dh%dm%ds' % (hours, mins, secs)
elif mins:
return '%dm%ds' % (mins, secs)
elif secs:
return '%ds%dms' % (secs, msecs)
else:
return '%.2fms' % msecs
env.filters['friendly_time'] = friendly_time
def <|fim_middle|>(template_name, context={}):
template = env.get_template(template_name)
context.update(
SETTINGS=settings,
now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
version='.'.join(map(str, __VERSION__)))
return template.render(**context)
<|fim▁end|> | render_template |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),<|fim▁hole|> User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user<|fim▁end|> | } |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
<|fim_middle|>
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
<|fim_middle|>
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key] |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
<|fim_middle|>
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
<|fim_middle|>
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
<|fim_middle|>
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
<|fim_middle|>
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
<|fim_middle|>
<|fim▁end|> | password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
<|fim_middle|>
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs) |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
<|fim_middle|>
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2 |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
<|fim_middle|>
<|fim▁end|> | self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
<|fim_middle|>
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | raise ValueError(message) |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
<|fim_middle|>
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | raise ValueError(message) |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
<|fim_middle|>
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | self.label_key = 'email' |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
<|fim_middle|>
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | self.label_key = search_fields[0] |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
<|fim_middle|>
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | self.label_key = 'both' |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
<|fim_middle|>
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch') |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
<|fim_middle|>
return self.user
<|fim▁end|> | get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
) |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def <|fim_middle|>(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | __init__ |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def <|fim_middle|>(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | clean_username_or_email |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def <|fim_middle|>(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | get_user_by_username |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def <|fim_middle|>(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | get_user_by_email |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def <|fim_middle|>(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | get_user_by_both |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def <|fim_middle|>(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | __init__ |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
username_or_email = forms.CharField()
error_messages = {
'not_found': _("Sorry, this user doesn't exist."),
}
def __init__(self, *args, **kwargs):
self.case_sensitive = kwargs.pop('case_sensitive', True)
search_fields = kwargs.pop('search_fields', ('username', 'email'))
super(PasswordRecoveryForm, self).__init__(*args, **kwargs)
message = ("No other fields than email are supported "
"by default")
if len(search_fields) not in (1, 2):
raise ValueError(message)
for field in search_fields:
if field not in ['username', 'email']:
raise ValueError(message)
labels = {
'username': _('Username'),
'email': _('Email'),
'both': _('Username or Email'),
}
User = get_user_model() # noqa
if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
self.label_key = 'email'
elif len(search_fields) == 1:
self.label_key = search_fields[0]
else:
self.label_key = 'both'
self.fields['username_or_email'].label = labels[self.label_key]
def clean_username_or_email(self):
username = self.cleaned_data['username_or_email']
cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
self.cleaned_data['user'] = cleaner(username)
return username
def get_user_by_username(self, username):
key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: username})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_email(self, email):
validate_email(email)
key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
User = get_user_model()
try:
user = User._default_manager.get(**{key: email})
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
return user
def get_user_by_both(self, username):
key = '__%sexact'
key = key % '' if self.case_sensitive else key % 'i'
f = lambda field: Q(**{field + key: username})
filters = f('username') | f('email')
User = get_user_model()
try:
user = User._default_manager.get(filters)
except User.DoesNotExist:
raise forms.ValidationError(self.error_messages['not_found'],
code='not_found')
except User.MultipleObjectsReturned:
raise forms.ValidationError(_("Unable to find user."))
return user
class PasswordResetForm(forms.Form):
password1 = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_('New password (confirm)'),
widget=forms.PasswordInput,
)
error_messages = {
'password_mismatch': _("The two passwords didn't match."),
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordResetForm, self).__init__(*args, **kwargs)
def <|fim_middle|>(self):
password1 = self.cleaned_data.get('password1', '')
password2 = self.cleaned_data['password2']
if not password1 == password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch')
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['password1'])
if commit:
get_user_model()._default_manager.filter(pk=self.user.pk).update(
password=self.user.password,
)
return self.user
<|fim▁end|> | clean_password2 |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms
from django.core.validators import validate_email
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .utils import get_user_model
class PasswordRecoveryForm(forms.Form):
    """Look up the account to recover, by username, email, or either.

    ``search_fields`` (default ``('username', 'email')``) selects which
    lookup strategy is used; ``case_sensitive`` toggles ``__exact`` vs
    ``__iexact`` matching.

    Fix: the ValueError message previously claimed only "email" is
    supported, although both 'username' and 'email' are accepted — the
    message now states that correctly.
    """
    username_or_email = forms.CharField()

    error_messages = {
        'not_found': _("Sorry, this user doesn't exist."),
    }

    def __init__(self, *args, **kwargs):
        self.case_sensitive = kwargs.pop('case_sensitive', True)
        search_fields = kwargs.pop('search_fields', ('username', 'email'))
        super(PasswordRecoveryForm, self).__init__(*args, **kwargs)

        message = ("No other fields than username and email are supported "
                   "by default")
        if len(search_fields) not in (1, 2):
            raise ValueError(message)
        for field in search_fields:
            if field not in ['username', 'email']:
                raise ValueError(message)

        labels = {
            'username': _('Username'),
            'email': _('Email'),
            'both': _('Username or Email'),
        }
        User = get_user_model()  # noqa
        # label_key doubles as the suffix for the get_user_by_<key> dispatch.
        if getattr(User, 'USERNAME_FIELD', 'username') == 'email':
            self.label_key = 'email'
        elif len(search_fields) == 1:
            self.label_key = search_fields[0]
        else:
            self.label_key = 'both'
        self.fields['username_or_email'].label = labels[self.label_key]

    def clean_username_or_email(self):
        """Resolve the submitted value to a user via the configured lookup."""
        username = self.cleaned_data['username_or_email']
        cleaner = getattr(self, 'get_user_by_%s' % self.label_key)
        self.cleaned_data['user'] = cleaner(username)
        return username

    def get_user_by_username(self, username):
        """Fetch a user by exact (optionally case-insensitive) username."""
        key = 'username__%sexact' % ('' if self.case_sensitive else 'i')
        User = get_user_model()
        try:
            user = User._default_manager.get(**{key: username})
        except User.DoesNotExist:
            raise forms.ValidationError(self.error_messages['not_found'],
                                        code='not_found')
        return user

    def get_user_by_email(self, email):
        """Fetch a user by exact (optionally case-insensitive) email.

        Validates the address format first so malformed input fails early.
        """
        validate_email(email)
        key = 'email__%sexact' % ('' if self.case_sensitive else 'i')
        User = get_user_model()
        try:
            user = User._default_manager.get(**{key: email})
        except User.DoesNotExist:
            raise forms.ValidationError(self.error_messages['not_found'],
                                        code='not_found')
        return user

    def get_user_by_both(self, username):
        """Fetch the unique user matching either username or email."""
        key = '__%sexact'
        key = key % '' if self.case_sensitive else key % 'i'

        def match(field):
            return Q(**{field + key: username})

        filters = match('username') | match('email')
        User = get_user_model()
        try:
            user = User._default_manager.get(filters)
        except User.DoesNotExist:
            raise forms.ValidationError(self.error_messages['not_found'],
                                        code='not_found')
        except User.MultipleObjectsReturned:
            # Same value matched one account's username and another's email.
            raise forms.ValidationError(_("Unable to find user."))
        return user
class PasswordResetForm(forms.Form):
    """Ask for a new password twice and set it on a user supplied by the view.

    Fix: the persistence method's name was mangled in this copy; it must
    be ``save`` — the conventional Django form API that views call after
    validation (the intact duplicate of this class confirms the name).
    """
    password1 = forms.CharField(
        label=_('New password'),
        widget=forms.PasswordInput,
    )
    password2 = forms.CharField(
        label=_('New password (confirm)'),
        widget=forms.PasswordInput,
    )
    error_messages = {
        'password_mismatch': _("The two passwords didn't match."),
    }

    def __init__(self, *args, **kwargs):
        # The target user is injected by the calling view.
        self.user = kwargs.pop('user')
        super(PasswordResetForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        """Validate that both password fields agree; return the password."""
        password1 = self.cleaned_data.get('password1', '')
        password2 = self.cleaned_data['password2']
        if password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch')
        return password2

    def save(self, commit=True):
        """Hash the new password onto ``self.user`` and optionally persist it.

        Only the password column is written, via a queryset ``update``,
        so concurrent changes to other user fields are not clobbered.
        """
        self.user.set_password(self.cleaned_data['password1'])
        if commit:
            get_user_model()._default_manager.filter(pk=self.user.pk).update(
                password=self.user.password,
            )
        return self.user
<|fim▁end|> | save |
<|file_name|>example_grid_time.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Grid time
=============
"""
from datetime import timedelta
import numpy as np
from opendrift.readers import reader_global_landmask
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.models.oceandrift import OceanDrift
# Seeding at a grid at regular interval
o = OceanDrift(loglevel=20) # Set loglevel to 0 for debug information
reader_norkyst = reader_netCDF_CF_generic.Reader(o.test_data_folder() +
'16Nov2015_NorKyst_z_surface/norkyst800_subset_16Nov2015.nc')
#%%
# Landmask
reader_landmask = reader_global_landmask.Reader(
extent=[4.0, 5.5, 59.9, 61.2])
o.add_reader([reader_landmask, reader_norkyst])
#%%
# Seeding some particles
# Build a 10x10 grid of seeding positions and flatten it to 1-D arrays.
# Fix: the `lons = lons.ravel()` line was dropped (FIM hole); without it
# lons stays 2-D while lats is 1-D and seed_elements receives mismatched
# shapes.
lons = np.linspace(4.4, 4.6, 10)
lats = np.linspace(60.0, 60.1, 10)
lons, lats = np.meshgrid(lons, lats)
lons = lons.ravel()
lats = lats.ravel()
#%%
# Seed oil elements on a grid at regular time interval
start_time = reader_norkyst.start_time
time_step = timedelta(hours=6)
num_steps = 10
for i in range(num_steps+1):
o.seed_elements(lons, lats, radius=0, number=100,
time=start_time + i*time_step)
#%%
# Running model for 60 hours
o.run(steps=60*4, time_step=900, time_step_output=3600)
#%%
# Print and plot results
print(o)
o.animation(fast=True)
#%%
# .. image:: /gallery/animations/example_grid_time_0.gif<|fim▁end|> | lons = lons.ravel() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):<|fim▁hole|> if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent<|fim▁end|> | if stat == "mean":
return data.mean()
if stat == "variance":
return data.var() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
<|fim_middle|>
def is_stat(stat):
    """argparse ``type`` hook: return *stat* when it names a known statistic.

    Accepts the fixed names plus quantile tokens such as ``q5`` or ``q90``
    (checked via ``is_quantile``); anything else raises ArgumentTypeError
    so argparse reports a clean usage error.
    """
    known = ("mean", "variance", "min", "max", "q100")
    if stat in known or is_quantile(stat):
        return stat
    raise argparse.ArgumentTypeError(
        "Invalid statistic {0}".format(stat))
def get_args():
    """Build and parse the command line for this summarization script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data", type=argparse.FileType("r"),
        help="data file to be summarized."
             "Should have columns seed, set, and metrics columns.")
    parser.add_argument(
        "parameterizations", type=argparse.FileType("r"),
        help="file containing parameterizations. Number of "
             "parameterizations should be the same as number of "
             "rows per seed in the data file.")
    parser.add_argument(
        "parameters", type=argparse.FileType("r"),
        help="file describing parameters. Should have as many "
             "rows as parameterizations file has columns.")
    default_stats = ["mean", "variance", "q10", "q50", "q90"]
    parser.add_argument(
        "-s", "--stats", nargs="+", default=default_stats, type=is_stat,
        help="statistics to compute")
    parser.add_argument(
        "-g", "--group", nargs="+",
        help="parameters by which to group. Names should be "
             "found in the parameters file. ")
    parser.add_argument(
        "-d", "--deltas", nargs="+", type=float,
        help="If group is specified, deltas may be used to "
             "impose grid boxes on the summary rather than "
             "using point values.")
    parser.add_argument(
        "-o", "--output-directory",
        default="/gpfs/scratch/mjw5407/task1/stats/")
    return parser.parse_args()
def compute(data, stat):
    """Evaluate the statistic named by *stat* on *data* (e.g. a groupby).

    Known names: 'mean', 'variance', 'min', 'max'/'q100', and quantile
    tokens like 'q50'; 'q0' degenerates to the minimum.
    """
    if stat == "mean":
        return data.mean()
    if stat == "variance":
        return data.var()
    if stat in ("max", "q100"):
        return data.max()
    if stat == "min":
        return data.min()
    if is_quantile(stat):
        fraction = float(stat[1:]) / 100.0
        if fraction == 0.0:
            return data.min()
        return data.quantile(fraction)
def analyze(data, stats, group=None, deltas=None):
    """Group *data* and compute each requested statistic per group.

    group:  column names to group by (defaults to ["Set"]).
    deltas: optional grid sizes, positionally matched to *group*; when
            given, each grouped column is snapped down to a multiple of
            its delta via an added "grid_<name>" column (NOTE: mutates
            *data* in place by adding those columns).
    Returns a list of (tag, result) pairs, one per statistic.

    Fix: Python-2-only ``print`` statements replaced with the
    parenthesized single-argument form, which behaves identically under
    Python 2 and is valid Python 3.
    """
    results = []
    if group is None:
        group = ["Set"]
    togroupby = copy.copy(group)
    ii = 0
    if deltas is None:
        togroupby = group
    else:
        while ii < len(group) and ii < len(deltas):
            colname = "grid_{0}".format(group[ii])
            # Snap values down to the nearest multiple of deltas[ii].
            gridnumbers = numpy.floor(data[group[ii]].apply(
                lambda val: val / deltas[ii]))
            data[colname] = gridnumbers.apply(
                lambda val: val * deltas[ii])
            togroupby[ii] = colname
            ii += 1
    print("analyzing grouped by {0}".format(group))
    gb = data.groupby(togroupby)
    for stat in stats:
        print("computing {0}".format(stat))
        tag = "{0}_{1}".format("_".join(group), stat)
        results.append((tag, compute(gb, stat)))
    return results
def write_result(infn, result, outputdir):
    """Write one (tag, frame) result as a space-separated table.

    The output filename is "<tag>_<basename of infn>" with any trailing
    ".hv" extension stripped, placed inside *outputdir*.

    Fixes: regex is now a raw string (``"\\.hv$"`` without the r-prefix
    triggers an invalid-escape warning on Python 3), and the print uses
    the parenthesized form valid on both Python 2 and 3.
    """
    fn = "_".join([result[0], os.path.basename(infn)])
    fn = re.sub(r"\.hv$", "", fn)
    fn = os.path.join(outputdir, fn)
    print("writing {0}".format(fn))
    result[1].to_csv(fn, sep=" ", index=True)
def cli():
    """Entry point: load inputs, join parameterizations, write statistics."""
    args = get_args()
    frame = pandas.read_table(args.data, sep=" ")
    param_table = pandas.read_table(
        args.parameters, sep=" ",
        names=["name", "low", "high"], header=None)
    parameterizations = pandas.read_table(
        args.parameterizations, sep=" ",
        names=param_table["name"].values, header=None)
    # Attach the parameterization columns to each data row via its Set index.
    frame = frame.join(parameterizations, on=["Set"], how="outer")
    deltas = args.deltas if args.deltas is not None else []
    for result in analyze(frame, args.stats, args.group, deltas):
        write_result(args.data.name, result, args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return re.match("q[0-9][0-9]?$", stat) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
    """Truthy when *stat* is a quantile token: 'q' plus one or two digits.

    Note that 'q100' has three digits and is deliberately NOT matched;
    callers treat it as an alias for the maximum.
    """
    quantile_token = re.compile(r"q[0-9][0-9]?$")
    return quantile_token.match(stat)
def is_stat(stat):
<|fim_middle|>
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat)) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
<|fim_middle|>
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
<|fim_middle|>
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
<|fim_middle|>
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
<|fim_middle|>
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
<|fim_middle|>
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
<|fim_middle|>
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return stat |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
<|fim_middle|>
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return stat |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
<|fim_middle|>
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat)) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
<|fim_middle|>
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return data.mean() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
<|fim_middle|>
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return data.var() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
<|fim_middle|>
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile) |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
<|fim_middle|>
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return data.min() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
<|fim_middle|>
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return data.max() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
<|fim_middle|>
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | return data.min() |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
<|fim_middle|>
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | group = ["Set"] |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
<|fim_middle|>
else:
while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | togroupby = group |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
return re.match("q[0-9][0-9]?$", stat)
def is_stat(stat):
if stat in ["mean", "variance", "min", "max", "q100"]:
return stat
elif is_quantile(stat):
return stat
else:
raise argparse.ArgumentTypeError(
"Invalid statistic {0}".format(stat))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("data",
type=argparse.FileType("r"),
help="data file to be summarized."
"Should have columns seed, "\
"set, and metrics columns.")
parser.add_argument("parameterizations",
type=argparse.FileType("r"),
help="file containing parameter"\
"izations. Number of param"\
"eterizations should be the "\
"same as number of rows per "\
"seed in the data file."
)
parser.add_argument("parameters",
type=argparse.FileType("r"),
help="file describing parameters. "\
"Should have as many rows as "\
"parameterizations file has "\
"columns."
)
stats = ["mean", "variance", "q10", "q50", "q90"]
parser.add_argument("-s", "--stats", nargs="+",
default = stats, type = is_stat,
help="statistics to compute")
parser.add_argument("-g", "--group", nargs="+",
help="parameters by which to "\
"group. Names should be "\
"found in the parameters "\
"file. "
)
parser.add_argument("-d", "--deltas",
help="If group is specified, "\
"deltas may be used to impose "\
"grid boxes on the summary "\
"rather than using point "\
"values.",
nargs="+", type = float
)
parser.add_argument("-o", "--output-directory",
default="/gpfs/scratch/mjw5407/"
"task1/stats/"
)
return parser.parse_args()
def compute(data, stat):
if stat == "mean":
return data.mean()
if stat == "variance":
return data.var()
if is_quantile(stat):
quantile = float(stat[1:]) / 100.0
if quantile == 0.0:
return data.min()
return data.quantile(quantile)
if stat == "max" or stat == "q100":
return data.max()
if stat == "min":
return data.min()
def analyze(data, stats, group=None, deltas=None):
results = []
if group is None:
group = ["Set"]
togroupby = copy.copy(group)
ii = 0
if deltas is None:
togroupby = group
else:
<|fim_middle|>
print "analyzing grouped by {0}".format(group)
gb = data.groupby(togroupby)
for stat in stats:
print "computing {0}".format(stat)
tag = "{0}_{1}".format("_".join(group), stat)
results.append((tag, compute(gb, stat)))
return results
def write_result(infn, result, outputdir):
fn = "_".join([result[0], os.path.basename(infn)])
fn = re.sub("\.hv$", "", fn)
fn = os.path.join(outputdir, fn)
print "writing {0}".format(fn)
result[1].to_csv(fn, sep=" ", index=True)
def cli():
args = get_args()
data = pandas.read_table(args.data, sep=" ")
parameters = pandas.read_table(
args.parameters, sep=" ",
names=["name","low","high"],
header=None)
param_names = parameters["name"].values
parameterizations = pandas.read_table(
args.parameterizations,
sep=" ",
names = param_names,
header = None)
data = data.join(parameterizations, on=["Set"],
how="outer")
if args.deltas is not None:
deltas = args.deltas
else:
deltas = []
results = analyze(data, args.stats, args.group, deltas)
for result in results:
write_result(args.data.name, result,
args.output_directory)
if __name__ == "__main__":
cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | while ii < len(group) and ii < len(deltas):
colname = "grid_{0}".format(group[ii])
gridnumbers = numpy.floor(data[group[ii]].apply(
lambda val: val / deltas[ii]))
data[colname] = gridnumbers.apply(
lambda val: val * deltas[ii])
togroupby[ii] = colname
ii += 1 |
<|file_name|>statistics.py<|end_file_name|><|fim▁begin|>"""
Copyright (C) 2013 Matthew Woodruff
This script is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This script is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this script. If not, see <http://www.gnu.org/licenses/>.
===========================================================
Coming in: one of 36 algo/problem combinations. 50 seeds in
one file. Also the _Sobol file specifying the
parameterization for each row, as well as the parameters
file itself.
Going out: stats: mean, quantile, variance
grouped by parameterization
grouped by some or all 2d combinations of
parameters
"""
import argparse
import pandas
import numpy
import re
import os
import copy
def is_quantile(stat):
    """Return a truthy match if *stat* names a quantile like "q5" or "q90"."""
    # One or two digits after a leading "q"; "q100" is deliberately excluded
    # (it is handled as an alias for "max" elsewhere).
    return re.match(r"^q[0-9]{1,2}$", stat)
def is_stat(stat):
    """argparse type-checker: return *stat* if it names a supported statistic.

    Accepts the fixed names plus one- or two-digit quantiles ("q0"-"q99").
    Raises argparse.ArgumentTypeError otherwise so argparse reports a clean
    usage error instead of a traceback.
    """
    if stat in ("mean", "variance", "min", "max", "q100"):
        return stat
    # Quantile spelled "qNN" (one or two digits).
    if re.match("q[0-9][0-9]?$", stat):
        return stat
    raise argparse.ArgumentTypeError(
        "Invalid statistic {0}".format(stat))
def get_args():
    """Build and parse the command-line interface for the summarizer."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "data", type=argparse.FileType("r"),
        help="data file to be summarized.Should have columns seed, set, and metrics columns.")
    ap.add_argument(
        "parameterizations", type=argparse.FileType("r"),
        help="file containing parameterizations. Number of parameterizations should be the same as number of rows per seed in the data file.")
    ap.add_argument(
        "parameters", type=argparse.FileType("r"),
        help="file describing parameters. Should have as many rows as parameterizations file has columns.")
    ap.add_argument(
        "-s", "--stats", nargs="+",
        default=["mean", "variance", "q10", "q50", "q90"],
        type=is_stat,
        help="statistics to compute")
    ap.add_argument(
        "-g", "--group", nargs="+",
        help="parameters by which to group. Names should be found in the parameters file. ")
    ap.add_argument(
        "-d", "--deltas", nargs="+", type=float,
        help="If group is specified, deltas may be used to impose grid boxes on the summary rather than using point values.")
    ap.add_argument(
        "-o", "--output-directory",
        default="/gpfs/scratch/mjw5407/task1/stats/")
    return ap.parse_args()
def compute(data, stat):
    """Apply the named statistic to *data*.

    *data* is anything pandas-like exposing mean/var/quantile/min/max
    (e.g. a DataFrame, Series, or GroupBy). Returns None for an
    unrecognized *stat* name.
    """
    if stat == "mean":
        return data.mean()
    if stat == "variance":
        return data.var()
    if re.match("q[0-9][0-9]?$", stat):
        # "qNN" -> the NN/100 quantile; "q0" degenerates to the minimum.
        fraction = float(stat[1:]) / 100.0
        if fraction == 0.0:
            return data.min()
        return data.quantile(fraction)
    if stat in ("max", "q100"):
        return data.max()
    if stat == "min":
        return data.min()
def analyze(data, stats, group=None, deltas=None):
    """Compute each statistic in *stats* over *data* grouped by *group*.

    With no *group*, rows are grouped by the "Set" column. When *deltas*
    are supplied, each grouped column is snapped down onto a grid of that
    spacing (adding "grid_<col>" columns to *data* in place) and grouping
    uses the gridded columns instead of the raw point values.

    Returns a list of (tag, result) pairs, one per statistic, where tag is
    "<group names joined by _>_<stat>".
    """
    if group is None:
        group = ["Set"]
    keys = copy.copy(group)
    if deltas is None:
        keys = group
    else:
        # Pair columns with deltas; extra columns keep their point values.
        for idx, (col, delta) in enumerate(zip(group, deltas)):
            gridded = "grid_{0}".format(col)
            # Snap each value down to the nearest multiple of delta.
            cells = numpy.floor(data[col].apply(lambda val: val / delta))
            data[gridded] = cells.apply(lambda val: val * delta)
            keys[idx] = gridded
    print("analyzing grouped by {0}".format(group))
    grouped = data.groupby(keys)
    results = []
    for stat in stats:
        print("computing {0}".format(stat))
        tag = "{0}_{1}".format("_".join(group), stat)
        results.append((tag, compute(grouped, stat)))
    return results
def write_result(infn, result, outputdir):
    """Write one (tag, table) *result* as a space-separated file.

    The output file name is "<tag>_<basename of infn>" with any trailing
    ".hv" extension stripped, placed under *outputdir*.
    """
    tag, table = result
    basename = "_".join([tag, os.path.basename(infn)])
    basename = re.sub(r"\.hv$", "", basename)
    outpath = os.path.join(outputdir, basename)
    print("writing {0}".format(outpath))
    table.to_csv(outpath, sep=" ", index=True)
def cli():
    """Command-line entry point.

    Reads the data file, joins each row with its parameterization (via the
    "Set" column), computes the requested statistics, and writes one output
    file per (grouping, statistic) result.
    """
    args = get_args()
    data = pandas.read_table(args.data, sep=" ")
    # The parameters file supplies the column names for the
    # parameterizations table.
    parameters = pandas.read_table(
        args.parameters, sep=" ",
        names=["name", "low", "high"],
        header=None)
    param_names = parameters["name"].values
    parameterizations = pandas.read_table(
        args.parameterizations,
        sep=" ",
        names=param_names,
        header=None)
    # Each data row's "Set" value indexes its parameterization row.
    data = data.join(parameterizations, on=["Set"],
                     how="outer")
    if args.deltas is not None:
        # Restored: this statement was destroyed by an infill placeholder
        # in the source; the intended line is confirmed by the duplicate
        # copy of this function earlier in the dump.
        deltas = args.deltas
    else:
        deltas = []
    results = analyze(data, args.stats, args.group, deltas)
    for result in results:
        write_result(args.data.name, result,
                     args.output_directory)
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    cli()
# vim:ts=4:sw=4:expandtab:ai:colorcolumn=60:number:fdm=indent
<|fim▁end|> | deltas = args.deltas |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.