prompt (large_string, lengths 72 to 9.34k) | completion (large_string, lengths 0 to 7.61k) |
---|---|
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
<|fim▁hole|> r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()<|fim▁end|> | for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]: |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
<|fim_middle|>
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1) |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
<|fim_middle|>
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
<|fim_middle|>
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
<|fim_middle|>
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
<|fim_middle|>
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | """
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
<|fim_middle|>
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1) |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
<|fim_middle|>
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
<|fim_middle|>
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood - 1] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
<|fim_middle|>
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1 |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
<|fim_middle|>
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
<|fim_middle|>
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood - 1] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
<|fim_middle|>
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1 |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
<|fim_middle|>
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
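        # neighbours thresholded against the centre select which powers of two are summed;
        # the sum is the LBP code, used directly as a histogram index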
res[np.sum(p [both >= input[i]])] += 1
return res
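# benchmark input lengths: 10**3 .. 10**6 random bytes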
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
<|fim_middle|>
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | r += powers[j - i + neighborhood - 1] |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def <|fim_middle|>(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | lbp_kernel |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def <|fim_middle|>(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | extract_1dlbp_gpu |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def <|fim_middle|>(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | extract_1dlbp_gpu_debug |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def <|fim_middle|>(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def extract_1dlbp_cpu(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | extract_1dlbp_cpu_jit |
<|file_name|>1dlbp_tests.py<|end_file_name|><|fim▁begin|>import numpy as np
import matplotlib.pylab as plt
from numba import cuda, uint8, int32, uint32, jit
from timeit import default_timer as timer
@cuda.jit('void(uint8[:], int32, int32[:], int32[:])')
def lbp_kernel(input, neighborhood, powers, h):
i = cuda.grid(1)
r = 0
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
cuda.atomic.add(h, r, 1)
def extract_1dlbp_gpu(input, neighborhood, d_powers):
maxThread = 512
blockDim = maxThread
d_input = cuda.to_device(input)
hist = np.zeros(2 ** (2 * neighborhood), dtype='int32')
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
d_hist = cuda.to_device(hist)
lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist)
d_hist.to_host()
return hist
def extract_1dlbp_gpu_debug(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
@jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True)
def extract_1dlbp_cpu_jit(input, neighborhood, powers, res):
maxThread = 512
blockDim = maxThread
gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim
for block in range(0, gridDim):
for thread in range(0, blockDim):
r = 0
i = blockDim * block + thread
if i < input.shape[0] - 2 * neighborhood:
i += neighborhood
for j in range(i - neighborhood, i):
if input[j] >= input[i]:
r += powers[j - i + neighborhood]
for j in range(i + 1, i + neighborhood + 1):
if input[j] >= input[i]:
r += powers[j - i + neighborhood - 1]
res[r] += 1
return res
def <|fim_middle|>(input, neighborhood, p):
"""
Extract the 1d lbp pattern on CPU
"""
res = np.zeros(1 << (2 * neighborhood))
for i in range(neighborhood, len(input) - neighborhood):
left = input[i - neighborhood : i]
right = input[i + 1 : i + neighborhood + 1]
both = np.r_[left, right]
res[np.sum(p [both >= input[i]])] += 1
return res
X = np.arange(3, 7)
X = 10 ** X
neighborhood = 4
cpu_times = np.zeros(X.shape[0])
cpu_times_simple = cpu_times.copy()
cpu_times_jit = cpu_times.copy()
gpu_times = np.zeros(X.shape[0])
p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32')
d_powers = cuda.to_device(p)
for i, x in enumerate(X):
input = np.random.randint(0, 256, size = x).astype(np.uint8)
print "Length: {0}".format(x)
print "--------------"
start = timer()
h_cpu = extract_1dlbp_cpu(input, neighborhood, p)
cpu_times[i] = timer() - start
print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res)
cpu_times_simple[i] = timer() - start
print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i])
res = np.zeros(1 << (2 * neighborhood), dtype='int32')
start = timer()
h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res)
cpu_times_jit[i] = timer() - start
print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i])
start = timer()
h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers)
gpu_times[i] = timer() - start
print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i])
print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all()
print ''
f = plt.figure(figsize=(10, 5))
plt.plot(X, cpu_times, label = "CPU")
plt.plot(X, cpu_times_simple, label = "CPU non-vectorized")
plt.plot(X, cpu_times_jit, label = "CPU jit")
plt.plot(X, gpu_times, label = "GPU")
plt.yscale('log')
plt.xscale('log')
plt.xlabel('input length')
plt.ylabel('time, sec')
plt.legend()
plt.show()
<|fim▁end|> | extract_1dlbp_cpu |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
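        # ask which instances run on that hypervisor; the reply lists the matching hypervisor entries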
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
<|fim▁hole|> # 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
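        # search by hostname and expect at least the hypervisor we started from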
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)<|fim▁end|> | # Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
<|fim_middle|>
<|fim▁end|> | """Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
<|fim_middle|>
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
<|fim_middle|>
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self.client.list_hypervisors()['hypervisors']
return hypers |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
<|fim_middle|>
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
<|fim_middle|>
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self._list_hypervisors()
self.assertHypervisors(hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List the hypervisors and the available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
<|fim_middle|>
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on a specific hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats across all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
<|fim_middle|>
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname']) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
<|fim_middle|>
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
<|fim_middle|>
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
<|fim_middle|>
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
<|fim_middle|>
<|fim▁end|> | hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers) |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
<|fim_middle|>
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | hypers_without_ironic.append(hyper)
ironic_only = False |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
<|fim_middle|>
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | raise self.skipException(
"Ironic does not support hypervisor uptime") |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
<|fim_middle|>
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | has_valid_uptime = True
break |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def <|fim_middle|>(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | setup_clients |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def <|fim_middle|>(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | _list_hypervisors |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def <|fim_middle|>(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | assertHypervisors |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def <|fim_middle|>(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_list |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def <|fim_middle|>(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
details = (self.client.show_hypervisor(hyper['id'])
['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_list_details |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def <|fim_middle|>(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
                       ['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_show_details |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List hypervisors and available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def <|fim_middle|>(self):
        # Show the instances hosted on the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
                       ['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_show_servers |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List hypervisors and available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def <|fim_middle|>(self):
        # Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
                       ['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_stats |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List hypervisors and available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def <|fim_middle|>(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
                       ['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_get_hypervisor_uptime |
<|file_name|>test_hypervisor.py<|end_file_name|><|fim▁begin|># Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Hypervisors API that require admin privileges"""
@classmethod
def setup_clients(cls):
super(HypervisorAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.hypervisor_client
def _list_hypervisors(self):
# List of hypervisors
hypers = self.client.list_hypervisors()['hypervisors']
return hypers
def assertHypervisors(self, hypers):
self.assertTrue(len(hypers) > 0, "No hypervisors found: %s" % hypers)
@test.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
def test_get_hypervisor_list(self):
        # List hypervisors and available hypervisor hostnames
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
@test.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
def test_get_hypervisor_list_details(self):
        # Display the details of all hypervisors
hypers = self.client.list_hypervisors(detail=True)['hypervisors']
self.assertHypervisors(hypers)
@test.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
details = self.client.show_hypervisor(hypers[0]['id'])['hypervisor']
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
def test_get_hypervisor_show_servers(self):
        # Show the instances hosted on the specified hypervisor
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hostname = hypers[0]['hypervisor_hostname']
hypervisors = (self.client.list_servers_on_hypervisor(hostname)
['hypervisors'])
self.assertTrue(len(hypervisors) > 0)
@test.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
def test_get_hypervisor_stats(self):
        # Verify the stats of all hypervisors
stats = (self.client.show_hypervisor_statistics()
['hypervisor_statistics'])
self.assertTrue(len(stats) > 0)
@test.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
# Ironic will register each baremetal node as a 'hypervisor',
# so the hypervisor list can contain many hypervisors of type
# 'ironic'. If they are ALL ironic, skip this test since ironic
# doesn't support hypervisor uptime. Otherwise, remove them
# from the list of hypervisors to test.
ironic_only = True
hypers_without_ironic = []
for hyper in hypers:
            details = (self.client.show_hypervisor(hyper['id'])
                       ['hypervisor'])
if details['hypervisor_type'] != 'ironic':
hypers_without_ironic.append(hyper)
ironic_only = False
if ironic_only:
raise self.skipException(
"Ironic does not support hypervisor uptime")
has_valid_uptime = False
for hyper in hypers_without_ironic:
# because hypervisors might be disabled, this loops looking
# for any good hit.
try:
uptime = (self.client.show_hypervisor_uptime(hyper['id'])
['hypervisor'])
if len(uptime) > 0:
has_valid_uptime = True
break
except Exception:
pass
self.assertTrue(
has_valid_uptime,
"None of the hypervisors had a valid uptime: %s" % hypers)
@test.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
def <|fim_middle|>(self):
hypers = self._list_hypervisors()
self.assertHypervisors(hypers)
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertHypervisors(hypers)
<|fim▁end|> | test_search_hypervisor |
<|file_name|>11.py<|end_file_name|><|fim▁begin|># Created by PyCharm Community Edition
# User: Kaushik Talukdar
# Date: 22-03-2017
# Time: 03:52 PM
# Python doesn't allow you to mix strings and numbers directly; numbers must be converted to strings first
<|fim▁hole|>
print("Greetings on your " + str(age) + "th birthday")<|fim▁end|> |
age = 28
|
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()<|fim▁hole|> print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message<|fim▁end|> | print s.subcribe() |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
<|fim_middle|>
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
<|fim▁end|> | s.login(username, password)
print "logging in..." |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
<|fim_middle|>
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
<|fim▁end|> | print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..." |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
<|fim_middle|>
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
<|fim▁end|> | print s.createRegistrationToken()
print s.subcribe()
data = s.pull() |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
<|fim_middle|>
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
<|fim▁end|> | continue |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
<|fim_middle|>
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
print "From %s" % sender
print message
<|fim▁end|> | continue |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
<|fim_middle|>
else:
print "From %s" % sender
print message
<|fim▁end|> | print "%s to %s" % (sender, receiver) |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from messenger import Skype
import keyring
import utils
token = keyring.get_password('messagesReceiver', 'skypeToken')
registrationToken = keyring.get_password('messagesReceiver', 'skypeRegistrationToken')
username = keyring.get_password('messagesReceiver', 'skypeUsername')
password = keyring.get_password('messagesReceiver', 'skypePassword')
s = Skype(token, registrationToken)
if s.token == None:
s.login(username, password)
print "logging in..."
if s.registrationToken == None:
print s.createRegistrationToken()
print s.subcribe()
print "creating endpoint and registrationToken..."
while True:
data = s.pull()
if data == 404:
print s.createRegistrationToken()
print s.subcribe()
data = s.pull()
if data == 400:
continue
messages = utils.skypeParse(data)
if not messages:
continue
for sender, receiver, message in messages:
if receiver != None:
print "%s to %s" % (sender, receiver)
else:
<|fim_middle|>
print message
<|fim▁end|> | print "From %s" % sender |
<|file_name|>burrows_wheeler.py<|end_file_name|><|fim▁begin|>def burrows_wheeler(text):
"""Calculates the burrows wheeler transform of <text>.
<|fim▁hole|> text += "$"
all_permutations = []
for i in range(len(text)):
all_permutations.append((text[i:] + text[:i],i))
all_permutations.sort()
bw_l = [] # burrows wheeler as list
sa_i = [] # suffix array indices
for w,j in all_permutations:
bw_l.append(w[-1])
sa_i.append(j)
return "".join(bw_l), sa_i<|fim▁end|> | returns the burrows wheeler string and the suffix array indices
The text is assumed to not contain the character $"""
|
<|file_name|>burrows_wheeler.py<|end_file_name|><|fim▁begin|>
def burrows_wheeler(text):
<|fim_middle|>
<|fim▁end|> | """Calculates the burrows wheeler transform of <text>.
returns the burrows wheeler string and the suffix array indices
The text is assumed to not contain the character $"""
text += "$"
all_permutations = []
for i in range(len(text)):
all_permutations.append((text[i:] + text[:i],i))
all_permutations.sort()
bw_l = [] # burrows wheeler as list
sa_i = [] # suffix array indices
for w,j in all_permutations:
bw_l.append(w[-1])
sa_i.append(j)
return "".join(bw_l), sa_i |
<|file_name|>burrows_wheeler.py<|end_file_name|><|fim▁begin|>
def <|fim_middle|>(text):
"""Calculates the burrows wheeler transform of <text>.
returns the burrows wheeler string and the suffix array indices
The text is assumed to not contain the character $"""
text += "$"
all_permutations = []
for i in range(len(text)):
all_permutations.append((text[i:] + text[:i],i))
all_permutations.sort()
bw_l = [] # burrows wheeler as list
sa_i = [] # suffix array indices
for w,j in all_permutations:
bw_l.append(w[-1])
sa_i.append(j)
return "".join(bw_l), sa_i
<|fim▁end|> | burrows_wheeler |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Some utils
import hashlib
import uuid
def get_hash(data):<|fim▁hole|> """Returns hashed string"""
return hashlib.sha256(data).hexdigest()
def get_token():
return str(uuid.uuid4())<|fim▁end|> | |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Some utils
import hashlib
import uuid
def get_hash(data):
<|fim_middle|>
def get_token():
return str(uuid.uuid4())
<|fim▁end|> | """Returns hashed string"""
return hashlib.sha256(data).hexdigest() |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Some utils
import hashlib
import uuid
def get_hash(data):
"""Returns hashed string"""
return hashlib.sha256(data).hexdigest()
def get_token():
<|fim_middle|>
<|fim▁end|> | return str(uuid.uuid4()) |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Some utils
import hashlib
import uuid
def <|fim_middle|>(data):
"""Returns hashed string"""
return hashlib.sha256(data).hexdigest()
def get_token():
return str(uuid.uuid4())
<|fim▁end|> | get_hash |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Some utils
import hashlib
import uuid
def get_hash(data):
"""Returns hashed string"""
return hashlib.sha256(data).hexdigest()
def <|fim_middle|>():
return str(uuid.uuid4())
<|fim▁end|> | get_token |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>''' The `Filter` hierarchy contains Transformer classes that take a `Stim`
of one type as input and return a `Stim` of the same type as output (but with
some changes to its data).
'''
from .audio import (AudioTrimmingFilter,
AudioResamplingFilter)
from .base import TemporalTrimmingFilter
from .image import (ImageCroppingFilter,
ImageResizingFilter,
PillowImageFilter)
from .text import (WordStemmingFilter,
TokenizingFilter,
TokenRemovalFilter,
PunctuationRemovalFilter,
LowerCasingFilter)
from .video import (FrameSamplingFilter,
VideoTrimmingFilter)
<|fim▁hole|> 'AudioResamplingFilter',
'TemporalTrimmingFilter',
'ImageCroppingFilter',
'ImageResizingFilter',
'PillowImageFilter',
'WordStemmingFilter',
'TokenizingFilter',
'TokenRemovalFilter',
'PunctuationRemovalFilter',
'LowerCasingFilter',
'FrameSamplingFilter',
'VideoTrimmingFilter'
]<|fim▁end|> | __all__ = [
'AudioTrimmingFilter', |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|># limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj<|fim▁end|> | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
<|fim_middle|>
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f) |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
<|fim_middle|>
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs) |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
<|fim_middle|>
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | return self.task_instance.__call__(f) |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
<|fim_middle|>
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | compss_barrier() |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
<|fim_middle|>
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | obj = compss_wait_on(obj)
return obj |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
<|fim_middle|>
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | compss_delete_object(obj) |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
<|fim_middle|>
def compute(obj): # Submit task
return obj
<|fim▁end|> | compss_delete_file(file_path) |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
<|fim_middle|>
<|fim▁end|> | return obj |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def <|fim_middle|>(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | __init__ |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def <|fim_middle|>(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | __call__ |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def <|fim_middle|>(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | barrier |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def <|fim_middle|>(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | get_value_from_remote |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def <|fim_middle|>(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | delete_object |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def <|fim_middle|>(file_path):
compss_delete_file(file_path)
def compute(obj): # Submit task
return obj
<|fim▁end|> | delete_file |
<|file_name|>ExaquteTaskPyCOMPSs.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exaqute.ExaquteTask import *
from pycompss.api.task import task
from pycompss.api.api import compss_wait_on
from pycompss.api.api import compss_barrier
from pycompss.api.api import compss_delete_object
from pycompss.api.api import compss_delete_file
from pycompss.api.parameter import *
from pycompss.api.implement import implement
from pycompss.api.constraint import *
class ExaquteTask(object):
def __init__(self, *args, **kwargs):
global scheduler
scheduler = "Current scheduler is PyCOMPSs"
self.task_instance = task(*args, **kwargs)
def __call__(self, f):
return self.task_instance.__call__(f)
def barrier(): # Wait
compss_barrier()
def get_value_from_remote(obj): # Gather
obj = compss_wait_on(obj)
return obj
def delete_object(obj): # Release
compss_delete_object(obj)
def delete_file(file_path):
compss_delete_file(file_path)
def <|fim_middle|>(obj): # Submit task
return obj
<|fim▁end|> | compute |
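The three ExaquteTaskPyCOMPSs.py samples above exercise the same thin wrapper around the PyCOMPSs API: ExaquteTask forwards decoration to pycompss.api.task.task, while the module-level helpers map the ExaQUte verbs onto compss_barrier (wait), compss_wait_on (gather), compss_delete_object / compss_delete_file (release) and a pass-through compute for task submission. A minimal driver sketch follows; the decorated function add, its arguments, the returns=1 keyword and the import path are assumptions made for illustration, not part of the samples.
# Hypothetical driver for the wrapper shown above; only the helper names
# (ExaquteTask, barrier, get_value_from_remote, delete_object) come from
# the samples, everything else is made up for the example.
from exaqute.ExaquteTask import ExaquteTask, barrier, get_value_from_remote, delete_object  # assumed import path

@ExaquteTask(returns=1)            # forwarded to pycompss.api.task.task
def add(a, b):
    return a + b

partials = [add(i, i + 1) for i in range(4)]             # tasks submitted asynchronously
barrier()                                                # wait for all pending tasks
results = [get_value_from_remote(p) for p in partials]   # gather the future values
for p in partials:
    delete_object(p)                                     # release the remote copies
print(sum(results))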
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)<|fim▁hole|> '''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()<|fim▁end|> |
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10): |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
<|fim_middle|>
<|fim▁end|> | '''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close() |
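Every calibration.py row that follows revolves around _baseline_als, the asymmetric least squares smoother of Eilers and Boelens quoted in the docstring: lam penalises curvature through the second-difference operator D, and the weights w are re-estimated each iteration so that points on one side of the current baseline count with weight p and points on the other side with 1-p. The standalone sketch below runs the same update loop on synthetic data; the test trace, the chosen lam and p values and the function name are illustrative assumptions.
# Self-contained sketch of the ALS iteration used by calibration._baseline_als;
# the synthetic trace and the lam/p values are placeholders for real data.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

def baseline_als(y, lam=1e5, p=0.01, niter=10):
    L = len(y)
    D = sparse.csc_matrix(np.diff(np.eye(L), 2))      # second-difference operator
    w = np.ones(L)
    for _ in range(niter):
        W = sparse.spdiags(w, 0, L, L)
        Z = W + lam * D.dot(D.transpose())
        z = spsolve(Z, w * y)
        w = p * (y > z) + (1 - p) * (y < z)           # asymmetric re-weighting
    return z

x = np.linspace(0, 1, 500)
trace = 1.0 + 0.5 * x + np.exp(-(x - 0.5) ** 2 / 0.001)   # slow drift plus one sharp peak
baseline = baseline_als(trace)                             # follows the drift, ignores the peak
corrected = trace / baseline                               # same normalisation GUIbaselinefit applies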
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
<|fim_middle|>
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | return z_data/cal_z_data |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
<|fim_middle|>
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | return z_data/cal_ampdata |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
<|fim_middle|>
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | return z_data*np.exp(-1j*cal_phase) |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
<|fim_middle|>
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | return z_data/func(f_data) |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
<|fim_middle|>
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | '''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
<|fim_middle|>
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | '''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter) |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
<|fim_middle|>
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | '''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic') |
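baseline_func_amp and baseline_func_phase differ from the fit_baseline_* variants only in what they return: instead of the smoothed samples they hand back a cubic interp1d object, which is exactly the callable that normalize_by_func expects. A hedged sketch of combining the two is given below; the cal instance, the frequency axis, the noisy trace and the import line are invented for the example.
# Illustrative pairing of baseline_func_amp with normalize_by_func; assumes the
# calibration class from the rows above is importable as shown, which is a guess.
import numpy as np
from calibration import calibration   # assumed module path

cal = calibration()
f_data = np.linspace(4e9, 6e9, 2001)                                  # made-up frequency sweep in Hz
z_data = (1.0 + 0.05 * np.random.randn(f_data.size)) * np.exp(0.3j)   # made-up complex trace

baseline = cal.baseline_func_amp(z_data, f_data, lam=1e6, p=0.9)   # callable baseline (cubic interpolation)
z_norm = cal.normalize_by_func(f_data, z_data, baseline)           # divides z_data by baseline(f_data)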
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
<|fim_middle|>
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | '''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic') |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
<|fim_middle|>
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | '''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter) |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
<|fim_middle|>
<|fim▁end|> | '''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close() |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
<|fim_middle|>
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle() |
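The two rows above leave their holes inside GUIbaselinefit and its update callback, which together show the standard matplotlib widget pattern: each slider change recomputes the baseline from the new smoothness, asymmetry and vertical-shift values, pushes the new curves into the existing Line2D objects with set_ydata, and requests a redraw with draw_idle. The fragment below isolates that pattern with a trivial function; the sine curve, slider range and label are placeholders.
# Minimal Slider callback in the style of GUIbaselinefit.update;
# the plotted function and slider limits are arbitrary placeholders.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

x = np.linspace(0, 2 * np.pi, 400)
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.25)
line, = ax.plot(x, np.sin(x))

ax_freq = plt.axes([0.25, 0.1, 0.65, 0.03])
s_freq = Slider(ax_freq, 'frequency', 0.5, 5.0, valinit=1.0)

def update(val):
    line.set_ydata(np.sin(s_freq.val * x))   # update the existing line in place
    fig.canvas.draw_idle()                   # schedule a redraw, as in the samples

s_freq.on_changed(update)
plt.show()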
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def <|fim_middle|>(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | normalize_zdata |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def <|fim_middle|>(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | normalize_amplitude |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def <|fim_middle|>(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | normalize_phase |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def <|fim_middle|>(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | normalize_by_func |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def <|fim_middle|>(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | _baseline_als |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def <|fim_middle|>(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | fit_baseline_amp |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def <|fim_middle|>(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def baseline_func_phase(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | baseline_func_amp |
<|file_name|>calibration.py<|end_file_name|><|fim▁begin|>
import numpy as np
from scipy import sparse
from scipy.interpolate import interp1d
class calibration(object):
'''
some useful tools for manual calibration
'''
def normalize_zdata(self,z_data,cal_z_data):
return z_data/cal_z_data
def normalize_amplitude(self,z_data,cal_ampdata):
return z_data/cal_ampdata
def normalize_phase(self,z_data,cal_phase):
return z_data*np.exp(-1j*cal_phase)
def normalize_by_func(self,f_data,z_data,func):
return z_data/func(f_data)
def _baseline_als(self,y, lam, p, niter=10):
'''
see http://zanran_storage.s3.amazonaws.com/www.science.uva.nl/ContentPages/443199618.pdf
"Asymmetric Least Squares Smoothing" by P. Eilers and H. Boelens in 2005.
http://stackoverflow.com/questions/29156532/python-baseline-correction-library
"There are two parameters: p for asymmetry and lambda for smoothness. Both have to be
tuned to the data at hand. We found that generally 0.001<=p<=0.1 is a good choice
(for a trace with positive peaks) and 10e2<=lambda<=10e9, but exceptions may occur."
'''
L = len(y)
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in range(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam * D.dot(D.transpose())
z = sparse.linalg.spsolve(Z, w*y)
w = p * (y > z) + (1-p) * (y < z)
return z
def fit_baseline_amp(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.absolute(z_data),lam,p,niter=niter)
def baseline_func_amp(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.absolute(z_data),lam,p,niter=niter), kind='cubic')
def <|fim_middle|>(self,z_data,f_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
returns the baseline as a function
the points in between the datapoints are computed by cubic interpolation
'''
return interp1d(f_data, self._baseline_als(np.angle(z_data),lam,p,niter=niter), kind='cubic')
def fit_baseline_phase(self,z_data,lam,p,niter=10):
'''
for this to work, you need to analyze a large part of the baseline
tune lam and p until you get the desired result
'''
return self._baseline_als(np.angle(z_data),lam,p,niter=niter)
def GUIbaselinefit(self):
'''
A GUI to help you fit the baseline
'''
self.__lam = 1e6
self.__p = 0.9
niter = 10
self.__baseline = self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
fig, (ax0,ax1) = plt.subplots(nrows=2)
plt.suptitle('Use the sliders to make the green curve match the baseline.')
plt.subplots_adjust(left=0.25, bottom=0.25)
l0, = ax0.plot(np.absolute(self.z_data_raw))
l0b, = ax0.plot(np.absolute(self.__baseline))
l1, = ax1.plot(np.absolute(self.z_data_raw/self.__baseline))
ax0.set_ylabel('amp, rawdata vs. baseline')
ax1.set_ylabel('amp, corrected')
axcolor = 'lightgoldenrodyellow'
axSmooth = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axAsym = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
axbcorr = plt.axes([0.25, 0.05, 0.65, 0.03], axisbg=axcolor)
sSmooth = Slider(axSmooth, 'Smoothness', 0.1, 10., valinit=np.log10(self.__lam),valfmt='1E%f')
sAsym = Slider(axAsym, 'Asymmetry', 1e-4,0.99999, valinit=self.__p,valfmt='%f')
sbcorr = Slider(axbcorr, 'vertical shift',0.7,1.1,valinit=1.)
def update(val):
self.__lam = 10**sSmooth.val
self.__p = sAsym.val
self.__baseline = sbcorr.val*self._baseline_als(np.absolute(self.z_data_raw),self.__lam,self.__p,niter=niter)
l0.set_ydata(np.absolute(self.z_data_raw))
l0b.set_ydata(np.absolute(self.__baseline))
l1.set_ydata(np.absolute(self.z_data_raw/self.__baseline))
fig.canvas.draw_idle()
sSmooth.on_changed(update)
sAsym.on_changed(update)
sbcorr.on_changed(update)
plt.show()
self.z_data_raw /= self.__baseline
plt.close()
<|fim▁end|> | baseline_func_phase |
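
The _baseline_als routine carried in every row above implements asymmetric least squares smoothing (Eilers & Boelens, 2005): a penalized least-squares fit whose weights are p for points above the current baseline estimate and 1 - p below it, with lam controlling smoothness. A minimal, self-contained sketch of the same idea, runnable outside the calibration class, is given below; the function name baseline_als, the demo signal, and the parameter values chosen here are illustrative assumptions and are not part of the dataset rows.

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

def baseline_als(y, lam=1e6, p=0.01, niter=10):
    # Asymmetric least squares baseline (Eilers & Boelens, 2005):
    # minimize sum_i w_i * (y_i - z_i)^2 + lam * sum_i (second difference of z at i)^2,
    # with asymmetric weights w_i = p above the baseline and 1 - p below it.
    L = len(y)
    # Sparse second-order difference operator, shape (L-2, L).
    D = sparse.diags([1.0, -2.0, 1.0], [0, 1, 2], shape=(L - 2, L))
    w = np.ones(L)
    z = y
    for _ in range(niter):
        W = sparse.spdiags(w, 0, L, L)
        Z = W + lam * (D.T @ D)
        z = spsolve(Z.tocsc(), w * y)
        # Reweight: points above the fit get weight p, points below get 1 - p.
        w = p * (y > z) + (1 - p) * (y < z)
    return z

# Illustrative usage: a Gaussian peak sitting on a slow drift plus noise.
x = np.linspace(0.0, 10.0, 500)
y = np.exp(-((x - 5.0) ** 2)) + 0.2 * x + 0.02 * np.random.randn(x.size)
baseline = baseline_als(y, lam=1e5, p=0.01)
y_corrected = y - baseline  # or y / baseline for amplitude data, as the GUI code in the rows does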