Dataset columns:
- code: string (lengths 31 to 1.05M)
- apis: list
- extract_api: string (lengths 97 to 1.91M)
from sklearn.cluster import KMeans
from random import randint
import numpy as np
import csv
import matplotlib.pyplot as plt

matriz = []
arrayCriacaoCentroides = []

# Read petal width and sepal length from the iris dataset
with open('dataset_iris.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        largPetala = row['larguraPetala']
        compSepala = row['comprimentoSepala']
        matriz.append([float(largPetala), float(compSepala)])
matriz = np.array(matriz)


def criacaoCentroideRandom():
    # Draw three random 2-D centroids with coordinates in [0, 9]
    array = [[randint(0, 9), randint(0, 9)],
             [randint(0, 9), randint(0, 9)],
             [randint(0, 9), randint(0, 9)]]
    array = np.array(array)
    global arrayCriacaoCentroides
    arrayCriacaoCentroides = array
    return array


def avaliacaoAcertos(arrayAnalise):
    # The iris samples come in three blocks of 50; count the labels in each
    # block, credit the dominant cluster, and return the hit percentage.
    g1 = 0
    g2 = 0
    g3 = 0
    acertos = 0
    for i in range(0, len(arrayAnalise)):
        if arrayAnalise[i] == 0:
            g1 += 1
        if arrayAnalise[i] == 1:
            g2 += 1
        if arrayAnalise[i] == 2:
            g3 += 1
        if (i == 49) or (i == 99) or (i == 149):
            print("Grouping:", g1, g2, g3)
            acertos += max(g1, g2, g3)
            g1 = 0
            g2 = 0
            g3 = 0
    return round(acertos / 150 * 100, 2)


for i in range(1, 4):
    if i != 3:
        # My own centroid generation;
        trabmeans = KMeans(n_clusters=3, init=criacaoCentroideRandom(), n_init=1).fit(matriz)
    else:
        # Optimized centroid generation from the library itself;
        trabmeans = KMeans(n_clusters=3).fit(matriz)
    plt.figure(i)
    plt.scatter(matriz[:, 0], matriz[:, 1], s=100, c=trabmeans.labels_)
    if i != 3:
        plt.scatter(arrayCriacaoCentroides[:, 0], arrayCriacaoCentroides[:, 1], s=100, c='green', label='Initial Centroids')
    plt.scatter(trabmeans.cluster_centers_[:, 0], trabmeans.cluster_centers_[:, 1], s=100, c='red', label='Final Centroids')
    plt.xlabel('Petal Width')
    plt.ylabel('Sepal Length')
    plt.legend()
    if i != 3:
        print("Initial centroid - Group " + str(i) + ":", arrayCriacaoCentroides[0], arrayCriacaoCentroides[1], arrayCriacaoCentroides[2])
    else:
        print("Centroid coordinates generated in an optimized way by the algorithm.")
    print("Hit percentage - Group " + str(i) + ":", avaliacaoAcertos(trabmeans.labels_))
    print("\n")
plt.show()
[ "matplotlib.pyplot.show", "random.randint", "csv.DictReader", "matplotlib.pyplot.scatter", "matplotlib.pyplot.legend", "sklearn.cluster.KMeans", "matplotlib.pyplot.figure", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ]
[((432, 448), 'numpy.array', 'np.array', (['matriz'], {}), '(matriz)\n', (440, 448), True, 'import numpy as np\n'), ((2352, 2362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((221, 244), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (235, 244), False, 'import csv\n'), ((601, 616), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (609, 616), True, 'import numpy as np\n'), ((1520, 1533), 'matplotlib.pyplot.figure', 'plt.figure', (['i'], {}), '(i)\n', (1530, 1533), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1605), 'matplotlib.pyplot.scatter', 'plt.scatter', (['matriz[:, 0]', 'matriz[:, 1]'], {'s': '(100)', 'c': 'trabmeans.labels_'}), '(matriz[:, 0], matriz[:, 1], s=100, c=trabmeans.labels_)\n', (1549, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1890), 'matplotlib.pyplot.scatter', 'plt.scatter', (['trabmeans.cluster_centers_[:, 0]', 'trabmeans.cluster_centers_[:, 1]'], {'s': '(100)', 'c': '"""red"""', 'label': '"""Centroides Finais"""'}), "(trabmeans.cluster_centers_[:, 0], trabmeans.cluster_centers_[:,\n 1], s=100, c='red', label='Centroides Finais')\n", (1775, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Largura da Petala"""'], {}), "('Largura da Petala')\n", (1907, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1968), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Comprimento da Sepala"""'], {}), "('Comprimento da Sepala')\n", (1943, 1968), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1985), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1983, 1985), True, 'import matplotlib.pyplot as plt\n'), ((1635, 1758), 'matplotlib.pyplot.scatter', 'plt.scatter', (['arrayCriacaoCentroides[:, 0]', 'arrayCriacaoCentroides[:, 1]'], {'s': '(100)', 'c': '"""green"""', 'label': '"""Centroides Iniciais"""'}), "(arrayCriacaoCentroides[:, 0], arrayCriacaoCentroides[:, 1], s=\n 100, c='green', label='Centroides Iniciais')\n", (1646, 1758), True, 'import matplotlib.pyplot as plt\n'), ((494, 507), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (501, 507), False, 'from random import randint\n'), ((509, 522), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (516, 522), False, 'from random import randint\n'), ((526, 539), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (533, 539), False, 'from random import randint\n'), ((541, 554), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (548, 554), False, 'from random import randint\n'), ((558, 571), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (565, 571), False, 'from random import randint\n'), ((573, 586), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (580, 586), False, 'from random import randint\n'), ((1482, 1502), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (1488, 1502), False, 'from sklearn.cluster import KMeans\n')]
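The script above relies on sklearn accepting an explicit seed array through KMeans's init= parameter. A minimal self-contained sketch of that mechanism, with synthetic stand-in data and made-up variable names (not part of the original script):

import numpy as np
from sklearn.cluster import KMeans

data = np.random.rand(150, 2) * 9                # stand-in for the two iris columns
seeds = np.random.randint(0, 10, size=(3, 2))      # must have shape (n_clusters, n_features)
km = KMeans(n_clusters=3, init=seeds, n_init=1).fit(data)  # n_init=1: no random restarts
print(km.cluster_centers_)                          # final centroids after convergence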
import logging
from typing import List

import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from matplotlib.animation import FuncAnimation
from scipy import integrate

from utils.objects import Body

logger = logging.getLogger(__name__)

# Arbitrary value for G (gravitational constant)
G = 1


def create_initial_conditions(bodies: List[Body]) -> List[float]:
    """
    :param bodies: List of Body classes
    :return: list of starting x, vx, y and vy values for each Body in bodies
    """
    initial = []
    # Loop through bodies and create initial conditions to be passed into the integrator
    logger.info(f"Creating initial conditions for the {len(bodies)} bodies")
    for body in bodies:
        values = [body.x, body.vx, body.y, body.vy]
        initial += values
    return initial


def calc_2d_distance(x1: float, y1: float, x2: float, y2: float) -> float:
    """
    Returns:
        Distance between the 2-dimensional co-ordinates supplied.
    """
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5


def calc_dvel(c1: float, c2: float, r: float, m2: float) -> float:
    """
    Calculates the change in velocity on a target body due to the gravitational
    force of another body (source body) in a single dimension.

    Args:
        c1: value for target body position in x or y dimension
        c2: value for source body position in x or y dimension
        r: distance between the 2 bodies
        m2: mass of the source body

    Returns:
        change in target body velocity (float)
    """
    return (-G * m2 * (c1 - c2)) * r ** (-3)


def n_body_func(t: int, pos_vel: np.ndarray, bodies: List[Body]) -> np.ndarray:
    """
    Function to be passed into the ode integrator. Calculates and stores the
    changes in spatial and velocity values.

    Args:
        t: time step
        pos_vel: array containing x, vx, y and vy values for each body
            [x1, vx1, y1, vy1, x2, ...]
        bodies: list of Body objects

    Returns:
        array containing change in spatial and velocity values for each body
    """
    # Set up array to store updated spatial and velocity values
    dpos_dvel = np.zeros(4 * len(bodies))

    # Change in x, y is velocity in x, y
    dpos_dvel[0 : len(dpos_dvel) : 4] = pos_vel[1 : len(pos_vel) : 4]
    dpos_dvel[2 : len(dpos_dvel) : 4] = pos_vel[3 : len(pos_vel) : 4]

    # Loop through bodies, calculating change in vx, vy due to all other bodies
    for i, body in enumerate(bodies):
        # Extract x, y values of body
        x1 = pos_vel[i * 4]
        y1 = pos_vel[i * 4 + 2]
        vx1 = 0
        vy1 = 0
        for j, other_body in enumerate(bodies):
            # Check bodies aren't the same
            if i != j:
                # Extract x, y & mass of other body
                x2 = pos_vel[j * 4]
                y2 = pos_vel[j * 4 + 2]
                # Distance to other body
                r = calc_2d_distance(x1=x1, y1=y1, x2=x2, y2=y2)
                # Change in vx, vy
                vx1 += calc_dvel(c1=x1, c2=x2, r=r, m2=other_body.mass)
                vy1 += calc_dvel(c1=y1, c2=y2, r=r, m2=other_body.mass)
        # Add resultant change in vel to array
        dpos_dvel[i * 4 + 1] = vx1
        dpos_dvel[i * 4 + 3] = vy1

    return dpos_dvel


def calc_orbits(bodies: List[Body], t0: int, t1: int, dt: int) -> np.ndarray:
    """
    Calculate the change in x, y, vx and vy at each time step between t0 and t1
    due to the gravitational forces of all other bodies in the system. The
    integrator used is dop853.

    Args:
        bodies: List of Body classes that describe the starting conditions and
            masses of the bodies
        t0: start time
        t1: end time
        dt: number of time steps (passed to np.linspace as the sample count,
            not a step size)

    Returns:
        Array containing spatial coordinates and velocities of bodies at each
        time step
    """
    logger.info(
        f"""Orbit settings:
        n_bodies: {len(bodies)},
        t0: {t0}, t1: {t1}, dt: {dt}"""
    )
    # Initial conditions (x, vx, y, vy)
    initial = create_initial_conditions(bodies=bodies)

    # Time period over which to calculate orbit paths
    t = np.linspace(t0, t1, dt)

    # Array for solution
    y = np.zeros((len(t), len(bodies) * 4))
    y[0, :] = initial

    # Setup integrator
    integrator = (
        integrate.ode(n_body_func)
        .set_integrator("dop853", rtol=1e-6, atol=1e-10)
        .set_initial_value(initial, t0)
        .set_f_params(bodies)
    )

    # Iterate over time intervals and integrate, storing updated spatial
    # coordinates and velocities of bodies
    progress_text = st.sidebar.text(f"Iteration: 0/{len(t)}")
    progress_bar = st.sidebar.progress(0)
    logger.info("Calculating orbits")
    for i in range(1, len(t)):
        progress_text.text(f"Iteration: {i}/{len(t)-1}")
        progress_bar.progress((i + 1) / len(t))
        y[i, :] = integrator.integrate(t[i])
    progress_text.text("Complete!")

    return y


def animate_orbits(orbit_paths: np.ndarray) -> None:
    """
    Animates the orbits

    Args:
        orbit_paths: array containing spatial and velocity values over time
    """
    logger.info("Animating orbits")
    fig = plt.figure(figsize=(6, 6))

    # Set the size of the axes based on max/min spatial values
    x_min = orbit_paths[:, 0::4].min() * 1.1
    x_max = orbit_paths[:, 0::4].max() * 1.1
    y_min = orbit_paths[:, 2::4].min() * 1.1
    y_max = orbit_paths[:, 2::4].max() * 1.1
    ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max))

    n_bodies = int(orbit_paths.shape[1] / 4)
    colours = ["red", "blue", "orange", "green", "black"]
    lines = []
    for index in range(n_bodies * 2):
        if index < n_bodies:
            # Dashed line for each body's orbit trail
            lobj = ax.plot([], [], "--", lw=1, color=colours[index % len(colours)])[0]
        else:
            # Marker for each body's current position
            lobj = ax.plot(
                [], [], "o", color=colours[(index - n_bodies) % len(colours)]
            )[0]
        lines.append(lobj)

    def init():
        for line in lines:
            line.set_data([], [])
        return lines

    def animate(i):
        for j, line in enumerate(lines):
            if j < n_bodies:
                orbit_tail_length = 30
                if i > orbit_tail_length:
                    x = orbit_paths[i - orbit_tail_length : i, j * 4]
                    y = orbit_paths[i - orbit_tail_length : i, j * 4 + 2]
                else:
                    x = orbit_paths[:i, j * 4]
                    y = orbit_paths[:i, j * 4 + 2]
            else:
                x = orbit_paths[i, (j - n_bodies) * 4]
                y = orbit_paths[i, (j - n_bodies) * 4 + 2]
            line.set_data(x, y)
        return lines

    # TODO: ensure a consistent maximum number of frames so animations aren't
    # too slow or too fast
    # Keep a reference to the animation so it isn't garbage-collected
    anim = FuncAnimation(
        fig, animate, init_func=init, frames=orbit_paths.shape[0], interval=1, blit=True
    )
    plt.show()


def plot_orbits(orbit_paths: np.ndarray, title: str) -> None:
    """
    Plots the orbits

    Args:
        orbit_paths: array containing spatial and velocity values over time
        title: title to use for figure
    """
    logger.info("Plotting orbits")
    fig = plt.figure(figsize=(10, 10))
    plt.title(title)
    for i in range(int(orbit_paths.shape[1] / 4)):
        plt.plot(orbit_paths[:, i * 4], orbit_paths[:, i * 4 + 2])
    st.pyplot(fig)
    plt.show()
[ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "scipy.integrate.ode", "matplotlib.pyplot.plot", "matplotlib.pyplot.axes", "streamlit.sidebar.progress", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure", "numpy.linspace", "streamlit.pyplot", "logging.getLogger" ]
[((231, 258), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'import logging\n'), ((4142, 4165), 'numpy.linspace', 'np.linspace', (['t0', 't1', 'dt'], {}), '(t0, t1, dt)\n', (4153, 4165), True, 'import numpy as np\n'), ((4667, 4689), 'streamlit.sidebar.progress', 'st.sidebar.progress', (['(0)'], {}), '(0)\n', (4686, 4689), True, 'import streamlit as st\n'), ((5188, 5214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (5198, 5214), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5510), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(x_min, x_max)', 'ylim': '(y_min, y_max)'}), '(xlim=(x_min, x_max), ylim=(y_min, y_max))\n', (5468, 5510), True, 'import matplotlib.pyplot as plt\n'), ((6794, 6893), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'animate'], {'init_func': 'init', 'frames': 'orbit_paths.shape[0]', 'interval': '(1)', 'blit': '(True)'}), '(fig, animate, init_func=init, frames=orbit_paths.shape[0],\n interval=1, blit=True)\n', (6807, 6893), False, 'from matplotlib.animation import FuncAnimation\n'), ((6908, 6918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6916, 6918), True, 'import matplotlib.pyplot as plt\n'), ((7191, 7219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7201, 7219), True, 'import matplotlib.pyplot as plt\n'), ((7224, 7240), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7233, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7379), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (7374, 7379), True, 'import streamlit as st\n'), ((7385, 7395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7393, 7395), True, 'import matplotlib.pyplot as plt\n'), ((7301, 7359), 'matplotlib.pyplot.plot', 'plt.plot', (['orbit_paths[:, i * 4]', 'orbit_paths[:, i * 4 + 2]'], {}), '(orbit_paths[:, i * 4], orbit_paths[:, i * 4 + 2])\n', (7309, 7359), True, 'import matplotlib.pyplot as plt\n'), ((4309, 4335), 'scipy.integrate.ode', 'integrate.ode', (['n_body_func'], {}), '(n_body_func)\n', (4322, 4335), False, 'from scipy import integrate\n')]
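The stepping loop in calc_orbits is the standard scipy.integrate.ode pattern: build the solver, set the integrator and the initial value, then call integrate once per output time. A minimal sketch of the same pattern on a harmonic oscillator instead of the n-body right-hand side (all names here are illustrative):

import numpy as np
from scipy import integrate

def rhs(t, y):
    # y = [position, velocity]; dy/dt = [velocity, -position]
    return [y[1], -y[0]]

t = np.linspace(0, 10, 101)
solution = np.zeros((len(t), 2))
solution[0, :] = [1.0, 0.0]

solver = integrate.ode(rhs).set_integrator("dop853", rtol=1e-6, atol=1e-10)
solver.set_initial_value(solution[0, :], t[0])
for i in range(1, len(t)):
    solution[i, :] = solver.integrate(t[i])  # advance to the next output time
print(solution[-1])  # approximately [cos(10), -sin(10)]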
import sys
from operator import itemgetter
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt


# -----------------------------#
#   Compute the scale factors at which
#   the original input image is resized
# -----------------------------#
def calculateScales(img):
    copy_img = img.copy()
    pr_scale = 1.0
    h, w, _ = copy_img.shape
    if min(w, h) > 500:
        pr_scale = 500.0 / min(h, w)
        w = int(w * pr_scale)
        h = int(h * pr_scale)
    elif max(w, h) < 500:
        pr_scale = 500.0 / max(h, w)
        w = int(w * pr_scale)
        h = int(h * pr_scale)

    scales = []
    factor = 0.709
    factor_count = 0
    minl = min(h, w)
    while minl >= 12:
        scales.append(pr_scale * pow(factor, factor_count))
        minl *= factor
        factor_count += 1
    return scales


# -------------------------------------#
#   Post-process the PNet outputs
# -------------------------------------#
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
    cls_prob = np.swapaxes(cls_prob, 0, 1)
    roi = np.swapaxes(roi, 0, 2)

    stride = 0
    # stride is approximately equal to 2
    if out_side != 1:
        stride = float(2 * out_side - 1) / (out_side - 1)
    (x, y) = np.where(cls_prob >= threshold)

    boundingbox = np.array([x, y]).T
    # Map back to the corresponding positions in the original image
    bb1 = np.fix((stride * (boundingbox) + 0) * scale)
    bb2 = np.fix((stride * (boundingbox) + 11) * scale)
    # plt.scatter(bb1[:,0],bb1[:,1],linewidths=1)
    # plt.scatter(bb2[:,0],bb2[:,1],linewidths=1,c='r')
    # plt.show()
    boundingbox = np.concatenate((bb1, bb2), axis=1)

    dx1 = roi[0][x, y]
    dx2 = roi[1][x, y]
    dx3 = roi[2][x, y]
    dx4 = roi[3][x, y]
    score = np.array([cls_prob[x, y]]).T
    offset = np.array([dx1, dx2, dx3, dx4]).T

    boundingbox = boundingbox + offset * 12.0 * scale

    rectangles = np.concatenate((boundingbox, score), axis=1)
    rectangles = rect2square(rectangles)
    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        sc = rectangles[i][4]
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, sc])
    return NMS(pick, 0.3)


# -----------------------------#
#   Adjust rectangles into squares
# -----------------------------#
def rect2square(rectangles):
    w = rectangles[:, 2] - rectangles[:, 0]
    h = rectangles[:, 3] - rectangles[:, 1]
    l = np.maximum(w, h).T
    rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
    rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
    rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([l], 2, axis=0).T
    return rectangles


# -------------------------------------#
#   Non-maximum suppression
# -------------------------------------#
def NMS(rectangles, threshold):
    if len(rectangles) == 0:
        return rectangles
    boxes = np.array(rectangles)
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    s = boxes[:, 4]
    area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
    I = np.array(s.argsort())
    pick = []
    while len(I) > 0:
        xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])  # I[-1] has the highest prob score, I[0:-1] -> the others
        yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
        xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
        yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
        pick.append(I[-1])
        I = I[np.where(o <= threshold)[0]]
    result_rectangle = boxes[pick].tolist()
    return result_rectangle


# -------------------------------------#
#   Post-process the RNet outputs
# -------------------------------------#
def filter_face_24net(cls_prob, roi, rectangles, width, height, threshold):
    prob = cls_prob[:, 1]
    pick = np.where(prob >= threshold)
    rectangles = np.array(rectangles)

    x1 = rectangles[pick, 0]
    y1 = rectangles[pick, 1]
    x2 = rectangles[pick, 2]
    y2 = rectangles[pick, 3]

    sc = np.array([prob[pick]]).T

    dx1 = roi[pick, 0]
    dx2 = roi[pick, 1]
    dx3 = roi[pick, 2]
    dx4 = roi[pick, 3]

    w = x2 - x1
    h = y2 - y1

    x1 = np.array([(x1 + dx1 * w)[0]]).T
    y1 = np.array([(y1 + dx2 * h)[0]]).T
    x2 = np.array([(x2 + dx3 * w)[0]]).T
    y2 = np.array([(y2 + dx4 * h)[0]]).T

    rectangles = np.concatenate((x1, y1, x2, y2, sc), axis=1)
    rectangles = rect2square(rectangles)
    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        sc = rectangles[i][4]
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, sc])
    return NMS(pick, 0.3)


# -------------------------------------#
#   Post-process the ONet outputs
# -------------------------------------#
def filter_face_48net(cls_prob, roi, pts, rectangles, width, height, threshold):
    prob = cls_prob[:, 1]
    pick = np.where(prob >= threshold)
    rectangles = np.array(rectangles)

    x1 = rectangles[pick, 0]
    y1 = rectangles[pick, 1]
    x2 = rectangles[pick, 2]
    y2 = rectangles[pick, 3]

    sc = np.array([prob[pick]]).T

    dx1 = roi[pick, 0]
    dx2 = roi[pick, 1]
    dx3 = roi[pick, 2]
    dx4 = roi[pick, 3]

    w = x2 - x1
    h = y2 - y1

    pts0 = np.array([(w * pts[pick, 0] + x1)[0]]).T
    pts1 = np.array([(h * pts[pick, 5] + y1)[0]]).T
    pts2 = np.array([(w * pts[pick, 1] + x1)[0]]).T
    pts3 = np.array([(h * pts[pick, 6] + y1)[0]]).T
    pts4 = np.array([(w * pts[pick, 2] + x1)[0]]).T
    pts5 = np.array([(h * pts[pick, 7] + y1)[0]]).T
    pts6 = np.array([(w * pts[pick, 3] + x1)[0]]).T
    pts7 = np.array([(h * pts[pick, 8] + y1)[0]]).T
    pts8 = np.array([(w * pts[pick, 4] + x1)[0]]).T
    pts9 = np.array([(h * pts[pick, 9] + y1)[0]]).T

    x1 = np.array([(x1 + dx1 * w)[0]]).T
    y1 = np.array([(y1 + dx2 * h)[0]]).T
    x2 = np.array([(x2 + dx3 * w)[0]]).T
    y2 = np.array([(y2 + dx4 * h)[0]]).T

    rectangles = np.concatenate((x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9), axis=1)

    pick = []
    for i in range(len(rectangles)):
        x1 = int(max(0, rectangles[i][0]))
        y1 = int(max(0, rectangles[i][1]))
        x2 = int(min(width, rectangles[i][2]))
        y2 = int(min(height, rectangles[i][3]))
        if x2 > x1 and y2 > y1:
            pick.append([x1, y1, x2, y2, rectangles[i][4], rectangles[i][5],
                         rectangles[i][6], rectangles[i][7], rectangles[i][8],
                         rectangles[i][9], rectangles[i][10], rectangles[i][11],
                         rectangles[i][12], rectangles[i][13], rectangles[i][14]])
    return NMS(pick, 0.3)


# -------------------------------------#
#   Face alignment
# -------------------------------------#
def Alignment_1(img, landmark):
    if landmark.shape[0] == 68:
        x = landmark[36, 0] - landmark[45, 0]
        y = landmark[36, 1] - landmark[45, 1]
    elif landmark.shape[0] == 5:
        x = landmark[0, 0] - landmark[1, 0]
        y = landmark[0, 1] - landmark[1, 1]

    if x == 0:
        angle = 0
    else:
        angle = math.atan(y / x) * 180 / math.pi

    center = (img.shape[1] // 2, img.shape[0] // 2)

    RotationMatrix = cv2.getRotationMatrix2D(center, angle, 1)
    new_img = cv2.warpAffine(img, RotationMatrix, (img.shape[1], img.shape[0]))

    RotationMatrix = np.array(RotationMatrix)
    new_landmark = []
    for i in range(landmark.shape[0]):
        pts = []
        pts.append(RotationMatrix[0, 0] * landmark[i, 0] + RotationMatrix[0, 1] * landmark[i, 1] + RotationMatrix[0, 2])
        pts.append(RotationMatrix[1, 0] * landmark[i, 0] + RotationMatrix[1, 1] * landmark[i, 1] + RotationMatrix[1, 2])
        new_landmark.append(pts)

    new_landmark = np.array(new_landmark)

    return new_img, new_landmark


def Alignment_2(img, std_landmark, landmark):
    def Transformation(std_landmark, landmark):
        std_landmark = np.matrix(std_landmark).astype(np.float64)
        landmark = np.matrix(landmark).astype(np.float64)

        c1 = np.mean(std_landmark, axis=0)
        c2 = np.mean(landmark, axis=0)
        std_landmark -= c1
        landmark -= c2

        s1 = np.std(std_landmark)
        s2 = np.std(landmark)
        std_landmark /= s1
        landmark /= s2

        U, S, Vt = np.linalg.svd(std_landmark.T * landmark)
        R = (U * Vt).T

        return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)),
                          np.matrix([0., 0., 1.])])

    Trans_Matrix = Transformation(std_landmark, landmark)  # Shape: 3 * 3
    Trans_Matrix = Trans_Matrix[:2]
    Trans_Matrix = cv2.invertAffineTransform(Trans_Matrix)
    new_img = cv2.warpAffine(img, Trans_Matrix, (img.shape[1], img.shape[0]))

    Trans_Matrix = np.array(Trans_Matrix)
    new_landmark = []
    for i in range(landmark.shape[0]):
        pts = []
        pts.append(Trans_Matrix[0, 0] * landmark[i, 0] + Trans_Matrix[0, 1] * landmark[i, 1] + Trans_Matrix[0, 2])
        pts.append(Trans_Matrix[1, 0] * landmark[i, 0] + Trans_Matrix[1, 1] * landmark[i, 1] + Trans_Matrix[1, 2])
        new_landmark.append(pts)

    new_landmark = np.array(new_landmark)

    return new_img, new_landmark


# ---------------------------------#
#   Image preprocessing:
#   Gaussian normalization
# ---------------------------------#
def pre_process(x):
    if x.ndim == 4:
        axis = (1, 2, 3)
        size = x[0].size
    elif x.ndim == 3:
        axis = (0, 1, 2)
        size = x.size
    else:
        raise ValueError('Dimension should be 3 or 4')

    mean = np.mean(x, axis=axis, keepdims=True)
    std = np.std(x, axis=axis, keepdims=True)
    std_adj = np.maximum(std, 1.0 / np.sqrt(size))
    y = (x - mean) / std_adj
    return y


# ---------------------------------#
#   L2 normalization
# ---------------------------------#
def l2_normalize(x, axis=-1, epsilon=1e-10):
    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return output


# ---------------------------------#
#   Compute the 128-dimensional
#   feature vector
# ---------------------------------#
def calc_128_vec(model, img):
    face_img = pre_process(img)
    pre = model.predict(face_img)
    pre = l2_normalize(np.concatenate(pre))
    pre = np.reshape(pre, [128])
    return pre


# ---------------------------------#
#   Compute distances between faces
# ---------------------------------#
def face_distance(face_encodings, face_to_compare):
    if len(face_encodings) == 0:
        return np.empty((0))
    return np.linalg.norm(face_encodings - face_to_compare, axis=1)


# ---------------------------------#
#   Compare faces
# ---------------------------------#
def compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):
    dis = face_distance(known_face_encodings, face_encoding_to_check)
    return list(dis <= tolerance)
[ "numpy.maximum", "numpy.empty", "cv2.warpAffine", "numpy.mean", "numpy.linalg.norm", "numpy.linalg.svd", "cv2.getRotationMatrix2D", "cv2.invertAffineTransform", "numpy.multiply", "numpy.std", "numpy.swapaxes", "numpy.reshape", "numpy.repeat", "numpy.minimum", "numpy.fix", "numpy.square", "numpy.hstack", "numpy.concatenate", "numpy.matrix", "math.atan", "numpy.where", "numpy.array", "numpy.sqrt" ]
[((984, 1011), 'numpy.swapaxes', 'np.swapaxes', (['cls_prob', '(0)', '(1)'], {}), '(cls_prob, 0, 1)\n', (995, 1011), True, 'import numpy as np\n'), ((1022, 1044), 'numpy.swapaxes', 'np.swapaxes', (['roi', '(0)', '(2)'], {}), '(roi, 0, 2)\n', (1033, 1044), True, 'import numpy as np\n'), ((1171, 1202), 'numpy.where', 'np.where', (['(cls_prob >= threshold)'], {}), '(cls_prob >= threshold)\n', (1179, 1202), True, 'import numpy as np\n'), ((1267, 1309), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 0) * scale)'], {}), '((stride * boundingbox + 0) * scale)\n', (1273, 1309), True, 'import numpy as np\n'), ((1322, 1365), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 11) * scale)'], {}), '((stride * boundingbox + 11) * scale)\n', (1328, 1365), True, 'import numpy as np\n'), ((1509, 1543), 'numpy.concatenate', 'np.concatenate', (['(bb1, bb2)'], {'axis': '(1)'}), '((bb1, bb2), axis=1)\n', (1523, 1543), True, 'import numpy as np\n'), ((1797, 1841), 'numpy.concatenate', 'np.concatenate', (['(boundingbox, score)'], {'axis': '(1)'}), '((boundingbox, score), axis=1)\n', (1811, 1841), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (2894, 2906), True, 'import numpy as np\n'), ((3022, 3059), 'numpy.multiply', 'np.multiply', (['(x2 - x1 + 1)', '(y2 - y1 + 1)'], {}), '(x2 - x1 + 1, y2 - y1 + 1)\n', (3033, 3059), True, 'import numpy as np\n'), ((3898, 3925), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (3906, 3925), True, 'import numpy as np\n'), ((3944, 3964), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (3952, 3964), True, 'import numpy as np\n'), ((4426, 4470), 'numpy.concatenate', 'np.concatenate', (['(x1, y1, x2, y2, sc)'], {'axis': '(1)'}), '((x1, y1, x2, y2, sc), axis=1)\n', (4440, 4470), True, 'import numpy as np\n'), ((5100, 5127), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (5108, 5127), True, 'import numpy as np\n'), ((5145, 5165), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (5153, 5165), True, 'import numpy as np\n'), ((6152, 6260), 'numpy.concatenate', 'np.concatenate', (['(x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9\n )'], {'axis': '(1)'}), '((x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5,\n pts6, pts7, pts8, pts9), axis=1)\n', (6166, 6260), True, 'import numpy as np\n'), ((7413, 7454), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', '(1)'], {}), '(center, angle, 1)\n', (7436, 7454), False, 'import cv2\n'), ((7469, 7534), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'RotationMatrix', '(img.shape[1], img.shape[0])'], {}), '(img, RotationMatrix, (img.shape[1], img.shape[0]))\n', (7483, 7534), False, 'import cv2\n'), ((7557, 7581), 'numpy.array', 'np.array', (['RotationMatrix'], {}), '(RotationMatrix)\n', (7565, 7581), True, 'import numpy as np\n'), ((7955, 7977), 'numpy.array', 'np.array', (['new_landmark'], {}), '(new_landmark)\n', (7963, 7977), True, 'import numpy as np\n'), ((8804, 8843), 'cv2.invertAffineTransform', 'cv2.invertAffineTransform', (['Trans_Matrix'], {}), '(Trans_Matrix)\n', (8829, 8843), False, 'import cv2\n'), ((8858, 8921), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'Trans_Matrix', '(img.shape[1], img.shape[0])'], {}), '(img, Trans_Matrix, (img.shape[1], img.shape[0]))\n', (8872, 8921), False, 'import cv2\n'), ((8942, 8964), 'numpy.array', 'np.array', (['Trans_Matrix'], {}), '(Trans_Matrix)\n', (8950, 8964), True, 
'import numpy as np\n'), ((9326, 9348), 'numpy.array', 'np.array', (['new_landmark'], {}), '(new_landmark)\n', (9334, 9348), True, 'import numpy as np\n'), ((9715, 9751), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (9722, 9751), True, 'import numpy as np\n'), ((9762, 9797), 'numpy.std', 'np.std', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (9768, 9797), True, 'import numpy as np\n'), ((10373, 10395), 'numpy.reshape', 'np.reshape', (['pre', '[128]'], {}), '(pre, [128])\n', (10383, 10395), True, 'import numpy as np\n'), ((10623, 10679), 'numpy.linalg.norm', 'np.linalg.norm', (['(face_encodings - face_to_compare)'], {'axis': '(1)'}), '(face_encodings - face_to_compare, axis=1)\n', (10637, 10679), True, 'import numpy as np\n'), ((1222, 1238), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1230, 1238), True, 'import numpy as np\n'), ((1649, 1675), 'numpy.array', 'np.array', (['[cls_prob[x, y]]'], {}), '([cls_prob[x, y]])\n', (1657, 1675), True, 'import numpy as np\n'), ((1691, 1721), 'numpy.array', 'np.array', (['[dx1, dx2, dx3, dx4]'], {}), '([dx1, dx2, dx3, dx4])\n', (1699, 1721), True, 'import numpy as np\n'), ((2458, 2474), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (2468, 2474), True, 'import numpy as np\n'), ((3140, 3174), 'numpy.maximum', 'np.maximum', (['x1[I[-1]]', 'x1[I[0:-1]]'], {}), '(x1[I[-1]], x1[I[0:-1]])\n', (3150, 3174), True, 'import numpy as np\n'), ((3240, 3274), 'numpy.maximum', 'np.maximum', (['y1[I[-1]]', 'y1[I[0:-1]]'], {}), '(y1[I[-1]], y1[I[0:-1]])\n', (3250, 3274), True, 'import numpy as np\n'), ((3289, 3323), 'numpy.minimum', 'np.minimum', (['x2[I[-1]]', 'x2[I[0:-1]]'], {}), '(x2[I[-1]], x2[I[0:-1]])\n', (3299, 3323), True, 'import numpy as np\n'), ((3338, 3372), 'numpy.minimum', 'np.minimum', (['y2[I[-1]]', 'y2[I[0:-1]]'], {}), '(y2[I[-1]], y2[I[0:-1]])\n', (3348, 3372), True, 'import numpy as np\n'), ((3385, 3415), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (3395, 3415), True, 'import numpy as np\n'), ((3428, 3458), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (3438, 3458), True, 'import numpy as np\n'), ((4092, 4114), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), '([prob[pick]])\n', (4100, 4114), True, 'import numpy as np\n'), ((4253, 4282), 'numpy.array', 'np.array', (['[(x1 + dx1 * w)[0]]'], {}), '([(x1 + dx1 * w)[0]])\n', (4261, 4282), True, 'import numpy as np\n'), ((4294, 4323), 'numpy.array', 'np.array', (['[(y1 + dx2 * h)[0]]'], {}), '([(y1 + dx2 * h)[0]])\n', (4302, 4323), True, 'import numpy as np\n'), ((4335, 4364), 'numpy.array', 'np.array', (['[(x2 + dx3 * w)[0]]'], {}), '([(x2 + dx3 * w)[0]])\n', (4343, 4364), True, 'import numpy as np\n'), ((4376, 4405), 'numpy.array', 'np.array', (['[(y2 + dx4 * h)[0]]'], {}), '([(y2 + dx4 * h)[0]])\n', (4384, 4405), True, 'import numpy as np\n'), ((5293, 5315), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), '([prob[pick]])\n', (5301, 5315), True, 'import numpy as np\n'), ((5456, 5494), 'numpy.array', 'np.array', (['[(w * pts[pick, 0] + x1)[0]]'], {}), '([(w * pts[pick, 0] + x1)[0]])\n', (5464, 5494), True, 'import numpy as np\n'), ((5508, 5546), 'numpy.array', 'np.array', (['[(h * pts[pick, 5] + y1)[0]]'], {}), '([(h * pts[pick, 5] + y1)[0]])\n', (5516, 5546), True, 'import numpy as np\n'), ((5561, 5599), 'numpy.array', 'np.array', (['[(w * pts[pick, 1] + x1)[0]]'], {}), '([(w 
* pts[pick, 1] + x1)[0]])\n', (5569, 5599), True, 'import numpy as np\n'), ((5613, 5651), 'numpy.array', 'np.array', (['[(h * pts[pick, 6] + y1)[0]]'], {}), '([(h * pts[pick, 6] + y1)[0]])\n', (5621, 5651), True, 'import numpy as np\n'), ((5666, 5704), 'numpy.array', 'np.array', (['[(w * pts[pick, 2] + x1)[0]]'], {}), '([(w * pts[pick, 2] + x1)[0]])\n', (5674, 5704), True, 'import numpy as np\n'), ((5718, 5756), 'numpy.array', 'np.array', (['[(h * pts[pick, 7] + y1)[0]]'], {}), '([(h * pts[pick, 7] + y1)[0]])\n', (5726, 5756), True, 'import numpy as np\n'), ((5771, 5809), 'numpy.array', 'np.array', (['[(w * pts[pick, 3] + x1)[0]]'], {}), '([(w * pts[pick, 3] + x1)[0]])\n', (5779, 5809), True, 'import numpy as np\n'), ((5823, 5861), 'numpy.array', 'np.array', (['[(h * pts[pick, 8] + y1)[0]]'], {}), '([(h * pts[pick, 8] + y1)[0]])\n', (5831, 5861), True, 'import numpy as np\n'), ((5876, 5914), 'numpy.array', 'np.array', (['[(w * pts[pick, 4] + x1)[0]]'], {}), '([(w * pts[pick, 4] + x1)[0]])\n', (5884, 5914), True, 'import numpy as np\n'), ((5928, 5966), 'numpy.array', 'np.array', (['[(h * pts[pick, 9] + y1)[0]]'], {}), '([(h * pts[pick, 9] + y1)[0]])\n', (5936, 5966), True, 'import numpy as np\n'), ((5979, 6008), 'numpy.array', 'np.array', (['[(x1 + dx1 * w)[0]]'], {}), '([(x1 + dx1 * w)[0]])\n', (5987, 6008), True, 'import numpy as np\n'), ((6020, 6049), 'numpy.array', 'np.array', (['[(y1 + dx2 * h)[0]]'], {}), '([(y1 + dx2 * h)[0]])\n', (6028, 6049), True, 'import numpy as np\n'), ((6061, 6090), 'numpy.array', 'np.array', (['[(x2 + dx3 * w)[0]]'], {}), '([(x2 + dx3 * w)[0]])\n', (6069, 6090), True, 'import numpy as np\n'), ((6102, 6131), 'numpy.array', 'np.array', (['[(y2 + dx4 * h)[0]]'], {}), '([(y2 + dx4 * h)[0]])\n', (6110, 6131), True, 'import numpy as np\n'), ((8246, 8275), 'numpy.mean', 'np.mean', (['std_landmark'], {'axis': '(0)'}), '(std_landmark, axis=0)\n', (8253, 8275), True, 'import numpy as np\n'), ((8289, 8314), 'numpy.mean', 'np.mean', (['landmark'], {'axis': '(0)'}), '(landmark, axis=0)\n', (8296, 8314), True, 'import numpy as np\n'), ((8379, 8399), 'numpy.std', 'np.std', (['std_landmark'], {}), '(std_landmark)\n', (8385, 8399), True, 'import numpy as np\n'), ((8413, 8429), 'numpy.std', 'np.std', (['landmark'], {}), '(landmark)\n', (8419, 8429), True, 'import numpy as np\n'), ((8500, 8540), 'numpy.linalg.svd', 'np.linalg.svd', (['(std_landmark.T * landmark)'], {}), '(std_landmark.T * landmark)\n', (8513, 8540), True, 'import numpy as np\n'), ((10342, 10361), 'numpy.concatenate', 'np.concatenate', (['pre'], {}), '(pre)\n', (10356, 10361), True, 'import numpy as np\n'), ((10598, 10609), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10606, 10609), True, 'import numpy as np\n'), ((2643, 2668), 'numpy.repeat', 'np.repeat', (['[l]', '(2)'], {'axis': '(0)'}), '([l], 2, axis=0)\n', (2652, 2668), True, 'import numpy as np\n'), ((9834, 9847), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (9841, 9847), True, 'import numpy as np\n'), ((3580, 3604), 'numpy.where', 'np.where', (['(o <= threshold)'], {}), '(o <= threshold)\n', (3588, 3604), True, 'import numpy as np\n'), ((7305, 7321), 'math.atan', 'math.atan', (['(y / x)'], {}), '(y / x)\n', (7314, 7321), False, 'import math\n'), ((8131, 8154), 'numpy.matrix', 'np.matrix', (['std_landmark'], {}), '(std_landmark)\n', (8140, 8154), True, 'import numpy as np\n'), ((8193, 8212), 'numpy.matrix', 'np.matrix', (['landmark'], {}), '(landmark)\n', (8202, 8212), True, 'import numpy as np\n'), ((8591, 8642), 'numpy.hstack', 
'np.hstack', (['(s2 / s1 * R, c2.T - s2 / s1 * R * c1.T)'], {}), '((s2 / s1 * R, c2.T - s2 / s1 * R * c1.T))\n', (8600, 8642), True, 'import numpy as np\n'), ((8648, 8674), 'numpy.matrix', 'np.matrix', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (8657, 8674), True, 'import numpy as np\n'), ((10065, 10077), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (10074, 10077), True, 'import numpy as np\n')]
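To see the NMS routine above in action, here is a toy call with made-up boxes in [x1, y1, x2, y2, score] format; with an IoU threshold of 0.3, the lower-scoring of the two overlapping boxes is suppressed:

boxes = [
    [10, 10, 50, 50, 0.9],
    [12, 12, 52, 52, 0.8],      # overlaps the first box, lower score -> suppressed
    [100, 100, 140, 140, 0.7],  # disjoint -> kept
]
print(NMS(boxes, 0.3))  # keeps the 0.9 and 0.7 boxes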
# Copyright 2021 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from hypothesis import settings, given, strategies as st

import pytest
import numpy as np
from scipy.special import factorial
from thewalrus.quantum import total_photon_number_distribution

from mrmustard.lab import *
from mrmustard.physics.fock import dm_to_ket, ket_to_dm

# helper strategies
st_angle = st.floats(min_value=0, max_value=2 * np.pi)


@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock(n_mean, phi):
    """Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state
    Note that this is consistent with the Strawberryfields convention"""
    cutoff = 4
    r = np.arcsinh(np.sqrt(n_mean))
    circ = Circuit(ops=[S2gate(r=r, phi=phi)])
    amps = (Vacuum(num_modes=2) >> circ).ket(cutoffs=[cutoff, cutoff])
    diag = (1 / np.cosh(r)) * (np.exp(1j * phi) * np.tanh(r)) ** np.arange(cutoff)
    expected = np.diag(diag)
    assert np.allclose(amps, expected)


@given(n_mean=st.floats(0, 3), phi=st_angle, varphi=st_angle)
def test_hong_ou_mandel(n_mean, phi, varphi):
    """Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
    cutoff = 2
    r = np.arcsinh(np.sqrt(n_mean))
    ops = [
        S2gate(r=r, phi=phi)[0, 1],
        S2gate(r=r, phi=phi)[2, 3],
        BSgate(theta=np.pi / 4, phi=varphi)[1, 2],
    ]
    circ = Circuit(ops)
    amps = (Vacuum(4) >> circ).ket(cutoffs=[cutoff, cutoff, cutoff, cutoff])
    assert np.allclose(amps[1, 1, 1, 1], 0.0, atol=1e-6)


@given(alpha=st.complex_numbers(min_magnitude=0, max_magnitude=2))
def test_coherent_state(alpha):
    """Test that coherent states have the correct photon number statistics"""
    cutoff = 10
    amps = Coherent(x=alpha.real, y=alpha.imag).ket(cutoffs=[cutoff])
    expected = np.exp(-0.5 * np.abs(alpha) ** 2) * np.array(
        [alpha**n / np.sqrt(factorial(n)) for n in range(cutoff)]
    )
    assert np.allclose(amps, expected, atol=1e-6)


@given(r=st.floats(0, 2), phi=st_angle)
def test_squeezed_state(r, phi):
    """Test that squeezed states have the correct photon number statistics
    Note that we use the same sign with respect to SMSV in https://en.wikipedia.org/wiki/Squeezed_coherent_state"""
    cutoff = 10
    amps = SqueezedVacuum(r=r, phi=phi).ket(cutoffs=[cutoff])
    assert np.allclose(amps[1::2], 0.0)
    non_zero_amps = amps[0::2]
    len_non_zero = len(non_zero_amps)
    amp_pairs = (
        1
        / np.sqrt(np.cosh(r))
        * np.array(
            [
                (-np.exp(1j * phi) * np.tanh(r)) ** n
                * np.sqrt(factorial(2 * n))
                / (2**n * factorial(n))
                for n in range(len_non_zero)
            ]
        )
    )
    assert np.allclose(non_zero_amps, amp_pairs)


@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock_mean_and_covar(n_mean, phi):
    """Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
    r = np.arcsinh(np.sqrt(n_mean))
    state = Vacuum(num_modes=2) >> S2gate(r=r, phi=phi)
    meanN = state.number_means
    covN = state.number_cov
    expectedN = np.array([n_mean, n_mean])
    expectedCov = n_mean * (n_mean + 1) * np.ones([2, 2])
    assert np.allclose(meanN, expectedN)
    assert np.allclose(covN, expectedCov)


@given(n_mean=st.floats(0, 2), phi=st_angle, eta=st.floats(min_value=0, max_value=1))
def test_lossy_squeezing(n_mean, phi, eta):
    """Tests the total photon number distribution of a lossy squeezed state"""
    r = np.arcsinh(np.sqrt(n_mean))
    cutoff = 40
    ps = (SqueezedVacuum(r=r, phi=phi) >> Attenuator(transmissivity=eta)).fock_probabilities(
        [cutoff]
    )
    expected = np.array([total_photon_number_distribution(n, 1, r, eta) for n in range(cutoff)])
    assert np.allclose(ps, expected, atol=1e-6)


@given(n_mean=st.floats(0, 2), phi=st_angle, eta_0=st.floats(0, 1), eta_1=st.floats(0, 1))
def test_lossy_two_mode_squeezing(n_mean, phi, eta_0, eta_1):
    """Tests the photon number distribution of a lossy two-mode squeezed state"""
    cutoff = 40
    n = np.arange(cutoff)
    L = Attenuator(transmissivity=[eta_0, eta_1])
    state = TMSV(r=np.arcsinh(np.sqrt(n_mean)), phi=phi) >> L
    ps0 = state.get_modes(0).fock_probabilities([cutoff])
    ps1 = state.get_modes(1).fock_probabilities([cutoff])
    mean_0 = np.sum(n * ps0)
    mean_1 = np.sum(n * ps1)
    assert np.allclose(mean_0, n_mean * eta_0, atol=1e-5)
    assert np.allclose(mean_1, n_mean * eta_1, atol=1e-5)


@given(num_modes=st.integers(1, 3))
def test_density_matrix(num_modes):
    """Tests the density matrix of a pure state is equal to |psi><psi|"""
    modes = list(range(num_modes))
    cutoffs = [num_modes + 1] * num_modes
    G = Ggate(num_modes=num_modes)
    L = Attenuator(transmissivity=1.0)
    rho_legit = (Vacuum(num_modes) >> G >> L[modes]).dm(cutoffs=cutoffs)
    rho_made = (Vacuum(num_modes) >> G).dm(cutoffs=cutoffs)
    # rho_legit = L[modes](G(Vacuum(num_modes))).dm(cutoffs=cutoffs)
    # rho_built = G(Vacuum(num_modes=num_modes)).dm(cutoffs=cutoffs)
    assert np.allclose(rho_legit, rho_made)


@pytest.mark.parametrize(
    "state",
    [
        Vacuum(num_modes=2),
        Fock(4),
        Coherent(x=0.1, y=-0.4, cutoffs=[15]),
        Gaussian(num_modes=2, cutoffs=[15]),
    ],
)
def test_dm_to_ket(state):
    """Tests pure state density matrix conversion to ket"""
    dm = state.dm()
    ket = dm_to_ket(dm)
    # check if ket is normalized
    assert np.allclose(np.linalg.norm(ket), 1)
    # check kets are equivalent
    assert np.allclose(ket, state.ket())
    dm_reconstructed = ket_to_dm(ket)
    # check ket leads to same dm
    assert np.allclose(dm, dm_reconstructed)


def test_dm_to_ket_error():
    """Test dm_to_ket raises an error when state is mixed"""
    state = Coherent(x=0.1, y=-0.4, cutoffs=[15]) >> Attenuator(0.5)
    with pytest.raises(ValueError):
        dm_to_ket(state)
[ "numpy.sum", "numpy.abs", "numpy.allclose", "numpy.ones", "numpy.arange", "numpy.linalg.norm", "numpy.exp", "numpy.diag", "pytest.raises", "hypothesis.strategies.complex_numbers", "hypothesis.strategies.integers", "mrmustard.physics.fock.dm_to_ket", "scipy.special.factorial", "numpy.tanh", "numpy.cosh", "hypothesis.strategies.floats", "numpy.array", "mrmustard.physics.fock.ket_to_dm", "thewalrus.quantum.total_photon_number_distribution", "numpy.sqrt" ]
[((901, 944), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0)', 'max_value': '(2 * np.pi)'}), '(min_value=0, max_value=2 * np.pi)\n', (910, 944), True, 'from hypothesis import settings, given, strategies as st\n'), ((1475, 1488), 'numpy.diag', 'np.diag', (['diag'], {}), '(diag)\n', (1482, 1488), True, 'import numpy as np\n'), ((1500, 1527), 'numpy.allclose', 'np.allclose', (['amps', 'expected'], {}), '(amps, expected)\n', (1511, 1527), True, 'import numpy as np\n'), ((2041, 2087), 'numpy.allclose', 'np.allclose', (['amps[1, 1, 1, 1]', '(0.0)'], {'atol': '(1e-06)'}), '(amps[1, 1, 1, 1], 0.0, atol=1e-06)\n', (2052, 2087), True, 'import numpy as np\n'), ((2496, 2535), 'numpy.allclose', 'np.allclose', (['amps', 'expected'], {'atol': '(1e-06)'}), '(amps, expected, atol=1e-06)\n', (2507, 2535), True, 'import numpy as np\n'), ((2890, 2918), 'numpy.allclose', 'np.allclose', (['amps[1::2]', '(0.0)'], {}), '(amps[1::2], 0.0)\n', (2901, 2918), True, 'import numpy as np\n'), ((3304, 3341), 'numpy.allclose', 'np.allclose', (['non_zero_amps', 'amp_pairs'], {}), '(non_zero_amps, amp_pairs)\n', (3315, 3341), True, 'import numpy as np\n'), ((3717, 3743), 'numpy.array', 'np.array', (['[n_mean, n_mean]'], {}), '([n_mean, n_mean])\n', (3725, 3743), True, 'import numpy as np\n'), ((3813, 3842), 'numpy.allclose', 'np.allclose', (['meanN', 'expectedN'], {}), '(meanN, expectedN)\n', (3824, 3842), True, 'import numpy as np\n'), ((3854, 3884), 'numpy.allclose', 'np.allclose', (['covN', 'expectedCov'], {}), '(covN, expectedCov)\n', (3865, 3884), True, 'import numpy as np\n'), ((4373, 4410), 'numpy.allclose', 'np.allclose', (['ps', 'expected'], {'atol': '(1e-06)'}), '(ps, expected, atol=1e-06)\n', (4384, 4410), True, 'import numpy as np\n'), ((4671, 4688), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (4680, 4688), True, 'import numpy as np\n'), ((4930, 4945), 'numpy.sum', 'np.sum', (['(n * ps0)'], {}), '(n * ps0)\n', (4936, 4945), True, 'import numpy as np\n'), ((4959, 4974), 'numpy.sum', 'np.sum', (['(n * ps1)'], {}), '(n * ps1)\n', (4965, 4974), True, 'import numpy as np\n'), ((4986, 5033), 'numpy.allclose', 'np.allclose', (['mean_0', '(n_mean * eta_0)'], {'atol': '(1e-05)'}), '(mean_0, n_mean * eta_0, atol=1e-05)\n', (4997, 5033), True, 'import numpy as np\n'), ((5044, 5091), 'numpy.allclose', 'np.allclose', (['mean_1', '(n_mean * eta_1)'], {'atol': '(1e-05)'}), '(mean_1, n_mean * eta_1, atol=1e-05)\n', (5055, 5091), True, 'import numpy as np\n'), ((5672, 5704), 'numpy.allclose', 'np.allclose', (['rho_legit', 'rho_made'], {}), '(rho_legit, rho_made)\n', (5683, 5704), True, 'import numpy as np\n'), ((6017, 6030), 'mrmustard.physics.fock.dm_to_ket', 'dm_to_ket', (['dm'], {}), '(dm)\n', (6026, 6030), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((6208, 6222), 'mrmustard.physics.fock.ket_to_dm', 'ket_to_dm', (['ket'], {}), '(ket)\n', (6217, 6222), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((6267, 6300), 'numpy.allclose', 'np.allclose', (['dm', 'dm_reconstructed'], {}), '(dm, dm_reconstructed)\n', (6278, 6300), True, 'import numpy as np\n'), ((1242, 1257), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (1249, 1257), True, 'import numpy as np\n'), ((961, 976), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (970, 976), True, 'from hypothesis import settings, given, strategies as st\n'), ((1771, 1786), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (1778, 1786), True, 'import numpy as 
np\n'), ((1544, 1559), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (1553, 1559), True, 'from hypothesis import settings, given, strategies as st\n'), ((2102, 2154), 'hypothesis.strategies.complex_numbers', 'st.complex_numbers', ([], {'min_magnitude': '(0)', 'max_magnitude': '(2)'}), '(min_magnitude=0, max_magnitude=2)\n', (2120, 2154), True, 'from hypothesis import settings, given, strategies as st\n'), ((2546, 2561), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (2555, 2561), True, 'from hypothesis import settings, given, strategies as st\n'), ((3569, 3584), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (3576, 3584), True, 'import numpy as np\n'), ((3786, 3801), 'numpy.ones', 'np.ones', (['[2, 2]'], {}), '([2, 2])\n', (3793, 3801), True, 'import numpy as np\n'), ((3358, 3373), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(3)'], {}), '(0, 3)\n', (3367, 3373), True, 'from hypothesis import settings, given, strategies as st\n'), ((4115, 4130), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (4122, 4130), True, 'import numpy as np\n'), ((3901, 3916), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (3910, 3916), True, 'from hypothesis import settings, given, strategies as st\n'), ((3936, 3971), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0)', 'max_value': '(1)'}), '(min_value=0, max_value=1)\n', (3945, 3971), True, 'from hypothesis import settings, given, strategies as st\n'), ((4426, 4441), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(2)'], {}), '(0, 2)\n', (4435, 4441), True, 'from hypothesis import settings, given, strategies as st\n'), ((4463, 4478), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4472, 4478), True, 'from hypothesis import settings, given, strategies as st\n'), ((4486, 4501), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(1)'], {}), '(0, 1)\n', (4495, 4501), True, 'from hypothesis import settings, given, strategies as st\n'), ((5110, 5127), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(3)'], {}), '(1, 3)\n', (5121, 5127), True, 'from hypothesis import settings, given, strategies as st\n'), ((6087, 6106), 'numpy.linalg.norm', 'np.linalg.norm', (['ket'], {}), '(ket)\n', (6101, 6106), True, 'import numpy as np\n'), ((6471, 6496), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6484, 6496), False, 'import pytest\n'), ((6506, 6522), 'mrmustard.physics.fock.dm_to_ket', 'dm_to_ket', (['state'], {}), '(state)\n', (6515, 6522), False, 'from mrmustard.physics.fock import dm_to_ket, ket_to_dm\n'), ((1393, 1403), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (1400, 1403), True, 'import numpy as np\n'), ((1442, 1459), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (1451, 1459), True, 'import numpy as np\n'), ((4290, 4336), 'thewalrus.quantum.total_photon_number_distribution', 'total_photon_number_distribution', (['n', '(1)', 'r', 'eta'], {}), '(n, 1, r, eta)\n', (4322, 4336), False, 'from thewalrus.quantum import total_photon_number_distribution\n'), ((1408, 1426), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1414, 1426), True, 'import numpy as np\n'), ((1427, 1437), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (1434, 1437), True, 'import numpy as np\n'), ((3034, 3044), 'numpy.cosh', 'np.cosh', (['r'], {}), '(r)\n', (3041, 3044), True, 'import numpy as np\n'), ((2381, 2394), 'numpy.abs', 'np.abs', (['alpha'], 
{}), '(alpha)\n', (2387, 2394), True, 'import numpy as np\n'), ((4769, 4784), 'numpy.sqrt', 'np.sqrt', (['n_mean'], {}), '(n_mean)\n', (4776, 4784), True, 'import numpy as np\n'), ((2441, 2453), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (2450, 2453), False, 'from scipy.special import factorial\n'), ((3204, 3216), 'scipy.special.factorial', 'factorial', (['n'], {}), '(n)\n', (3213, 3216), False, 'from scipy.special import factorial\n'), ((3160, 3176), 'scipy.special.factorial', 'factorial', (['(2 * n)'], {}), '(2 * n)\n', (3169, 3176), False, 'from scipy.special import factorial\n'), ((3117, 3127), 'numpy.tanh', 'np.tanh', (['r'], {}), '(r)\n', (3124, 3127), True, 'import numpy as np\n'), ((3098, 3116), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (3104, 3116), True, 'import numpy as np\n')]
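test_two_mode_squeezing_fock above asserts that the two-mode squeezed vacuum ket is diagonal with entries (e^{i phi} tanh r)^n / cosh r. As a plain-numpy sanity check of that closed form (no mrmustard required; the cutoff of 200 is an arbitrary choice made here), the implied photon-number distribution normalizes and reproduces the mean photon number:

import numpy as np

r = np.arcsinh(np.sqrt(1.5))  # n_mean = 1.5, in the range of the hypothesis strategy
n = np.arange(200)
probs = np.tanh(r) ** (2 * n) / np.cosh(r) ** 2  # |amplitude|^2 per Fock level
print(probs.sum())        # -> 1.0 (geometric series)
print((n * probs).sum())  # -> 1.5, the mean photon number per mode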
# Experimenting with the difference between the median and the mean. The images
# (30 of them) show students at random positions, and the camera was on a tripod
# in a classroom. We take, on the one hand, the means and, on the other, the
# medians of the pixel values. The end results are very different!
#
# <NAME> April 2021
# Matlab -> Python Ville Tilvis June 2021

import numpy as np
import matplotlib.pyplot as plt

# Number of images
Nim = 30

# Initialize the matrices in which the means and medians are stored
im_ave = np.zeros([2000, 2997, 3])
im_median = np.zeros([2000, 2997, 3])
im_4D = np.zeros([2000, 2997, 3, Nim])

print("Loading images:")

# Open the images one by one
for iii in range(0, Nim):
    fname = '../_kuvat/IMGP' + str(1423 + iii) + '.jpg'
    im_orig = plt.imread(fname, 'jpg')
    # Add the current image to the stack
    im_4D[:, :, :, iii] = im_orig
    # Track progress
    print(iii + 1, "/", Nim)

print("Computing the mean and the median...")
im_ave = np.mean(im_4D, axis=3) / 255
im_median = np.median(im_4D, axis=3) / 255
print("Done!")
print("")

print("Showing the images...")
# Subtract the mean image and the median image
# from the red colour channel of the empty image
im0 = np.array(plt.imread('../_kuvat/IMGP1444.jpg', 'jpg')) / 255
error1 = np.abs(im_ave - im0)
error2 = np.abs(im_median - im0)
errorpic = np.concatenate((error1, error2), axis=1)
errorpic = errorpic / np.max(errorpic[:, :, 0])
errorpic = np.power(errorpic, 0.3)

# Look at the images
plt.subplot(2, 1, 1)
plt.imshow(np.concatenate((im_ave, im_median), axis=1))
plt.axis('off')
plt.gcf().set_dpi(600)
plt.subplot(2, 1, 2)
plt.imshow(errorpic[:, :, 0], cmap='gray', interpolation='none')
plt.axis('off')
plt.gcf().set_dpi(600)
plt.show()

print("Done!")
print("")
print("Saving images...")
# Save the images
plt.imsave('../_kuvat/im_average.jpg', im_ave)
plt.imsave('../_kuvat/im_median.jpg', im_median)
print("Done!")
[ "matplotlib.pyplot.subplot", "numpy.abs", "matplotlib.pyplot.show", "numpy.median", "numpy.power", "matplotlib.pyplot.imshow", "numpy.zeros", "matplotlib.pyplot.axis", "numpy.max", "numpy.mean", "matplotlib.pyplot.imsave", "matplotlib.pyplot.gcf", "matplotlib.pyplot.imread", "numpy.concatenate" ]
[((479, 504), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3]'], {}), '([2000, 2997, 3])\n', (487, 504), True, 'import numpy as np\n'), ((515, 540), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3]'], {}), '([2000, 2997, 3])\n', (523, 540), True, 'import numpy as np\n'), ((547, 577), 'numpy.zeros', 'np.zeros', (['[2000, 2997, 3, Nim]'], {}), '([2000, 2997, 3, Nim])\n', (555, 577), True, 'import numpy as np\n'), ((1215, 1235), 'numpy.abs', 'np.abs', (['(im_ave - im0)'], {}), '(im_ave - im0)\n', (1221, 1235), True, 'import numpy as np\n'), ((1243, 1266), 'numpy.abs', 'np.abs', (['(im_median - im0)'], {}), '(im_median - im0)\n', (1249, 1266), True, 'import numpy as np\n'), ((1276, 1316), 'numpy.concatenate', 'np.concatenate', (['(error1, error2)'], {'axis': '(1)'}), '((error1, error2), axis=1)\n', (1290, 1316), True, 'import numpy as np\n'), ((1370, 1393), 'numpy.power', 'np.power', (['errorpic', '(0.3)'], {}), '(errorpic, 0.3)\n', (1378, 1393), True, 'import numpy as np\n'), ((1411, 1431), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1422, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1501), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1494, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1537, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1610), 'matplotlib.pyplot.imshow', 'plt.imshow', (['errorpic[:, :, 0]'], {'cmap': '"""gray"""', 'interpolation': '"""none"""'}), "(errorpic[:, :, 0], cmap='gray', interpolation='none')\n", (1556, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1623), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1616, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1656, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1787), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""../_kuvat/im_average.jpg"""', 'im_ave'], {}), "('../_kuvat/im_average.jpg', im_ave)\n", (1751, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1837), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""../_kuvat/im_median.jpg"""', 'im_median'], {}), "('../_kuvat/im_median.jpg', im_median)\n", (1799, 1837), True, 'import matplotlib.pyplot as plt\n'), ((724, 748), 'matplotlib.pyplot.imread', 'plt.imread', (['fname', '"""jpg"""'], {}), "(fname, 'jpg')\n", (734, 748), True, 'import matplotlib.pyplot as plt\n'), ((931, 953), 'numpy.mean', 'np.mean', (['im_4D'], {'axis': '(3)'}), '(im_4D, axis=3)\n', (938, 953), True, 'import numpy as np\n'), ((970, 994), 'numpy.median', 'np.median', (['im_4D'], {'axis': '(3)'}), '(im_4D, axis=3)\n', (979, 994), True, 'import numpy as np\n'), ((1335, 1360), 'numpy.max', 'np.max', (['errorpic[:, :, 0]'], {}), '(errorpic[:, :, 0])\n', (1341, 1360), True, 'import numpy as np\n'), ((1443, 1486), 'numpy.concatenate', 'np.concatenate', (['(im_ave, im_median)'], {'axis': '(1)'}), '((im_ave, im_median), axis=1)\n', (1457, 1486), True, 'import numpy as np\n'), ((1157, 1200), 'matplotlib.pyplot.imread', 'plt.imread', (['"""../_kuvat/IMGP1444.jpg"""', '"""jpg"""'], {}), "('../_kuvat/IMGP1444.jpg', 'jpg')\n", (1167, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1511), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1509, 1511), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1633), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1631, 
1633), True, 'import matplotlib.pyplot as plt\n')]
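A tiny synthetic version of the experiment above (invented numbers): in a stack of mostly constant frames, a bright outlier such as a passing student shifts the mean but leaves the median untouched:

import numpy as np

stack = np.full((30, 4), 100.0)  # 30 frames of a 4-pixel "image"
for k in range(4):
    stack[k, k] = 255.0       # one bright outlier per pixel, each in a different frame
print(np.mean(stack, axis=0))    # ~105.2 everywhere -> ghosting in the mean image
print(np.median(stack, axis=0))  # 100.0 everywhere -> clean background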
import numpy as np
import gym

from reps.acreps import acREPS

np.random.seed(1337)

env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 250
env.unwrapped.dt = 0.05
env.unwrapped.sigma = 1e-4
# env.seed(1337)

acreps = acREPS(env=env, kl_bound=0.1, discount=0.985, lmbda=0.95,
                scale=[1., 1., 8.0, 2.5], mult=0.5,
                nb_vfeat=75, nb_pfeat=75, vf_reg=1e-12)

acreps.run(nb_iter=15, nb_train_samples=5000,
           nb_eval_rollouts=25, nb_eval_steps=100)

# evaluate
rollouts, _ = acreps.evaluate(nb_rollouts=25, nb_steps=100)

import matplotlib.pyplot as plt

fig, ax = plt.subplots(nrows=1, ncols=acreps.state_dim + acreps.act_dim, figsize=(12, 4))

for roll in rollouts:
    for k, col in enumerate(ax[:-1]):
        col.plot(roll['x'][:, k])
    ax[-1].plot(roll['uc'])
plt.show()
[ "reps.acreps.acREPS", "numpy.random.seed", "gym.make", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ]
[((63, 83), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (77, 83), True, 'import numpy as np\n'), ((91, 117), 'gym.make', 'gym.make', (['"""Pendulum-RL-v1"""'], {}), "('Pendulum-RL-v1')\n", (99, 117), False, 'import gym\n'), ((225, 365), 'reps.acreps.acREPS', 'acREPS', ([], {'env': 'env', 'kl_bound': '(0.1)', 'discount': '(0.985)', 'lmbda': '(0.95)', 'scale': '[1.0, 1.0, 8.0, 2.5]', 'mult': '(0.5)', 'nb_vfeat': '(75)', 'nb_pfeat': '(75)', 'vf_reg': '(1e-12)'}), '(env=env, kl_bound=0.1, discount=0.985, lmbda=0.95, scale=[1.0, 1.0, \n 8.0, 2.5], mult=0.5, nb_vfeat=75, nb_pfeat=75, vf_reg=1e-12)\n', (231, 365), False, 'from reps.acreps import acREPS\n'), ((604, 683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(acreps.state_dim + acreps.act_dim)', 'figsize': '(12, 4)'}), '(nrows=1, ncols=acreps.state_dim + acreps.act_dim, figsize=(12, 4))\n', (616, 683), True, 'import matplotlib.pyplot as plt\n'), ((806, 816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (814, 816), True, 'import matplotlib.pyplot as plt\n')]
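For orientation, a generic rollout loop in the classic (pre-0.26) gym API, which a method like acreps.evaluate presumably wraps; the random policy and the stock 'Pendulum-v1' id stand in for the learned policy and the custom 'Pendulum-RL-v1' registration:

import gym

env = gym.make('Pendulum-v1')
obs = env.reset()
states, actions = [], []
for _ in range(100):
    act = env.action_space.sample()          # stand-in for the learned policy
    obs, reward, done, info = env.step(act)  # classic 4-tuple step API
    states.append(obs)
    actions.append(act)
    if done:
        break
env.close()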
from sage.all import RDF, CDF, matrix, prod
import scipy.linalg
import numpy as np


def column_space_intersection(*As, tol, orthonormal=False):
    r"""
    Return a matrix with orthonormal columns spanning the intersection
    of the column spaces of the given matrices.

    INPUT:

    - ``*As`` -- matrices with a fixed number of rows and linearly
      independent (or orthonormal) columns each

    - ``tol`` -- tolerance for truncating the singular values to
      determine the rank of the intersection

    - ``orthonormal`` -- boolean (default: ``False``); if ``True``, the
      columns of each matrix are assumed to be orthonormal

    ALGORITHM:

    <NAME> -- Algorithm 12.4.3
    """
    if len(As) < 1:
        raise ValueError("at least one matrix required")
    n = As[0].nrows()
    for A in As:
        if A.nrows() != n:
            raise ValueError("matrices must have same number of rows")
    if all(A.base_ring().is_exact() for A in As):
        V = As[0].column_space()
        for A in As[1:]:
            V = V.intersection(A.column_space())
        return V.basis_matrix().T
    for A in As:
        if A.base_ring() not in (RDF, CDF):
            raise ValueError("only matrices over RDF/CDF or exact fields supported")
    if any(A.ncols() == 0 for A in As):
        return matrix(As[0].base_ring(), n, 0)
    Qs = As if orthonormal else [A.QR()[0][:, :A.ncols()] for A in As]
    if len(As) == 1:
        return Qs[0]
    # for better performance, we switch to numpy
    # Taking slices or hermitian transposes is a bottleneck with double dense matrices in Sage.
    Qs = [Q.numpy() for Q in Qs]
    # C = prod([Qs[0].H] + [Q*Q.H for Q in Qs[1:-1]] + [Qs[-1]])
    # sort Qs such that smallest matrix is last, second smallest first
    Q_last = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
    Q_first = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
    C = Q_last
    for Q in Qs:  # without Q_last and Q_first
        C = Q @ (Q.conj().T @ C)  # this should be faster than (Q * Q.H) * C, since Q*Q.H is very large
    C = Q_first.conj().T @ C
    Σ, Vh = scipy.linalg.svd(C, overwrite_a=True)[1:]  # we can overwrite, since C involves at least 1 multiplication
    rk = np.sum(1 - Σ < tol)
    return matrix(Q_last @ Vh.T[:, :rk].conj())


def null_space_intersection(*As, tol):
    r"""
    Return a matrix with orthonormal columns spanning the intersection
    of the null spaces of the given matrices.

    INPUT:

    - ``*As`` -- matrices with a fixed number of columns

    - ``tol`` -- tolerance for truncating the singular values to
      determine the rank of intermediate results

    ALGORITHM:

    <NAME> -- Algorithm 12.4.2
    """
    if len(As) < 1:
        raise ValueError("at least one matrix required")
    n = As[0].ncols()
    if all(A.base_ring().is_exact() for A in As):
        ker = As[0].right_kernel()
        for A in As[1:]:
            ker = ker.intersection(A.right_kernel())
        # TODO document that this does not have orthonormal columns
        return ker.basis_matrix().T
    for A in As:
        if A.base_ring() not in (RDF, CDF):
            raise ValueError("only matrices over RDF/CDF or exact rings supported")
        if A.ncols() != n:
            raise ValueError("matrices must have same number of columns")
    Y = None
    for A in As:
        if A.nrows() == 0:
            continue
        C = A * Y if Y is not None else A
        Σ, V = C.SVD()[1:]
        q = len([s for s in Σ.diagonal() if s > tol])
        if q >= C.ncols():
            return matrix(As[0].base_ring(), n, 0)
        X = V[:, q:]
        Y = Y * X if Y is not None else X
    if Y is None:
        # all the matrices have 0 rows
        return matrix.identity(As[0].base_ring(), n)
    else:
        return Y


def null_space(A, tol):
    import numpy
    import scipy.linalg
    if A.nrows() == 0:
        return matrix.identity(A.base_ring(), A.ncols())
    return matrix(numpy.ascontiguousarray(scipy.linalg.null_space(A, rcond=tol)))


def _tests_sage():
    """
    TESTS::

        sage: from momentproblems import intersections
        sage: TestSuite(intersections._tests_sage()).run(skip='_test_pickling')
    """
    from sage.all import SageObject, matrix, RDF, ZZ
    import numpy
    import numpy.linalg
    import scipy.linalg

    class Tests(SageObject):
        def matrices(self):
            # test data
            for _ in range(5):
                for num in range(1, 5):
                    # generate some matrices with few rows, so we can intersect their kernels
                    matrices = [matrix.random(RDF, ZZ.random_element(0, 4), 9) for _ in range(num)]
                    yield matrices

        def matrices2(self):
            # test data
            for _ in range(5):
                for num in range(1, 5):
                    # generate some matrices with few rows, so we can intersect their kernels
                    matrices = [matrix.random(RDF, 9, 9 - ZZ.random_element(0, 4)) for _ in range(num)]
                    yield matrices

        def equal_spaces(self, A, B, tol):
            from numpy.linalg import matrix_rank
            return matrix_rank(A.augment(B), tol) == matrix_rank(A, tol) == matrix_rank(B, tol)

        def _test_null_space_intersection(self, **kwds):
            tol = 1e-10
            for As in self.matrices():
                ker = null_space_intersection(*As, tol=tol)
                assert all([ker.ncols() == 0 or A.nrows() == 0 or (A * ker).norm() < tol for A in As])
                assert max(0, As[0].ncols() - sum([A.nrows() for A in As])) == ker.ncols()  # generically the correct dimension
                # the intersection is also simply the null space of the augmented matrix
                ker2 = null_space(matrix(RDF, [v for A in As for v in A.rows()], ncols=As[0].ncols()), tol)
                assert self.equal_spaces(ker, ker2, tol)

        def _test_column_space_intersection(self, **kwds):
            tol = 1e-10
            for As in self.matrices2():
                B = column_space_intersection(*As, tol=tol)
                assert B.ncols() == max(0, As[0].nrows() - sum([A.nrows() - A.ncols() for A in As]))  # generically the correct dimension
                for A in As:
                    assert self.equal_spaces(A.augment(B), A, tol)  # B is contained in A

        def _test_compatibility(self, **kwds):
            tol = 1e-10
            for As in self.matrices():
                # computing null space intersection is the same as computing
                # column space intersection of null spaces
                ker = null_space_intersection(*As, tol=tol)
                ker2 = column_space_intersection(*[null_space(A, tol) for A in As], tol=tol, orthonormal=True)
                assert self.equal_spaces(ker, ker2, tol)

    return Tests()
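# A minimal usage sketch (added for illustration, not part of the original module;
# assumes a Sage session where this file is importable, and an arbitrary tol)::
#
#     sage: from sage.all import matrix, RDF
#     sage: A = matrix.random(RDF, 5, 3)
#     sage: B = A * matrix.random(RDF, 3, 2)   # col(B) lies inside col(A)
#     sage: W = column_space_intersection(A, B, tol=1e-10)
#     sage: W.ncols()   # generically 2, the dimension of col(B)
#     2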
[ "numpy.linalg.matrix_rank", "numpy.sum", "sage.all.ZZ.random_element" ]
[((2226, 2245), 'numpy.sum', 'np.sum', (['(1 - Σ < tol)'], {}), '(1 - Σ < tol)\n', (2232, 2245), True, 'import numpy as np\n'), ((5203, 5222), 'numpy.linalg.matrix_rank', 'matrix_rank', (['A', 'tol'], {}), '(A, tol)\n', (5214, 5222), False, 'from numpy.linalg import matrix_rank\n'), ((5226, 5245), 'numpy.linalg.matrix_rank', 'matrix_rank', (['B', 'tol'], {}), '(B, tol)\n', (5237, 5245), False, 'from numpy.linalg import matrix_rank\n'), ((4615, 4638), 'sage.all.ZZ.random_element', 'ZZ.random_element', (['(0)', '(4)'], {}), '(0, 4)\n', (4632, 4638), False, 'from sage.all import SageObject, matrix, RDF, ZZ\n'), ((4976, 4999), 'sage.all.ZZ.random_element', 'ZZ.random_element', (['(0)', '(4)'], {}), '(0, 4)\n', (4993, 4999), False, 'from sage.all import SageObject, matrix, RDF, ZZ\n')]
'''
This file is a modification of the file below to enable map save
https://github.com/simondlevy/PyRoboViz/blob/master/roboviz/__init__.py

roboviz.py - Python classes for displaying maps and robots

Requires: numpy, matplotlib

Copyright (C) 2018 <NAME>

This file is part of PyRoboViz.

PyRoboViz is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.

PyRoboViz is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
# Essential imports
import matplotlib.pyplot as plt
import matplotlib.cm as colormap
import matplotlib.lines as mlines
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import datetime

# This helps with Raspberry Pi
import matplotlib
matplotlib.use('TkAgg')


class Visualizer(object):

    # Robot display params
    ROBOT_HEIGHT_M = 0.5
    ROBOT_WIDTH_M = 0.3

    def __init__(self, map_size_pixels, map_size_meters, title, show_trajectory=False, zero_angle=0):
        # Put origin in center
        self._init(map_size_pixels, map_size_meters, title, -map_size_pixels / 2, show_trajectory, zero_angle)

    def display(self, x_m, y_m, theta_deg):
        self._setPose(x_m, y_m, theta_deg)
        return self._refresh()

    def _init(self, map_size_pixels, map_size_meters, title, shift, show_trajectory=False, zero_angle=0):
        # Store constants for update
        self.map_size_pixels = map_size_pixels
        self.map_scale_meters_per_pixel = map_size_meters / float(map_size_pixels)

        # Create a byte array to display the map with a color overlay
        self.bgrbytes = bytearray(map_size_pixels * map_size_pixels * 3)

        # Make a nice big (10"x10") figure
        fig = plt.figure(figsize=(10, 10), facecolor="white")
        fig.set_facecolor("white")

        # Added this line to make sure the map background is white
        plt.rcParams['figure.facecolor'] = 'white'

        # Store Python ID of figure to detect window close
        self.figid = id(fig)
        fig.canvas.set_window_title('SLAM')
        plt.title(title)

        # Use an "artist" to speed up map drawing
        self.img_artist = None

        # No vehicle to show yet
        self.vehicle = None

        # Create axes
        self.ax = fig.gca()
        self.ax.set_xlabel('X (m)')
        self.ax.set_ylabel('Y (m)')
        # self.ax.grid(False)

        # Hence we must relabel the axis ticks to show meters
        ticks = np.arange(shift, self.map_size_pixels + shift + 100, 100)
        labels = [str(self.map_scale_meters_per_pixel * tick) for tick in ticks]
        self.ax.set_xticklabels(labels)
        self.ax.set_yticklabels(labels)
        self.ax.set_facecolor('w')

        # Store previous position for trajectory
        self.prevpos = None
        self.showtraj = show_trajectory

        # We base the axis on pixels, to support displaying the map
        self.ax.set_xlim([shift, self.map_size_pixels + shift])
        self.ax.set_ylim([shift, self.map_size_pixels + shift])

        # Set up default shift for centering at origin
        shift = -self.map_size_pixels / 2
        # print("shift = " + str(shift))

        self.zero_angle = zero_angle
        self.start_angle = None
        self.rotate_angle = 0

    def _setPose(self, x_m, y_m, theta_deg):
        '''
        Sets vehicle pose:
        X:     left/right   (m)
        Y:     forward/back (m)
        theta: rotation (degrees)
        '''
        # If zero-angle was indicated, grab first angle to compute rotation
        if self.start_angle is None and self.zero_angle != 0:
            self.start_angle = theta_deg
            self.rotate_angle = self.zero_angle - self.start_angle

        # Rotate by computed angle, or zero if no zero-angle indicated
        d = self.rotate_angle
        a = np.radians(d)
        c = np.cos(a)
        s = np.sin(a)
        x_m, y_m = x_m * c - y_m * s, y_m * c + x_m * s

        # Erase previous vehicle image after first iteration
        if not self.vehicle is None:
            self.vehicle.remove()

        # Use a very short arrow shaft to orient the head of the arrow
        theta_rad = np.radians(theta_deg + d)
        c = np.cos(theta_rad)
        s = np.sin(theta_rad)
        l = 0.1
        dx = l * c
        dy = l * s

        s = self.map_scale_meters_per_pixel
        self.vehicle = self.ax.arrow(x_m / s, y_m / s, dx, dy,
                                     head_width=Visualizer.ROBOT_WIDTH_M / s,
                                     head_length=Visualizer.ROBOT_HEIGHT_M / s,
                                     fc='r', ec='r')

        # Show trajectory if indicated
        currpos = self._m2pix(x_m, y_m)
        if self.showtraj and not self.prevpos is None:
            if (self.prevpos[0] != 0 and self.prevpos[1] != 0):
                self.ax.add_line(mlines.Line2D((self.prevpos[0], currpos[0]), (self.prevpos[1], currpos[1])))
        self.prevpos = currpos

    def _refresh(self):
        # If we have a new figure, something went wrong (closing figure failed)
        if self.figid != id(plt.gcf()):
            return False

        # Added these lines to make sure the map background is white
        plt.rcParams['figure.facecolor'] = 'white'
        plt.rcParams['axes.facecolor'] = 'white'
        plt.rcParams['savefig.facecolor'] = 'white'

        # Redraw current objects without blocking
        plt.draw()

        now = datetime.datetime.now()
        # Create a directory named 'gif' inside the base directory
        plt.savefig('gif/slamMap' + '- ' + str(now.hour).zfill(2) + '- ' + str(now.minute).zfill(2) + '- ' + str(now.second).zfill(2) + '.png')

        # Refresh display, setting flag on window close or keyboard interrupt
        try:
            plt.pause(.01)  # Arbitrary pause to force redraw
            return True
        except:
            return False

        return True

    def _m2pix(self, x_m, y_m):
        s = self.map_scale_meters_per_pixel
        return x_m / s, y_m / s


class MapVisualizer(Visualizer):

    def __init__(self, map_size_pixels, map_size_meters, title='MapVisualizer', show_trajectory=False):
        # Put origin in lower left; disallow zero-angle setting
        Visualizer._init(self, map_size_pixels, map_size_meters, title, 0, show_trajectory, 0)

    def display(self, x_m, y_m, theta_deg, mapbytes):
        self._setPose(x_m, y_m, theta_deg)
        mapimg = np.reshape(np.frombuffer(mapbytes, dtype=np.uint8), (self.map_size_pixels, self.map_size_pixels))
        # Pause to allow display to refresh
        plt.pause(.001)
        if self.img_artist is None:
            self.img_artist = self.ax.imshow(mapimg, cmap=colormap.gray)
        else:
            self.img_artist.set_data(mapimg)
        return self._refresh()
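# A minimal usage sketch (illustrative, not part of the module): the pose values
# and blank map below are made up; in practice they come from a SLAM backend such
# as BreezySLAM. Note that _refresh() saves PNG frames into a local gif/ directory,
# which must already exist.
#
#     MAP_SIZE_PIXELS = 500
#     MAP_SIZE_METERS = 10
#     viz = MapVisualizer(MAP_SIZE_PIXELS, MAP_SIZE_METERS, title='SLAM demo')
#     mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)   # blank map
#     if not viz.display(1.0, 2.0, 30.0, mapbytes):             # x (m), y (m), theta (deg)
#         exit(0)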
[ "matplotlib.pyplot.title", "numpy.radians", "matplotlib.lines.Line2D", "numpy.frombuffer", "matplotlib.pyplot.draw", "matplotlib.pyplot.figure", "matplotlib.use", "numpy.arange", "numpy.sin", "numpy.cos", "matplotlib.pyplot.pause", "matplotlib.pyplot.gcf", "datetime.datetime.now" ]
[((1023, 1046), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (1037, 1046), False, 'import matplotlib\n'), ((2046, 2093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)', 'facecolor': '"""white"""'}), "(figsize=(10, 10), facecolor='white')\n", (2056, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2404), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2397, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2843), 'numpy.arange', 'np.arange', (['shift', '(self.map_size_pixels + shift + 100)', '(100)'], {}), '(shift, self.map_size_pixels + shift + 100, 100)\n', (2795, 2843), True, 'import numpy as np\n'), ((4145, 4158), 'numpy.radians', 'np.radians', (['d'], {}), '(d)\n', (4155, 4158), True, 'import numpy as np\n'), ((4171, 4180), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (4177, 4180), True, 'import numpy as np\n'), ((4193, 4202), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (4199, 4202), True, 'import numpy as np\n'), ((4471, 4496), 'numpy.radians', 'np.radians', (['(theta_deg + d)'], {}), '(theta_deg + d)\n', (4481, 4496), True, 'import numpy as np\n'), ((4507, 4524), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (4513, 4524), True, 'import numpy as np\n'), ((4537, 4554), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (4543, 4554), True, 'import numpy as np\n'), ((5648, 5658), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5696), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5694, 5696), False, 'import datetime\n'), ((6830, 6846), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (6839, 6846), True, 'import matplotlib.pyplot as plt\n'), ((6013, 6028), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (6022, 6028), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6729), 'numpy.frombuffer', 'np.frombuffer', (['mapbytes'], {'dtype': 'np.uint8'}), '(mapbytes, dtype=np.uint8)\n', (6703, 6729), True, 'import numpy as np\n'), ((5332, 5341), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5339, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5148), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['(self.prevpos[0], currpos[0])', '(self.prevpos[1], currpos[1])'], {}), '((self.prevpos[0], currpos[0]), (self.prevpos[1], currpos[1]))\n', (5086, 5148), True, 'import matplotlib.lines as mlines\n')]
#!/usr/bin/env python
# coding: utf-8

# # Import libraries and data
#
# Dataset was obtained from the capstone project description (direct link [here](https://d3c33hcgiwev3.cloudfront.net/_429455574e396743d399f3093a3cc23b_capstone.zip?Expires=1530403200&Signature=FECzbTVo6TH7aRh7dXXmrASucl~Cy5mlO94P7o0UXygd13S~Afi38FqCD7g9BOLsNExNB0go0aGkYPtodekxCGblpc3I~R8TCtWRrys~2gciwuJLGiRp4CfNtfp08sFvY9NENaRb6WE2H4jFsAo2Z2IbXV~llOJelI3k-9Waj~M_&Key-Pair-Id=<KEY>)) and split manually into separate csv files. They were stored at my personal github account (folder link [here](https://github.com/caiomiyashiro/RecommenderSystemsNotebooks/tree/master/data/capstone)) and you can download and paste them inside your working directory in order for this notebook to run.

# In[1]:

import pandas as pd
import numpy as np

# ## Preprocess data
#
# Float data came with ',' in the csv and python works with '.', so the numbers were treated as text. In order to convert them to numbers, I first replaced all the commas by puncts and then converted the columns to float.

# In[2]:

items = pd.read_csv('data/capstone/Capstone Data - Office Products - Items.csv', index_col=0)
actual_ratings = pd.read_csv('data/capstone/Capstone Data - Office Products - Ratings.csv', index_col=0)
content_based = pd.read_csv('data/capstone/Capstone Data - Office Products - CBF.csv', index_col=0)
user_user = pd.read_csv('data/capstone/Capstone Data - Office Products - User-User.csv', index_col=0)
item_item = pd.read_csv('data/capstone/Capstone Data - Office Products - Item-Item.csv', index_col=0)
matrix_fact = pd.read_csv('data/capstone/Capstone Data - Office Products - MF.csv', index_col=0)
pers_bias = pd.read_csv('data/capstone/Capstone Data - Office Products - PersBias.csv', index_col=0)

items[['Availability','Price']] = items[['Availability','Price']].apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)

# preprocess
content_based = content_based.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
user_user = user_user.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
item_item = item_item.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
matrix_fact = matrix_fact.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
pers_bias = pers_bias.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)

print('items.shape = ' + str(items.shape))
print('actual_ratings.shape = ' + str(actual_ratings.shape))
print('content_based.shape = ' + str(content_based.shape))
print('user_user.shape = ' + str(user_user.shape))
print('item_item.shape = ' + str(item_item.shape))
print('matrix_fact.shape = ' + str(matrix_fact.shape))
print('pers_bias.shape = ' + str(pers_bias.shape))

actual_ratings.head()

# # Class RecommenderEvaluator
#
# In order to make it easier to evaluate the metrics, I created a class that receives all the original ratings and the predicted ratings from every recommender system, and defined functions to extract all the metrics established in section 1 of the capstone report. Let's take a look at a summary of the class before looking at the code:

# - **Constructor (init)**: receives all recommendation algorithms, besides the actual rating list and the list of items. All data is contained in the data downloaded from Coursera. Besides storing all recommendation algorithms, the constructor also calculates the 20 most frequent items, which is used in the popularity metric calculation.
#
# - **get_observed_ratings**: as the ratings matrix is sparse, this method only returns the items a user with id userId has purchased.
#
# - **get_top_n**: by ordering all the predicted ratings for each recommendation algorithm, we can extract what would be its 'top' recommendations for a given user. Given a parameter $n$, we can then return the top $n$ recommendations for all the recommendation algorithms.
#
# - **rmse**: by comparing the observed rating a given user has given to an item and the predicted rating an algorithm has computed for that user, we can have an idea of how much error the algorithm makes when predicting the user's ratings. Here we don't work with lists, as usually each user has rated only a small number of items. So here we get all the items the user has rated, recover these items from the algorithms' predictions and then calculate the error.
#
# - **nDCG**: by looking at lists now, we can have an idea of how optimal the ranked lists are. By using the scoring factor defined in the report, we can calculate the overall DCG for the recommenders' lists and then normalise them using the concepts of the nDCG.
#
# - **Price and availability diversity**: diversity metrics which evaluate how the recommended items' prices vary, *i.e.*, how large the standard deviation of the price is. The higher, the better in this case. The same goes for the availability index, but here, with higher standard deviations, it means the models are recommending items which are present and not present in local stores.
#
# - **Popularity**: a popular recommender tries to recommend items which have a high chance of being purchased. In the formulation of this metric, an item has a high chance of being purchased if lots of people have purchased it. In the class constructor, we take the observed ratings data and the item list and select the top $n$ (standard = 20) most purchased items. For a recommendation list, we return how many of its items were inside this list of top $n$ ones.

# In[3]:

class RecommenderEvaluator:

    def __init__(self, items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias):
        self.items = items
        self.actual_ratings = actual_ratings
        # static data containing the average score given by each user
        self.average_rating_per_userid = actual_ratings.apply(lambda row: np.average(row[~np.isnan(row)]))

        self.content_based = content_based
        self.user_user = user_user
        self.item_item = item_item
        self.matrix_fact = matrix_fact
        self.pers_bias = pers_bias
        # aggregate list. Makes for loops among all recommenders' predictions easier
        self.recommenders_list = [self.content_based, self.user_user, self.item_item, self.matrix_fact, self.pers_bias]
        self.recommenders_list_names = ['content_based', 'user_user', 'item_item', 'matrix_fact', 'pers_bias']

        # Used for item popularity metric.
        # Calculate the 20 most popular items (items which most of the customers bought)
        N_LIM = 20
        perc_users_bought_item = self.actual_ratings.apply(lambda item: np.sum(~np.isnan(item)), axis=0)/actual_ratings.shape[1]
        sort_pop_items = np.argsort(perc_users_bought_item)[::-1]
        self.pop_items = perc_users_bought_item.iloc[sort_pop_items][:N_LIM].index.values.astype(np.int)

    def get_observed_ratings(self, userId):
        """
        Returns all the items a given user evaluated and their ratings.
        Used mainly by all the metrics calculations
        :parameter: userId - user id
        :return: array of rated items. Index is the item id and value is the item rating
        """
        userId = str(userId)
        filtered_ratings = self.actual_ratings[userId]
        rated_items = filtered_ratings[~np.isnan(filtered_ratings)]
        return rated_items

    def get_top_n(self, userId, n):
        """
        Get the top n recommendations for every recommender in the list given a user id
        :parameter: userId - user id
        :parameter: n - max number of recommendations to return
        :return: dictionary where the key is the recommender's name and the value is an array of size n for the top n recommendations.
        """
        userId = str(userId)
        predicted_ratings = dict()
        for recommender, recommender_name in zip(self.recommenders_list, self.recommenders_list_names):
            # sort predictions descending so the highest predicted ratings come first
            item_ids = recommender[userId].sort_values(ascending=False)[:n].index.values
            predicted_ratings[recommender_name] = item_ids
        return predicted_ratings

    def rmse(self, userId):
        """
        Root Mean Square Error between the recommender's predictions and the actual ratings
        :parameter: userId - user id
        :return: dataframe containing the rmse from all recommenders given user id
        """
        userId = str(userId)
        observed_ratings = self.get_observed_ratings(userId)
        rmse_list = {'rmse': []}
        for recommender in self.recommenders_list:
            predicted_ratings = recommender.loc[observed_ratings.index, userId]
            rmse_list['rmse'].append(np.sqrt(np.average((predicted_ratings - observed_ratings)**2)))
        rmse_list = pd.DataFrame(rmse_list, index=self.recommenders_list_names)
        return rmse_list

    def nDCG(self, userId, top_n=5, individual_recommendation=None):
        """
        Normalised Discounted Cumulative Gain for all recommenders given user id
        :parameter: userId - user id
        :return: dataframe containing the nDCG from all recommenders given user id
        """
        ri = self.get_observed_ratings(userId)
        if(individual_recommendation is None):
            topn = self.get_top_n(userId, top_n)
            results_pandas_index = self.recommenders_list_names
        else:
            topn = individual_recommendation
            results_pandas_index = list(individual_recommendation.keys())

        # 1st step: Given recommendations, transform list into scores (see score transcriptions in the capstone report)
        scores_all = []
        for name, item_list in topn.items():
            scores = np.empty_like(item_list)                      # initialise 'random' array
            scores[:] = -10                                        ###########################
            # check which items returned by the recommender
            # the user already rated. Items users didn't rate
            # receive score = 0
            is_already_rated = np.isin(item_list, ri.index.values)
            scores[~is_already_rated] = 0
            for index, score in enumerate(scores):
                if(score != 0):                                    # for each recommended item the user rated,
                    if(ri[item_list[index]] < self.average_rating_per_userid[userId] - 1):   # score according to the report
                        scores[index] = -1
                    elif((ri[item_list[index]] >= self.average_rating_per_userid[userId] - 1) &
                         (ri[item_list[index]] < self.average_rating_per_userid[userId] + 0.5)):
                        scores[index] = 1
                    else:
                        scores[index] = 2
            scores_all.append(scores)                              # append all the transformed scores

        # 2nd step: Given scores, calculate the model's DCG, ideal DCG and then nDCG
        nDCG_all = dict()
        for index_model, scores_model in enumerate(scores_all):             # for each model
            model_DCG = 0                                                   # calculate model's DCG
            for index, score in enumerate(scores_model):
                index_ = index + 1
                model_DCG = model_DCG + score/np.log2(index_ + 1)
            ideal_rank_items = np.sort(scores_model)[::-1]                  # calculate model's ideal DCG
            ideal_rank_DCG = 0
            for index, ideal_score in enumerate(ideal_rank_items):
                index_ = index + 1
                ideal_rank_DCG = ideal_rank_DCG + ideal_score/np.log2(index_ + 1)
            if((ideal_rank_DCG == 0) | (np.abs(ideal_rank_DCG) < np.abs(model_DCG))):   # if nDCG is 0 or only negative scores came up
                nDCG = 0
            else:                                                           # calculate final nDCG when ideal DCG is != 0
                nDCG = model_DCG/ideal_rank_DCG
            nDCG_all[results_pandas_index[index_model]] = nDCG              # save each model's nDCG in a dict

        # convert it to dataframe
        result_final = pd.DataFrame(nDCG_all, index=range(1)).transpose()
        result_final.columns = ['nDCG']
        return result_final

    def price_diversity(self, userId, top_n=5, individual_recommendation=None):
        """
        Mean and standard deviation of the price of the top n products recommended by each algorithm.
        The intuition for a price-wise diverse recommender is to have a high price standard deviation
        :parameter: userId - user id
        :return: dataframe containing the price's mean and standard deviation from all recommenders given user id
        """
        if(individual_recommendation is None):
            topn = self.get_top_n(userId, top_n)
        else:
            topn = individual_recommendation
        stats = pd.DataFrame()
        for key, value in topn.items():
            data_filtered = self.items.loc[topn[key]][['Price']].agg(['mean','std']).transpose()
            data_filtered.index = [key]
            stats = stats.append(data_filtered)
        return stats

    def availability_diversity(self, userId, top_n=5, individual_recommendation=None):
        """
        Mean and standard deviation of the availability index of the top n products recommended by each algorithm.
        The intuition for a high availability diversity is to have a small mean value of the availability index
        :parameter: userId - user id
        :return: dataframe containing the availability index's mean and standard deviation from all recommenders given user id
        """
        if(individual_recommendation is None):
            topn = self.get_top_n(userId, top_n)
        else:
            topn = individual_recommendation
        stats = pd.DataFrame()
        for key, value in topn.items():
            data_filtered = self.items.loc[topn[key]][['Availability']].agg(['mean','std']).transpose()
            data_filtered.index = [key]
            stats = stats.append(data_filtered)
        return stats

    def popularity(self, userId, top_n=5, individual_recommendation=None):
        """
        Return how many items of the top n items are among the most popular purchased items.
        Default is the 20 most purchased items.
        :parameter: userId - user id
        :return: dataframe containing the number of popular items in the recommended list from all recommenders given user id
        """
        if(individual_recommendation is None):
            topn = self.get_top_n(userId, top_n)
            results_pandas_index = self.recommenders_list_names
        else:
            topn = individual_recommendation
            results_pandas_index = list(individual_recommendation.keys())
        results = {'popularity': []}
        for recommender, recommendations in topn.items():
            popularity = np.sum(np.isin(recommendations, self.pop_items))
            results['popularity'].append(popularity)
        return pd.DataFrame(results, index=results_pandas_index)

    def precision_at_n(self, userId, top_n=5, individual_recommendation=None):
        if(individual_recommendation is None):
            topn = self.get_top_n(userId, top_n)
            results_pandas_index = self.recommenders_list_names
        else:
            topn = individual_recommendation
            results_pandas_index = list(individual_recommendation.keys())
        observed_ratings = self.get_observed_ratings(userId).index.values
        precisions = {'precision_at_'+str(top_n): []}
        for recommender, recommendations in topn.items():
            precisions['precision_at_'+str(top_n)].append(np.sum(np.isin(recommendations, observed_ratings))/top_n)
        return pd.DataFrame(precisions, index=results_pandas_index)

# # Test methods:
#
# Just to have an idea of the output of each method, let's call them all with a test user. In the next section we will calculate these metrics for all users.

# In[4]:

userId = '64'
re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)

# ## Test RMSE

# In[5]:

re.rmse(userId)

# ## Test nDCG

# In[6]:

re.nDCG(userId)

# ## Test Diversity - Price and Availability

# In[7]:

re.price_diversity(userId)

# In[8]:

re.availability_diversity(userId)

# ## Test Popularity

# In[9]:

re.popularity(userId)

# ## Test Precision@N

# In[10]:

re.precision_at_n(userId)

# # Average metrics by all users
#
# Specifically for user 907, the recommendations from the user user recommender came with all nulls (original dataset). This impacted the RMSE calculation in particular, as one NaN damaged the entire average calculation. So specifically for RMSE we did a separate calculation section. All the other metrics are going to be calculated in the next code block.
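# A quick worked example of the score-based nDCG defined above (added for
# illustration -- the five-element score list is made up, not taken from the
# dataset): with scores [2, 0, 1, 0, 0], DCG = 2/log2(2) + 1/log2(4) = 2.5,
# while the ideal ordering [2, 1, 0, 0, 0] gives 2/log2(2) + 1/log2(3) ~ 2.63,
# so nDCG ~ 0.95.

scores_example = np.array([2, 0, 1, 0, 0])
dcg_example = sum(s / np.log2(i + 2) for i, s in enumerate(scores_example))
idcg_example = sum(s / np.log2(i + 2) for i, s in enumerate(np.sort(scores_example)[::-1]))
print(dcg_example / idcg_example)   # ~0.95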
# In[11]:

re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)

count = np.array([0, 0, 0, 0, 0])
for userId in actual_ratings.columns:
    if(userId == '907'):
        rmse_recommenders = re.rmse(userId).fillna(0)
    else:
        rmse_recommenders = re.rmse(userId)
    count = count + rmse_recommenders['rmse']

# as we didn't use user 907 for user user, divide it by the number of users - 1
denominator = [len(actual_ratings.columns)] * 5
denominator[1] = len(actual_ratings.columns) - 1
print('Average RMSE for all users')
count / denominator

# In[12]:

count_nDCG = np.array([0, 0, 0, 0, 0])
count_diversity_price = np.zeros([5, 2])          # np.zeros instead of np.ndarray, which returns uninitialised memory
count_diversity_availability = np.zeros([5, 2])
count_popularity = np.array([0, 0, 0, 0, 0])
count_precision_at_5 = np.array([0, 0, 0, 0, 0])
for userId in actual_ratings.columns:
    nDCG_recommenders = re.nDCG(userId)
    count_nDCG = count_nDCG + nDCG_recommenders['nDCG']

    diversity_price_recommenders = re.price_diversity(userId)
    count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]

    diversity_availability_recommenders = re.availability_diversity(userId)
    count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]

    popularity_recommenders = re.popularity(userId)
    count_popularity = count_popularity + popularity_recommenders['popularity']

    precision_recommenders = re.precision_at_n(userId)
    count_precision_at_5 = count_precision_at_5 + precision_recommenders['precision_at_5']

print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('---\n')
print('Average Precision@5')
print('---\n')
print(count_precision_at_5/len(actual_ratings.columns))

# # Final Analysis
#
# In terms of **RMSE**, the user-user collaborative filtering showed to be the most effective, despite not being significantly better.
#
# For the nDCG rank score, again user user, and now also item item collaborative filtering, were the best.
#
# In terms of price diversity, the item item algorithm was the most diverse, providing products varying ~32 dollars around the mean item price. Matrix factorisation and user user follow right behind, with a price standard deviation around 25 dollars. An interesting case here was the *pers_bias* algorithm, as it recommended basically cheap products with a low standard deviation.
#
# For the availability index, all the algorithms besides user user managed to recommend items not so present in the local stores **together** with items present in local stores, as we can see that they also provided items with a high availability index (high standard deviation).
#
# In terms of popularity, no algorithm actually managed to obtain good scores in the way we defined them. So, if popularity becomes a focus in the future, we can either change the popularity concept or improve mechanics in the recommender so it predicts higher scores for the most popular items in the store.
#
# After this evaluation, it seemed to us that the item-item recommender system had an overall better performance, highlighted by its diversity scores. Unfortunately, the items that the item item recommender suggested are overall pricey, and we can check whether it can be blended with the pers_bias algorithm, as that one indicated cheap prices and a low price standard deviation. Matrix factorization performed well too, but it didn't outperform any of the other recommenders.

# # Hybridization Techniques - Part III
#
# We are trying four different types of hybridization here.
#
# 1. Linear ensemble
# 2. Non linear ensemble
# 3. Top 1 from each recommender
# 4. Recommender switching
#
# The first two options approach the recommender's performance in terms of how well it predicts the users' ratings, so their only evaluation will be in terms of RMSE.
#
# The third approach has the intuition that, if we take the top 1 recommendation from each algorithm, the resulting 5 item list will have a better performance in terms of identifying 'good' items for users. In this case, we defined an item as 'good' if the recommender suggested an item the user already bought. Therefore, the final measurement of this hybridization mechanism is the precision@5, as we end up with a 5 item list.
#
# The final mixing algorithm builds on how collaborative filtering mechanisms perform on items that do not have enough users/items in their calculations. As this is a well known weakness of these recommenders, the idea was to check how many items we would affect if we established a minimum amount of data required for us to use collaborative filtering. Otherwise, if an item doesn't have enough support in the form of users' ratings, we could fall back to a content based recommendation or even, as a last resort, a non personalised one.

# ## Dataset Creation and User Sample Definition
#
# ### Dataset
#
# For the first and second approaches, we need another perspective on the data. The dataset contains all the existing ratings from all users and concatenates all the predictions made by the 5 traditional recommenders. The idea is to use the observed rating as the target variable and all recommenders' predictions as explanatory variables, *i.e.* treat this as a regression problem.

# In[13]:

obs_ratings_list = []
content_based_list = []
user_user_list = []
item_item_list = []
matrix_fact_list = []
pers_bias_list = []

re = RecommenderEvaluator(items, actual_ratings, content_based, user_user, item_item, matrix_fact, pers_bias)
for userId in actual_ratings.columns:
    observed_ratings = re.get_observed_ratings(userId)
    obs_ratings_list.extend(observed_ratings.values)
    content_based_list.extend(content_based.loc[observed_ratings.index, userId].values)
    user_user_list.extend(user_user.loc[observed_ratings.index, userId].values)
    item_item_list.extend(item_item.loc[observed_ratings.index, userId].values)
    matrix_fact_list.extend(matrix_fact.loc[observed_ratings.index, userId].values)
    pers_bias_list.extend(pers_bias.loc[observed_ratings.index, userId].values)

dataset = pd.DataFrame({'rating': obs_ratings_list, 'content_based': content_based_list, 'user_user': user_user_list,
                        'item_item': item_item_list, 'matrix_fact': matrix_fact_list, 'pers_bias': pers_bias_list})
dataset = dataset.dropna()
dataset.head()

# ### In order to have an idea of the results, let's choose 3 users randomly to show the predictions using the new hybrid models

# In[14]:

np.random.seed(42)
sample_users = np.random.choice(actual_ratings.columns, 3).astype(str)
print('sample_users: ' + str(sample_users))

# ### Get recommenders' predictions for sample users in order to create input for ensemble models (hybridization I and II)

# In[15]:

from collections import OrderedDict

df_sample = pd.DataFrame()
for user in sample_users:
    content_based_ = re.content_based[user]
    user_user_ = re.user_user[user]
    item_item_ = re.item_item[user]
    matrix_fact_ = re.matrix_fact[user]
    pers_bias_ = re.pers_bias[user]
    df_sample = df_sample.append(pd.DataFrame(OrderedDict({'user': user, 'item': actual_ratings.index.values,
                                                          'content_based': content_based_, 'user_user': user_user_,
                                                          'item_item': item_item_, 'matrix_fact': matrix_fact_,
                                                          'pers_bias': pers_bias_})), ignore_index=True)
df_sample.head()

# ## Focus on Performance (RMSE) I - Linear Model

# In[16]:

from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

linear = LinearRegression()
print('RMSE for linear ensemble of recommender systems:')
# cross_val_score defaults to R^2 for regressors, so explicitly request the
# (negated) MSE and take the square root in order to actually report an RMSE
np.sqrt(np.mean(-cross_val_score(linear, dataset.drop('rating', axis=1), dataset['rating'],
                            cv=5, scoring='neg_mean_squared_error')))

# ### Predictions for sample users: Creating top 5 recommendations for sample users

# In[17]:

pred_cols = ['content_based','user_user','item_item','matrix_fact','pers_bias']
predictions = linear.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user': df_sample['user'], 'item': df_sample['item'], 'predictions': predictions}))
recommendations.groupby('user').apply(lambda df_user: df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])

# ## Focus on Performance (RMSE) II - Ensemble

# In[18]:

from sklearn.ensemble import RandomForestRegressor

rf = RandomForestRegressor(random_state=42)
print('RMSE for non linear ensemble of recommender systems:')
np.sqrt(np.mean(-cross_val_score(rf, dataset.drop('rating', axis=1), dataset['rating'],
                            cv=5, scoring='neg_mean_squared_error')))

# ### Predictions for sample users:

# In[19]:

predictions = rf.fit(dataset.drop('rating', axis=1), dataset['rating']).predict(df_sample[pred_cols])
recommendations = pd.DataFrame(OrderedDict({'user': df_sample['user'], 'item': df_sample['item'], 'predictions': predictions}))
recommendations.groupby('user').apply(lambda df_user: df_user.loc[df_user['predictions'].sort_values(ascending=False)[:5].index.values])

# ## Focus on Recommendations - Top 1 from each Recommender
#
# With the all-top-1 recommender, we can evaluate its performance not just with RMSE, but with all the list metrics we evaluated before. As a business constraint, we will also pay more attention to the *precision@5* metric, as general information on how good the recommender is at providing suggestions that the user will buy, or already bought in this case.
# The majority of metrics were on the same scale as the best metrics in the all-models comparison. However, it's worth highlighting that the top-1-all recommender had the best *precision@5* metric among all recommenders, showing it to be a **suitable hybridization mechanism**.

# In[20]:

count_nDCG = np.array([0])
count_diversity_price = np.zeros([1, 2])          # np.zeros instead of np.ndarray, which returns uninitialised memory
count_diversity_availability = np.zeros([1, 2])
count_popularity = np.array([0])
count_precision = np.array([0])
for userId in actual_ratings.columns:
    top_n_1 = re.get_top_n(userId, 1)
    user_items = {}
    user_items['top_1_all'] = [a[0] for a in top_n_1.values()]

    nDCG_recommenders = re.nDCG(userId, individual_recommendation=user_items)
    count_nDCG = count_nDCG + nDCG_recommenders['nDCG']

    diversity_price_recommenders = re.price_diversity(userId, individual_recommendation=user_items)
    count_diversity_price = count_diversity_price + diversity_price_recommenders[['mean','std']]

    diversity_availability_recommenders = re.availability_diversity(userId, individual_recommendation=user_items)
    count_diversity_availability = count_diversity_availability + diversity_availability_recommenders[['mean','std']]

    popularity_recommenders = re.popularity(userId, individual_recommendation=user_items)
    count_popularity = count_popularity + popularity_recommenders['popularity']

    precision_recommenders = re.precision_at_n(userId, individual_recommendation=user_items)
    count_precision = count_precision + precision_recommenders['precision_at_5']

print('\n---')
print('Average nDCG')
print('---\n')
print(count_nDCG/len(actual_ratings.columns))
print('\n---')
print('Average Price - Diversity Measure')
print('---\n')
print(count_diversity_price/len(actual_ratings.columns))
print('\n---')
print('Average Availability - Diversity Measure')
print('---\n')
print(count_diversity_availability/len(actual_ratings.columns))
print('\n---')
print('Average Popularity')
print('---\n')
print(count_popularity/len(actual_ratings.columns))
print('\n---')
print('Average Precision@5')
print('---\n')
print(count_precision/len(actual_ratings.columns))

# ### Predictions for sample users:

# In[21]:

results = {}
for user_sample in sample_users:
    results[user_sample] = [a[0] for a in list(re.get_top_n(user_sample, 1).values())]
results

# ## Focus on Recommendations - Switching algorithm
#
# ### Can we use a Content Based Recommender for items with fewer evaluations?
#
# We can see in the cumulative histogram that only around 20% of the rated items had 10 or more ratings. This signals that maybe we can prioritize the use of a content based recommender, or even a non personalised one, for the majority of the items which don't have a sufficient amount of ratings for the collaborative filtering algorithms to be stable.

# In[23]:

import matplotlib.pyplot as plt

item_nbr_ratings = actual_ratings.apply(lambda col: np.sum(~np.isnan(col)), axis=1)
item_max_nbr_ratings = item_nbr_ratings.max()
range_item_max_nbr_ratings = range(item_max_nbr_ratings + 1)

plt.figure(figsize=(15, 3))
plt.subplot(121)
nbr_ratings_items = []
for i in range_item_max_nbr_ratings:
    nbr_ratings_items.append(len(item_nbr_ratings[item_nbr_ratings == i]))
plt.plot(nbr_ratings_items)
plt.xlabel('Number of ratings')
plt.ylabel('Amount of items')
plt.title('Histogram of amount of ratings')

plt.subplot(122)
cum_nbr_ratings_items = []
for i in range(len(nbr_ratings_items)):
    cum_nbr_ratings_items.append(np.sum(nbr_ratings_items[:i]))
cum_nbr_ratings_items = np.array(cum_nbr_ratings_items)
plt.plot(cum_nbr_ratings_items/actual_ratings.shape[0])
plt.xlabel('Number of ratings')
plt.ylabel('Cumulative distribution')
plt.title('Cumulative histogram of amount of ratings');

# In[ ]:
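# A minimal sketch of the switching idea discussed above (not part of the original
# analysis): the 10-rating threshold is an assumption read off the histogram, and
# the fallback simply reuses the precomputed content_based prediction table.

RATING_THRESHOLD = 10   # assumed minimum rating support for collaborative filtering

def switching_predict(userId, itemId):
    # use the item-item collaborative prediction when the item has enough support,
    # otherwise fall back to the content-based prediction
    if item_nbr_ratings[itemId] >= RATING_THRESHOLD:
        return item_item.loc[itemId, str(userId)]
    return content_based.loc[itemId, str(userId)]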
[ "matplotlib.pyplot.title", "numpy.isin", "numpy.random.seed", "numpy.sum", "numpy.abs", "pandas.read_csv", "numpy.isnan", "numpy.argsort", "matplotlib.pyplot.figure", "numpy.ndarray", "pandas.DataFrame", "numpy.empty_like", "numpy.random.choice", "numpy.average", "numpy.log2", "sklearn.ensemble.RandomForestRegressor", "sklearn.linear_model.LinearRegression", "numpy.sort", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.plot", "numpy.array", "collections.OrderedDict", "matplotlib.pyplot.xlabel" ]
[((1069, 1158), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Items.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Items.csv',\n index_col=0)\n", (1080, 1158), True, 'import pandas as pd\n'), ((1173, 1264), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Ratings.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Ratings.csv',\n index_col=0)\n", (1184, 1264), True, 'import pandas as pd\n'), ((1279, 1366), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - CBF.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - CBF.csv',\n index_col=0)\n", (1290, 1366), True, 'import pandas as pd\n'), ((1375, 1468), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - User-User.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - User-User.csv',\n index_col=0)\n", (1386, 1468), True, 'import pandas as pd\n'), ((1477, 1570), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - Item-Item.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - Item-Item.csv',\n index_col=0)\n", (1488, 1570), True, 'import pandas as pd\n'), ((1581, 1667), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - MF.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - MF.csv',\n index_col=0)\n", (1592, 1667), True, 'import pandas as pd\n'), ((1676, 1768), 'pandas.read_csv', 'pd.read_csv', (['"""data/capstone/Capstone Data - Office Products - PersBias.csv"""'], {'index_col': '(0)'}), "('data/capstone/Capstone Data - Office Products - PersBias.csv',\n index_col=0)\n", (1687, 1768), True, 'import pandas as pd\n'), ((17465, 17490), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (17473, 17490), True, 'import numpy as np\n'), ((17964, 17989), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (17972, 17989), True, 'import numpy as np\n'), ((18010, 18028), 'numpy.ndarray', 'np.ndarray', (['[5, 2]'], {}), '([5, 2])\n', (18020, 18028), True, 'import numpy as np\n'), ((18059, 18077), 'numpy.ndarray', 'np.ndarray', (['[5, 2]'], {}), '([5, 2])\n', (18069, 18077), True, 'import numpy as np\n'), ((18096, 18121), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (18104, 18121), True, 'import numpy as np\n'), ((18141, 18166), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (18149, 18166), True, 'import numpy as np\n'), ((23941, 24152), 'pandas.DataFrame', 'pd.DataFrame', (["{'rating': obs_ratings_list, 'content_based': content_based_list,\n 'user_user': user_user_list, 'item_item': item_item_list, 'matrix_fact':\n matrix_fact_list, 'pers_bias': pers_bias_list}"], {}), "({'rating': obs_ratings_list, 'content_based':\n content_based_list, 'user_user': user_user_list, 'item_item':\n item_item_list, 'matrix_fact': matrix_fact_list, 'pers_bias':\n pers_bias_list})\n", (23953, 24152), True, 'import pandas as pd\n'), ((24335, 24353), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (24349, 24353), True, 'import numpy as np\n'), ((24657, 24671), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24669, 24671), True, 'import pandas as pd\n'), ((25365, 25383), 'sklearn.linear_model.LinearRegression', 'LinearRegression', 
([], {}), '()\n', (25381, 25383), False, 'from sklearn.linear_model import LinearRegression\n'), ((26199, 26237), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (26220, 26237), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((27526, 27539), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27534, 27539), True, 'import numpy as np\n'), ((27564, 27582), 'numpy.ndarray', 'np.ndarray', (['[1, 2]'], {}), '([1, 2])\n', (27574, 27582), True, 'import numpy as np\n'), ((27613, 27631), 'numpy.ndarray', 'np.ndarray', (['[1, 2]'], {}), '([1, 2])\n', (27623, 27631), True, 'import numpy as np\n'), ((27650, 27663), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27658, 27663), True, 'import numpy as np\n'), ((27682, 27695), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (27690, 27695), True, 'import numpy as np\n'), ((30332, 30359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (30342, 30359), True, 'import matplotlib.pyplot as plt\n'), ((30359, 30375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (30370, 30375), True, 'import matplotlib.pyplot as plt\n'), ((30511, 30538), 'matplotlib.pyplot.plot', 'plt.plot', (['nbr_ratings_items'], {}), '(nbr_ratings_items)\n', (30519, 30538), True, 'import matplotlib.pyplot as plt\n'), ((30539, 30570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (30549, 30570), True, 'import matplotlib.pyplot as plt\n'), ((30571, 30600), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount of items"""'], {}), "('Amount of items')\n", (30581, 30600), True, 'import matplotlib.pyplot as plt\n'), ((30601, 30644), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of amount of ratings"""'], {}), "('Histogram of amount of ratings')\n", (30610, 30644), True, 'import matplotlib.pyplot as plt\n'), ((30646, 30662), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (30657, 30662), True, 'import matplotlib.pyplot as plt\n'), ((30823, 30854), 'numpy.array', 'np.array', (['cum_nbr_ratings_items'], {}), '(cum_nbr_ratings_items)\n', (30831, 30854), True, 'import numpy as np\n'), ((30855, 30912), 'matplotlib.pyplot.plot', 'plt.plot', (['(cum_nbr_ratings_items / actual_ratings.shape[0])'], {}), '(cum_nbr_ratings_items / actual_ratings.shape[0])\n', (30863, 30912), True, 'import matplotlib.pyplot as plt\n'), ((30911, 30942), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (30921, 30942), True, 'import matplotlib.pyplot as plt\n'), ((30943, 30980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative distribution"""'], {}), "('Cumulative distribution')\n", (30953, 30980), True, 'import matplotlib.pyplot as plt\n'), ((30981, 31035), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative histogram of amount of ratings"""'], {}), "('Cumulative histogram of amount of ratings')\n", (30990, 31035), True, 'import matplotlib.pyplot as plt\n'), ((25848, 25947), 'collections.OrderedDict', 'OrderedDict', (["{'user': df_sample['user'], 'item': df_sample['item'], 'predictions':\n predictions}"], {}), "({'user': df_sample['user'], 'item': df_sample['item'],\n 'predictions': predictions})\n", (25859, 25947), False, 'from collections import OrderedDict\n'), ((26570, 26669), 'collections.OrderedDict', 'OrderedDict', (["{'user': df_sample['user'], 'item': df_sample['item'], 'predictions':\n 
predictions}"], {}), "({'user': df_sample['user'], 'item': df_sample['item'],\n 'predictions': predictions})\n", (26581, 26669), False, 'from collections import OrderedDict\n'), ((8900, 8959), 'pandas.DataFrame', 'pd.DataFrame', (['rmse_list'], {'index': 'self.recommenders_list_names'}), '(rmse_list, index=self.recommenders_list_names)\n', (8912, 8959), True, 'import pandas as pd\n'), ((13310, 13324), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13322, 13324), True, 'import pandas as pd\n'), ((14242, 14256), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14254, 14256), True, 'import pandas as pd\n'), ((15460, 15509), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'index': 'results_pandas_index'}), '(results, index=results_pandas_index)\n', (15472, 15509), True, 'import pandas as pd\n'), ((16226, 16278), 'pandas.DataFrame', 'pd.DataFrame', (['precisions'], {'index': 'results_pandas_index'}), '(precisions, index=results_pandas_index)\n', (16238, 16278), True, 'import pandas as pd\n'), ((24369, 24412), 'numpy.random.choice', 'np.random.choice', (['actual_ratings.columns', '(3)'], {}), '(actual_ratings.columns, 3)\n', (24385, 24412), True, 'import numpy as np\n'), ((30763, 30792), 'numpy.sum', 'np.sum', (['nbr_ratings_items[:i]'], {}), '(nbr_ratings_items[:i])\n', (30769, 30792), True, 'import numpy as np\n'), ((6836, 6870), 'numpy.argsort', 'np.argsort', (['perc_users_bought_item'], {}), '(perc_users_bought_item)\n', (6846, 6870), True, 'import numpy as np\n'), ((9852, 9876), 'numpy.empty_like', 'np.empty_like', (['item_list'], {}), '(item_list)\n', (9865, 9876), True, 'import numpy as np\n'), ((10125, 10160), 'numpy.isin', 'np.isin', (['item_list', 'ri.index.values'], {}), '(item_list, ri.index.values)\n', (10132, 10160), True, 'import numpy as np\n'), ((24936, 25145), 'collections.OrderedDict', 'OrderedDict', (["{'user': user, 'item': actual_ratings.index.values, 'content_based':\n content_based_, 'user_user': user_user_, 'item_item': item_item_,\n 'matrix_fact': matrix_fact_, 'pers_bias': pers_bias_}"], {}), "({'user': user, 'item': actual_ratings.index.values,\n 'content_based': content_based_, 'user_user': user_user_, 'item_item':\n item_item_, 'matrix_fact': matrix_fact_, 'pers_bias': pers_bias_})\n", (24947, 25145), False, 'from collections import OrderedDict\n'), ((7429, 7455), 'numpy.isnan', 'np.isnan', (['filtered_ratings'], {}), '(filtered_ratings)\n', (7437, 7455), True, 'import numpy as np\n'), ((11586, 11607), 'numpy.sort', 'np.sort', (['scores_model'], {}), '(scores_model)\n', (11593, 11607), True, 'import numpy as np\n'), ((15351, 15391), 'numpy.isin', 'np.isin', (['recommendations', 'self.pop_items'], {}), '(recommendations, self.pop_items)\n', (15358, 15391), True, 'import numpy as np\n'), ((30202, 30215), 'numpy.isnan', 'np.isnan', (['col'], {}), '(col)\n', (30210, 30215), True, 'import numpy as np\n'), ((8824, 8879), 'numpy.average', 'np.average', (['((predicted_ratings - observed_ratings) ** 2)'], {}), '((predicted_ratings - observed_ratings) ** 2)\n', (8834, 8879), True, 'import numpy as np\n'), ((12043, 12065), 'numpy.abs', 'np.abs', (['ideal_rank_DCG'], {}), '(ideal_rank_DCG)\n', (12049, 12065), True, 'import numpy as np\n'), ((12068, 12085), 'numpy.abs', 'np.abs', (['model_DCG'], {}), '(model_DCG)\n', (12074, 12085), True, 'import numpy as np\n'), ((11530, 11549), 'numpy.log2', 'np.log2', (['(index_ + 1)'], {}), '(index_ + 1)\n', (11537, 11549), True, 'import numpy as np\n'), ((11981, 12000), 'numpy.log2', 'np.log2', (['(index_ + 1)'], {}), 
'(index_ + 1)\n', (11988, 12000), True, 'import numpy as np\n'), ((16160, 16202), 'numpy.isin', 'np.isin', (['recommendations', 'observed_ratings'], {}), '(recommendations, observed_ratings)\n', (16167, 16202), True, 'import numpy as np\n'), ((5987, 6000), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (5995, 6000), True, 'import numpy as np\n'), ((6762, 6776), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (6770, 6776), True, 'import numpy as np\n')]
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import imutils
import cv2
import numpy as np
import sys

# parameters for loading data and images
detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.106-0.65.hdf5'
img_path = sys.argv[1]

# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"]

# reading the frame
orig_frame = cv2.imread(img_path)
frame = cv2.imread(img_path, 0)   # grayscale copy for detection and classification
faces = face_detection.detectMultiScale(frame, scaleFactor=1.2, minNeighbors=5,
                                        minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)

if len(faces) > 0:
    # keep only the largest detected face
    faces = sorted(faces, reverse=True, key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
    (fX, fY, fW, fH) = faces
    roi = frame[fY:fY + fH, fX:fX + fW]
    roi = cv2.resize(roi, (48, 48))
    roi = roi.astype("float") / 255.0
    roi = img_to_array(roi)
    roi = np.expand_dims(roi, axis=0)
    preds = emotion_classifier.predict(roi)[0]
    emotion_probability = np.max(preds)
    label = EMOTIONS[preds.argmax()]
    cv2.putText(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    cv2.rectangle(orig_frame, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)

print(label)
cv2.imshow('test_face', orig_frame)
cv2.imwrite('test_output/' + img_path.split('/')[-1], orig_frame)
if (cv2.waitKey(2000) & 0xFF == ord('q')):
    sys.exit("Thanks")
cv2.destroyAllWindows()
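# A minimal invocation sketch (illustrative; the script name and image path are
# placeholders, and the cascade/model files must exist at the hard-coded paths,
# with a test_output/ directory already created):
#
#     $ python predict_emotion.py test_images/face.jpg
#
# The script prints the predicted label, shows the annotated frame for up to two
# seconds, and writes an annotated copy of the image into test_output/.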
[ "keras.models.load_model", "cv2.putText", "cv2.waitKey", "cv2.destroyAllWindows", "numpy.expand_dims", "cv2.rectangle", "cv2.imread", "keras.preprocessing.image.img_to_array", "numpy.max", "cv2.CascadeClassifier", "sys.exit", "cv2.imshow", "cv2.resize" ]
[((425, 468), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['detection_model_path'], {}), '(detection_model_path)\n', (446, 468), False, 'import cv2\n'), ((490, 535), 'keras.models.load_model', 'load_model', (['emotion_model_path'], {'compile': '(False)'}), '(emotion_model_path, compile=False)\n', (500, 535), False, 'from keras.models import load_model\n'), ((649, 669), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (659, 669), False, 'import cv2\n'), ((679, 702), 'cv2.imread', 'cv2.imread', (['img_path', '(0)'], {}), '(img_path, 0)\n', (689, 702), False, 'import cv2\n'), ((1461, 1496), 'cv2.imshow', 'cv2.imshow', (['"""test_face"""', 'orig_frame'], {}), "('test_face', orig_frame)\n", (1471, 1496), False, 'import cv2\n'), ((1626, 1649), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1647, 1649), False, 'import cv2\n'), ((1012, 1037), 'cv2.resize', 'cv2.resize', (['roi', '(48, 48)'], {}), '(roi, (48, 48))\n', (1022, 1037), False, 'import cv2\n'), ((1086, 1103), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1098, 1103), False, 'from keras.preprocessing.image import img_to_array\n'), ((1114, 1141), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1128, 1141), True, 'import numpy as np\n'), ((1215, 1228), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (1221, 1228), True, 'import numpy as np\n'), ((1270, 1368), 'cv2.putText', 'cv2.putText', (['orig_frame', 'label', '(fX, fY - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(orig_frame, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, \n 0.45, (0, 0, 255), 2)\n', (1281, 1368), False, 'import cv2\n'), ((1368, 1439), 'cv2.rectangle', 'cv2.rectangle', (['orig_frame', '(fX, fY)', '(fX + fW, fY + fH)', '(0, 0, 255)', '(2)'], {}), '(orig_frame, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)\n', (1381, 1439), False, 'import cv2\n'), ((1607, 1625), 'sys.exit', 'sys.exit', (['"""Thanks"""'], {}), "('Thanks')\n", (1615, 1625), False, 'import sys\n'), ((1564, 1581), 'cv2.waitKey', 'cv2.waitKey', (['(2000)'], {}), '(2000)\n', (1575, 1581), False, 'import cv2\n')]
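For quick reference, the face-ROI shaping the snippet above performs before `predict` can be reproduced with numpy alone. The 48x48 size and /255 scaling come from the snippet; the helper name below is made up for illustration.

import numpy as np

def preprocess_roi(roi):
    # roi: 2-D uint8 grayscale crop, already resized to 48x48
    roi = roi.astype("float") / 255.0        # scale pixels into [0, 1]
    roi = roi[..., np.newaxis]               # add channel axis, as img_to_array does for grayscale
    return np.expand_dims(roi, axis=0)      # add batch axis -> shape (1, 48, 48, 1)

batch = preprocess_roi(np.zeros((48, 48), dtype=np.uint8))
assert batch.shape == (1, 48, 48, 1)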
from ehr_functions.models.types._sklearn import SKLearnModel from sklearn.linear_model import ElasticNet as EN import numpy as np class ElasticNet(SKLearnModel): def __init__(self, round_output=False, **kwargs): super().__init__(EN, kwargs) self.round_output = round_output def predict(self, x): output = super().predict(x) if self.round_output: output = np.round(output) return output
[ "numpy.round" ]
[((410, 426), 'numpy.round', 'np.round', (['output'], {}), '(output)\n', (418, 426), True, 'import numpy as np\n')]
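A hedged illustration of what the `round_output` flag above does to the wrapped regressor's predictions, using plain numpy in place of the project's `SKLearnModel` machinery; the raw values are arbitrary.

import numpy as np

raw = np.array([0.2, 0.7, 1.4])   # arbitrary regression outputs
print(np.round(raw))              # [0. 1. 1.] -- what predict() returns when round_output=True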
from __future__ import annotations from typing import Iterable, Optional, Union import materia as mtr import numpy as np import scipy.linalg __all__ = [ "Identity", "Inversion", "Reflection", "ProperRotation", "ImproperRotation", "SymmetryOperation", ] class SymmetryOperation: def __init__( self, matrix: Optional[np.ndarray] = None, determinant: Optional[Union[int, float]] = None, trace: Optional[float] = None, axis: Optional[np.ndarray] = None, ) -> None: if matrix is not None: self.matrix, _ = scipy.linalg.polar(matrix) elif determinant is not None and trace is not None: if axis is None: self.matrix, _ = scipy.linalg.polar( determinant * np.eye(3).astype("float64") ) else: a = mtr.normalize(axis) cos_theta = (trace - determinant) / 2 cos_theta = max(min(cos_theta, 1), -1) theta = np.arccos(cos_theta) self.matrix = mtr.rotation_matrix( axis=a, theta=theta, improper=(determinant == -1) ) else: raise ValueError def __eq__(self, other: SymmetryOperation) -> bool: return hasattr(other, "matrix") and np.allclose( self.matrix, other.matrix, atol=1e-3 ) @property def det(self) -> int: return int(round(np.linalg.det(self.matrix))) @property def tr(self) -> float: return np.trace(self.matrix) @property def cos_theta(self) -> float: return max(min((self.tr - np.sign(self.det)) / 2, 1.0), -1.0) @property def axis(self) -> np.ndarray: # algorithm from scipp.ucsc.edu/~haber/ph116A/rotation_11.pdf if np.isclose(abs(self.tr), 3): return None if np.isclose(self.tr * self.det, -1): S = (np.eye(3) + self.det * self.matrix) / 2 for i in range(3): signs = np.sign(S[:, i]) if not np.allclose(signs, [0, 0, 0]): return signs * np.sqrt(np.abs(np.diag(S))) inds = np.triu_indices(3, k=1) return mtr.normalize( (self.matrix.T - self.matrix)[inds][::-1] * np.array([1, -1, 1]) ) @property def inverse(self) -> SymmetryOperation: return SymmetryOperation(matrix=self.matrix.T) def apply(self, structure: mtr.Structure): return self.matrix @ structure.centered_atomic_positions.value def error(self, structure: mtr.Structure): kdt = scipy.spatial.KDTree(structure.centered_atomic_positions.value.T) dists, _ = np.abs(kdt.query(self.apply(structure).T)) rs = np.abs(self.axis @ structure.centered_atomic_positions.value) return dists / rs def is_symmetry_of(self, structure: mtr.Structure, tolerance: float) -> bool: round_to = round(-np.log(tolerance) / np.log(10)) X = structure.centered_atomic_positions.value return set( tuple(row) for row in self.apply(structure).T.round(round_to) ) == set(tuple(row) for row in X.T.round(round_to)) @property def order(self) -> int: return mtr.periodicity(self.matrix) def __mul__(self, other): return SymmetryOperation(matrix=self.matrix @ other.matrix) class Identity(SymmetryOperation): def __init__(self) -> None: determinant = 1 trace = 3 axis = None super().__init__(determinant=determinant, trace=trace, axis=axis) class Inversion(SymmetryOperation): def __init__(self) -> None: determinant = -1 trace = -3 axis = None super().__init__(determinant=determinant, trace=trace, axis=axis) class Reflection(SymmetryOperation): def __init__(self, axis: Iterable[Union[float, int]]) -> None: determinant = -1 trace = 1 super().__init__(determinant=determinant, trace=trace, axis=axis) class ProperRotation(SymmetryOperation): def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None: determinant = 1 trace = 2 * np.cos(2 * np.pi / order) + determinant super().__init__(determinant=determinant, trace=trace, axis=axis) def __repr__(self) -> str: return f"ProperRotation(order={self.order})" class 
ImproperRotation(SymmetryOperation): def __init__(self, order: int, axis: Iterable[Union[float, int]]) -> None: determinant = -1 trace = 2 * np.cos(2 * np.pi / order) + determinant super().__init__(determinant=determinant, trace=trace, axis=axis)
[ "materia.rotation_matrix", "numpy.trace", "numpy.abs", "numpy.log", "numpy.eye", "materia.normalize", "numpy.allclose", "numpy.triu_indices", "numpy.isclose", "numpy.array", "numpy.cos", "numpy.sign", "numpy.linalg.det", "materia.periodicity", "numpy.arccos", "numpy.diag" ]
[((1568, 1589), 'numpy.trace', 'np.trace', (['self.matrix'], {}), '(self.matrix)\n', (1576, 1589), True, 'import numpy as np\n'), ((1904, 1938), 'numpy.isclose', 'np.isclose', (['(self.tr * self.det)', '(-1)'], {}), '(self.tr * self.det, -1)\n', (1914, 1938), True, 'import numpy as np\n'), ((2202, 2225), 'numpy.triu_indices', 'np.triu_indices', (['(3)'], {'k': '(1)'}), '(3, k=1)\n', (2217, 2225), True, 'import numpy as np\n'), ((2779, 2840), 'numpy.abs', 'np.abs', (['(self.axis @ structure.centered_atomic_positions.value)'], {}), '(self.axis @ structure.centered_atomic_positions.value)\n', (2785, 2840), True, 'import numpy as np\n'), ((3275, 3303), 'materia.periodicity', 'mtr.periodicity', (['self.matrix'], {}), '(self.matrix)\n', (3290, 3303), True, 'import materia as mtr\n'), ((1344, 1394), 'numpy.allclose', 'np.allclose', (['self.matrix', 'other.matrix'], {'atol': '(0.001)'}), '(self.matrix, other.matrix, atol=0.001)\n', (1355, 1394), True, 'import numpy as np\n'), ((1482, 1508), 'numpy.linalg.det', 'np.linalg.det', (['self.matrix'], {}), '(self.matrix)\n', (1495, 1508), True, 'import numpy as np\n'), ((2052, 2068), 'numpy.sign', 'np.sign', (['S[:, i]'], {}), '(S[:, i])\n', (2059, 2068), True, 'import numpy as np\n'), ((2312, 2332), 'numpy.array', 'np.array', (['[1, -1, 1]'], {}), '([1, -1, 1])\n', (2320, 2332), True, 'import numpy as np\n'), ((2996, 3006), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (3002, 3006), True, 'import numpy as np\n'), ((4205, 4230), 'numpy.cos', 'np.cos', (['(2 * np.pi / order)'], {}), '(2 * np.pi / order)\n', (4211, 4230), True, 'import numpy as np\n'), ((4573, 4598), 'numpy.cos', 'np.cos', (['(2 * np.pi / order)'], {}), '(2 * np.pi / order)\n', (4579, 4598), True, 'import numpy as np\n'), ((885, 904), 'materia.normalize', 'mtr.normalize', (['axis'], {}), '(axis)\n', (898, 904), True, 'import materia as mtr\n'), ((1039, 1059), 'numpy.arccos', 'np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (1048, 1059), True, 'import numpy as np\n'), ((1091, 1159), 'materia.rotation_matrix', 'mtr.rotation_matrix', ([], {'axis': 'a', 'theta': 'theta', 'improper': '(determinant == -1)'}), '(axis=a, theta=theta, improper=determinant == -1)\n', (1110, 1159), True, 'import materia as mtr\n'), ((1957, 1966), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1963, 1966), True, 'import numpy as np\n'), ((2092, 2121), 'numpy.allclose', 'np.allclose', (['signs', '[0, 0, 0]'], {}), '(signs, [0, 0, 0])\n', (2103, 2121), True, 'import numpy as np\n'), ((2976, 2993), 'numpy.log', 'np.log', (['tolerance'], {}), '(tolerance)\n', (2982, 2993), True, 'import numpy as np\n'), ((1673, 1690), 'numpy.sign', 'np.sign', (['self.det'], {}), '(self.det)\n', (1680, 1690), True, 'import numpy as np\n'), ((2173, 2183), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (2180, 2183), True, 'import numpy as np\n'), ((801, 810), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (807, 810), True, 'import numpy as np\n')]
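A quick numpy-only sanity check of the determinant/trace conventions encoded above: a reflection should have det = -1 and trace = +1, matching the values the `Reflection` constructor passes to `SymmetryOperation`. The matrix here is illustrative and independent of the materia package.

import numpy as np

R = np.diag([1.0, 1.0, -1.0])          # reflection through the xy-plane
assert np.isclose(np.linalg.det(R), -1.0)
assert np.isclose(np.trace(R), 1.0)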
# Copyright (c) 2015-2020, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import os def generate_data(mode='train', problem_type='binary'): assert mode == 'train' or mode == 'test' rng = np.random.RandomState(1) if problem_type == 'binary': labels = ['POS', 'NEG'] else: labels = ['POS', 'NEG', 'NEU'] texts = ['aaa', 'bbb', 'ccc'] counts = {label: 0 for label in labels} if mode == 'train': n = 1000 else: n = 100 lns = [] for i in range(n): y = rng.choice(labels) counts[y] += 1 x = rng.choice(texts) lns.append('%s##%s\n' % (y, x)) print(counts) with open('%s_input_%s.tribuo' % (mode, problem_type), 'w') as f: for ln in lns: f.write(ln) def generate_models(): lltypes = [ 'L2R_LR', 'L2R_L2LOSS_SVC_DUAL', 'L2R_L2LOSS_SVC', 'L2R_L1LOSS_SVC_DUAL', 'MCSVM_CS', 'L1R_L2LOSS_SVC', 'L1R_LR', 'L2R_LR_DUAL' ] for lltype in lltypes: cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype, 'train_input_binary.tribuo', 'test_input_binary.tribuo') print(cmd) os.system(cmd) # multiclass model lltype = 'L2R_LR' cmd = './src/test/scripts/generate-model.sh %s %s %s %s' % (lltype, lltype+'_multiclass', 'train_input_multiclass.tribuo', 'test_input_multiclass.tribuo') print(cmd) os.system(cmd) if __name__ == '__main__': generate_data(mode='train') generate_data(mode='test') generate_data(mode='train', problem_type='multiclass') generate_data(mode='test', problem_type='multiclass') generate_models()
[ "os.system", "numpy.random.RandomState" ]
[((775, 799), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (796, 799), True, 'import numpy as np\n'), ((2026, 2040), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2035, 2040), False, 'import os\n'), ((1788, 1802), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1797, 1802), False, 'import os\n')]
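The `LABEL##text` line format written by `generate_data` can be previewed in isolation. The seed matches the script; the exact labels drawn depend on numpy's RNG, so the output is deterministic but not spelled out here.

import numpy as np

rng = np.random.RandomState(1)
for _ in range(3):
    print('%s##%s' % (rng.choice(['POS', 'NEG']), rng.choice(['aaa', 'bbb', 'ccc'])))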
#!/usr/bin/env
# -*- coding: utf-8 -*-

# Gauss module:
# Methods for computing the solution of a linear system via Gaussian elimination
# Method for computing the error of the Gauss solution relative to the real solution

import numpy as np

import construtor
import solve


# Computes the solution vector by the Gauss method.
# Inputs: matrix, vector of independent terms, number of points
# Returns: solution vector
def v_sol(m, v, n):
    # Checks and row-reduces the matrix
    for j in range(n):
        if m[j][j] == 0:
            k = j
            while True:
                if 0 == m[k][j]:
                    k += 1
                    if k == n:
                        print("Invalid matrix")
                        break
                else:
                    temp = m[k].copy()
                    m[k] = m[j].copy()
                    m[j] = temp.copy()
                    break
        for i in range(j + 1, n):
            mult = - m[i][j] / m[j][j]
            for k in range(j, n):
                m[i][k] += mult * m[j][k]
            v[i] += mult * v[j]

    # Solves the row-reduced matrix by back-substitution
    x = [None] * n
    for i in range(n - 1, -1, -1):
        x[i] = v[i]
        for j in range(i + 1, n):
            x[i] -= m[i][j] * x[j]
        x[i] = x[i] / m[i][i]

    return x


# Computes the solution vector, for the matrix of an equation, by the Gauss method.
# Inputs: q(x), r(x), mesh of points, step, number of points, y(a), y(b)
# Returns: solution vector
def v_sol_mh(q, r, x, h, n, a_, b_):
    # Builds the matrix and the vector of independent terms
    m_h = construtor.matriz(q, x, h, n)
    v_h = construtor.vetor(r, x, h, n, a_, b_)

    # Computes and returns the solution vector
    return v_sol(m_h, v_h, n - 1)


# Computes the solution vector, for the matrix of an equation and several values of n,
# by the Gauss method.
# Compares the values from the Gauss method with the real solution.
# Plots the graph of the maximum error for each value of n.
# Inputs: y(x), q(x), r(x), initial endpoint (a), final endpoint (b), y(a), y(b)
# Returns: vector with the maximum error for each value of n.
def erro_n(y, q, r, a, b, a_, b_, n, n_step):
    # Error between values obtained by the Gauss method and the known solution
    e = []
    # Maximum error of each iteration
    e_max = []
    for ni in range(5, n, n_step):
        # Computes the step appropriate to the interval
        h = (b - a) / ni
        # Builds the mesh of points
        x = []
        for i in range(1, ni):
            x.append(a + i * h)
        # Computes the real solution vector
        v_sol = solve.v_sol(y, x)
        # Computes the solution vector by the Gauss method
        v_gauss = v_sol_mh(q, r, x, h, ni, a_, b_)
        # Compares the solutions
        dif = [abs(i) for i in (np.array(v_sol) - np.array(v_gauss)).tolist()]
        e.append(dif)
        e_max.append(np.max(dif))

    return e_max


# ----------------test----------------
if __name__ == "__main__":
    b = [[1, 2, 3], [4, 5, 8], [7, 8, 5]]
    c = [10, 11, 12]
    print(v_sol(b, c, 3))
[ "solve.v_sol", "numpy.max", "construtor.vetor", "numpy.array", "construtor.matriz" ]
[((1625, 1654), 'construtor.matriz', 'construtor.matriz', (['q', 'x', 'h', 'n'], {}), '(q, x, h, n)\n', (1642, 1654), False, 'import construtor\n'), ((1666, 1702), 'construtor.vetor', 'construtor.vetor', (['r', 'x', 'h', 'n', 'a_', 'b_'], {}), '(r, x, h, n, a_, b_)\n', (1682, 1702), False, 'import construtor\n'), ((2631, 2648), 'solve.v_sol', 'solve.v_sol', (['y', 'x'], {}), '(y, x)\n', (2642, 2648), False, 'import solve\n'), ((2917, 2928), 'numpy.max', 'np.max', (['dif'], {}), '(dif)\n', (2923, 2928), True, 'import numpy as np\n'), ((2825, 2840), 'numpy.array', 'np.array', (['v_sol'], {}), '(v_sol)\n', (2833, 2840), True, 'import numpy as np\n'), ((2843, 2860), 'numpy.array', 'np.array', (['v_gauss'], {}), '(v_gauss)\n', (2851, 2860), True, 'import numpy as np\n')]
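Note that `v_sol` mutates its arguments in place, so callers should pass copies. A hedged cross-check against numpy's solver, assuming the file above is importable as `gauss` (the module name and matrix values are assumptions):

import numpy as np
import gauss   # assumed module name for the file above

m = [[2.0, 1.0], [1.0, 3.0]]
v = [3.0, 5.0]
x = gauss.v_sol([row[:] for row in m], v[:], 2)   # pass copies: v_sol modifies m and v
assert np.allclose(x, np.linalg.solve(np.array(m), np.array(v)))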
""" Datacube interop functions are here """ import numpy as np from itertools import chain from types import SimpleNamespace from datacube.storage.storage import measurement_paths from datacube.utils import uri_to_local_path from datacube.api import GridWorkflow def flatmap(f, items): return chain.from_iterable(map(f, items)) def first_val(x): return next(iter(x.values())) def list_native_cell(product, cell_index, dc, **query): index = dc.index p = index.products.get_by_name(product) if p.grid_spec is None: raise ValueError('Supplied product does not have a grid spec') gw = GridWorkflow(index, grid_spec=p.grid_spec) tile = gw.list_cells(cell_index=cell_index, product=product, **query)[cell_index] return list(flatmap(lambda x: x, tile.sources.values)) def group_by_storage(dss, bands=None): """ returns [StorageResource] StorageResource .uri - string, URI of the resource .local_path - PosixPath, path on a filesystem, could be None if not a file resource .bands - Dictionary of bands (copied from Dataset) .time - np.ndarray<datetime64[ns]> Timestamps to be read from this resource .datasets - List<datacube.Dataset> referencing this resource """ su_all = {} if bands is None: def check_band(band): return True else: bands = set(bands) def check_band(band): return band in bands def local_path(uri): try: return uri_to_local_path(uri) except ValueError: return None def update(su, ds, band=None): if band is None: bb = {k: ds.measurements[k] for k in ds.measurements if check_band(k)} else: bb = {band: ds.measurements[band]} if su not in su_all: su_all[su] = SimpleNamespace(bands=bb, uri=su, local_path=local_path(su), datasets=[ds]) else: su_all[su].datasets.append(ds) for ds in dss: pp = measurement_paths(ds) paths = set(pp.values()) if len(paths) == 1: # All bands in one file update(paths.pop(), ds) elif len(paths) == len(pp): # Each band in it's own file for band, file in pp.items(): if check_band(band): update(file, ds, band) else: raise ValueError('Not supporting multiple multi-band files') for s in su_all.values(): s.time = np.array([ds.center_time for ds in s.datasets], dtype='datetime64[ns]') return sorted(su_all.values(), key=lambda s: s.time[0]) def compute_time_slice(requested_time, file_time): """ Given requested time stamps and available timestamps (both assumed to be sorted in ascending order), computes roi such that requested_time in file_time[roi] Returns (roi, contigous, complete) Where: roi: slice object contigous: True|False if False not all file stamps in the range are needed complete: True|False, if False some requested timestamps were not found """ assert requested_time.dtype == file_time.dtype ii = np.where((file_time >= requested_time.min()) * (file_time <= requested_time.max()))[0] if len(ii) == 0: raise ValueError("No overlap") roi = slice(ii[0], ii[-1]+1) file_time = set(file_time[roi]) requested_time = set(requested_time) contigous = (file_time == requested_time) complete = requested_time.issubset(file_time) return roi, contigous, complete
[ "datacube.storage.storage.measurement_paths", "numpy.array", "datacube.utils.uri_to_local_path", "datacube.api.GridWorkflow" ]
[((621, 663), 'datacube.api.GridWorkflow', 'GridWorkflow', (['index'], {'grid_spec': 'p.grid_spec'}), '(index, grid_spec=p.grid_spec)\n', (633, 663), False, 'from datacube.api import GridWorkflow\n'), ((2234, 2255), 'datacube.storage.storage.measurement_paths', 'measurement_paths', (['ds'], {}), '(ds)\n', (2251, 2255), False, 'from datacube.storage.storage import measurement_paths\n'), ((2702, 2773), 'numpy.array', 'np.array', (['[ds.center_time for ds in s.datasets]'], {'dtype': '"""datetime64[ns]"""'}), "([ds.center_time for ds in s.datasets], dtype='datetime64[ns]')\n", (2710, 2773), True, 'import numpy as np\n'), ((1593, 1615), 'datacube.utils.uri_to_local_path', 'uri_to_local_path', (['uri'], {}), '(uri)\n', (1610, 1615), False, 'from datacube.utils import uri_to_local_path\n')]
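`compute_time_slice` above is pure numpy, so it can be exercised without an actual datacube index, assuming the module above is in scope. The timestamps below are arbitrary.

import numpy as np

file_time = np.arange('2020-01-01', '2020-01-06', dtype='datetime64[D]').astype('datetime64[ns]')
requested_time = file_time[1:4]
roi, contigous, complete = compute_time_slice(requested_time, file_time)
print(roi, contigous, complete)   # slice(1, 4, None) True True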
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from skimage import transform from skimage.transform import estimate_transform source = np.array([(129, 72), (302, 76), (90, 185), (326, 193)]) target = np.array([[0, 0], [400, 0], [0, 400], [400, 400]]) tf = estimate_transform('projective', source, target) H = tf.params # in older versions of skimage, this should be # H = tf._matrix print(H) # H = np.array([[ 3.04026872e+00, 1.04929628e+00, -4.67743998e+02], # [ -1.44134582e-01, 6.23382067e+00, -4.30241727e+02], # [ 2.63620673e-05, 4.17694527e-03, 1.00000000e+00]]) def rectify(xy): x = xy[:, 0] y = xy[:, 1] # You must fill in your code here. # # Handy functions are: # # - np.dot (matrix multiplication) # - np.ones_like (make an array of ones the same shape as another array) # - np.column_stack # - A.T -- type .T after a matrix to transpose it # - x.reshape -- reshapes the array x # We need to provide the backward mapping HH = np.linalg.inv(H) homogeneous_coordinates = np.column_stack([x, y, np.ones_like(x)]) xyz = np.dot(HH, homogeneous_coordinates.T) # We want one coordinate per row xyz = xyz.T # Turn z into a column vector z = xyz[:, 2] z = z.reshape([len(z), 1]) xyz = xyz / z return xyz[:, :2] image = plt.imread('../../images/chapel_floor.png') out = transform.warp(image, rectify, output_shape=(400, 400)) f, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4)) ax0.imshow(image) ax1.imshow(out) plt.show()
[ "matplotlib.pyplot.show", "numpy.ones_like", "numpy.array", "numpy.linalg.inv", "skimage.transform.warp", "skimage.transform.estimate_transform", "numpy.dot", "matplotlib.pyplot.imread", "matplotlib.pyplot.subplots" ]
[((182, 237), 'numpy.array', 'np.array', (['[(129, 72), (302, 76), (90, 185), (326, 193)]'], {}), '([(129, 72), (302, 76), (90, 185), (326, 193)])\n', (190, 237), True, 'import numpy as np\n'), ((305, 355), 'numpy.array', 'np.array', (['[[0, 0], [400, 0], [0, 400], [400, 400]]'], {}), '([[0, 0], [400, 0], [0, 400], [400, 400]])\n', (313, 355), True, 'import numpy as np\n'), ((419, 467), 'skimage.transform.estimate_transform', 'estimate_transform', (['"""projective"""', 'source', 'target'], {}), "('projective', source, target)\n", (437, 467), False, 'from skimage.transform import estimate_transform\n'), ((1541, 1584), 'matplotlib.pyplot.imread', 'plt.imread', (['"""../../images/chapel_floor.png"""'], {}), "('../../images/chapel_floor.png')\n", (1551, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1646), 'skimage.transform.warp', 'transform.warp', (['image', 'rectify'], {'output_shape': '(400, 400)'}), '(image, rectify, output_shape=(400, 400))\n', (1605, 1646), False, 'from skimage import transform\n'), ((1664, 1698), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 4)'}), '(1, 2, figsize=(8, 4))\n', (1676, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1231), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1228, 1231), True, 'import numpy as np\n'), ((1314, 1351), 'numpy.dot', 'np.dot', (['HH', 'homogeneous_coordinates.T'], {}), '(HH, homogeneous_coordinates.T)\n', (1320, 1351), True, 'import numpy as np\n'), ((1286, 1301), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1298, 1301), True, 'import numpy as np\n')]
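With four point correspondences a projective transform is determined exactly, so the estimated `H` above should map each source corner onto its target up to numerical error. This sketch reuses `source`, `target`, and `H` from the script above.

import numpy as np

for (x, y), t in zip(source, target):
    u, v, w = H @ np.array([x, y, 1.0])   # apply the homography in homogeneous coordinates
    assert np.allclose([u / w, v / w], t, atol=1e-3)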
""" Test that computes the refined mean field approximation for the two-choice model (with order 1 and 2 and a few parameter) Compare the computed value with a value already stored in a pickle file """ import pickle import numpy as np from approximately_equal import approximately_equal import os PWD=os.getcwd() if PWD[-5:] == 'tests': CACHE_DIR = 'output_tests' else: CACHE_DIR = 'tests/output_tests' import sys sys.path.append('../') sys.path.append('.') import src.rmf_tool as rmf def dChoiceModel(K, rho, d): ddpp = rmf.DDPP() # The vector 'e(i)' is a vector where the $i$th coordinate is equal to $1$ (the other being equal to $0$) def e(i): l = np.zeros(K) l[i] = 1 return l # We then add the transitions : for i in range(K): if i >= 1: ddpp.add_transition(e(i),eval('lambda x: {}*(x[{}]**{} - x[{}]**{} )'.format(rho, i-1, d, i, d))) if i < K-1: ddpp.add_transition(-e(i),eval('lambda x: (x[{}] - x[{}])'.format(i,i+1) )) ddpp.add_transition(e(0), lambda x : eval('{}*(1-x[0]**{})'.format(rho,d))) ddpp.add_transition(-e(K-1), lambda x : x[K-1]) ddpp.set_initial_state(e(0)) return ddpp def generate_data(): """ Generate all data and store them in a pickle file (to be used one times when the test is initialized) """ data = dict([]) for rho in [0.6, 0.7, 0.8, 0.9]: for d in [2, 3]: for K in [5, 9, 15, 20]: for order in ([1, 2] if K <= 5 else [1]): ddpp = dChoiceModel(K, rho, d) data[(K, rho, d, order)] = ddpp.meanFieldExpansionSteadyState(order=order) with open('{}/d_choice.pickle'.format(CACHE_DIR), 'wb') as f: # Pickle the 'data' dictionary using the highest protocol available. pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) def test_two_choice(): """ Compare the new data with previously computed data. """ with open('{}/d_choice.pickle'.format(CACHE_DIR), 'rb') as f: # The protocol version used is detected automatically, so we do not # have to specify it. data = pickle.load(f) for key in data: (K,rho,d,order) = key print(key) ddpp = dChoiceModel(K, rho, d) new_data = ddpp.meanFieldExpansionSteadyState(order=order) test_data = data[key] assert approximately_equal(new_data, test_data) <= 1e-8 #generate_data() #test_two_choice()
[ "sys.path.append", "src.rmf_tool.DDPP", "pickle.dump", "os.getcwd", "numpy.zeros", "pickle.load", "approximately_equal.approximately_equal" ]
[((303, 314), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (312, 314), False, 'import os\n'), ((425, 447), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (440, 447), False, 'import sys\n'), ((448, 468), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (463, 468), False, 'import sys\n'), ((537, 547), 'src.rmf_tool.DDPP', 'rmf.DDPP', ([], {}), '()\n', (545, 547), True, 'import src.rmf_tool as rmf\n'), ((685, 696), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (693, 696), True, 'import numpy as np\n'), ((1832, 1877), 'pickle.dump', 'pickle.dump', (['data', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(data, f, pickle.HIGHEST_PROTOCOL)\n', (1843, 1877), False, 'import pickle\n'), ((2161, 2175), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2172, 2175), False, 'import pickle\n'), ((2425, 2465), 'approximately_equal.approximately_equal', 'approximately_equal', (['new_data', 'test_data'], {}), '(new_data, test_data)\n', (2444, 2465), False, 'from approximately_equal import approximately_equal\n')]
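The `eval`-built lambdas above exist to freeze the loop index `i` into each rate function; the conventional closure idiom achieves the same binding without `eval`. A hedged equivalent for the service transition:

def make_serve_rate(i):
    # returns the rate x[i] - x[i+1] with i bound at creation time
    return lambda x: x[i] - x[i + 1]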
import time import numpy as np from sklearn.model_selection import train_test_split from keras.optimizers import Adam from keras.utils import plot_model from CNNTripletModel import build_network, build_model from BatchBuilder import get_batch_random_demo input_shape = (28, 28, 1) evaluate_every = 5 n_val = 5 batch_size = 20 data = np.load("/Users/niklastecklenburg/Desktop/Test/Data/images.npy") labels = np.load("/Users/niklastecklenburg/Desktop/Test/Data/labels.npy") data_train, data_test, labels_train, labels_test = train_test_split( data, labels, test_size=0.2, random_state=42 ) network = build_network(input_shape, embeddingsize=10) network_train = build_model(input_shape, network) optimizer = Adam(lr=0.00006) network_train.compile(loss=None, optimizer=optimizer) network_train.summary() plot_model( network_train, show_shapes=True, show_layer_names=True, to_file="02 model.png" ) print(network_train.metrics_names) network_train.load_weights("mnist-160k_weights.h5") t_start = time.time() n_iteration = 0 for i in range(30): # triplets = get_batch_hard(200,16,16,network) triplets = get_batch_random_demo(data_train, labels_train, batch_size) loss = network_train.train_on_batch(triplets, None) print(loss) # n_iteration += 1 # if i % evaluate_every == 0: # print("\n ------------- \n") # print("[{3}] Time for {0} iterations: {1:.1f} mins, Train Loss: {2}".format(i, (time.time()-t_start)/60.0,loss,n_iteration)) # probs,yprob = compute_probs(network,test_images[:n_val,:,:,:],y_test_origin[:n_val])
[ "numpy.load", "CNNTripletModel.build_network", "sklearn.model_selection.train_test_split", "keras.optimizers.Adam", "time.time", "keras.utils.plot_model", "BatchBuilder.get_batch_random_demo", "CNNTripletModel.build_model" ]
[((340, 404), 'numpy.load', 'np.load', (['"""/Users/niklastecklenburg/Desktop/Test/Data/images.npy"""'], {}), "('/Users/niklastecklenburg/Desktop/Test/Data/images.npy')\n", (347, 404), True, 'import numpy as np\n'), ((414, 478), 'numpy.load', 'np.load', (['"""/Users/niklastecklenburg/Desktop/Test/Data/labels.npy"""'], {}), "('/Users/niklastecklenburg/Desktop/Test/Data/labels.npy')\n", (421, 478), True, 'import numpy as np\n'), ((531, 593), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, labels, test_size=0.2, random_state=42)\n', (547, 593), False, 'from sklearn.model_selection import train_test_split\n'), ((611, 655), 'CNNTripletModel.build_network', 'build_network', (['input_shape'], {'embeddingsize': '(10)'}), '(input_shape, embeddingsize=10)\n', (624, 655), False, 'from CNNTripletModel import build_network, build_model\n'), ((672, 705), 'CNNTripletModel.build_model', 'build_model', (['input_shape', 'network'], {}), '(input_shape, network)\n', (683, 705), False, 'from CNNTripletModel import build_network, build_model\n'), ((718, 732), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(6e-05)'}), '(lr=6e-05)\n', (722, 732), False, 'from keras.optimizers import Adam\n'), ((813, 908), 'keras.utils.plot_model', 'plot_model', (['network_train'], {'show_shapes': '(True)', 'show_layer_names': '(True)', 'to_file': '"""02 model.png"""'}), "(network_train, show_shapes=True, show_layer_names=True, to_file=\n '02 model.png')\n", (823, 908), False, 'from keras.utils import plot_model\n'), ((1009, 1020), 'time.time', 'time.time', ([], {}), '()\n', (1018, 1020), False, 'import time\n'), ((1123, 1182), 'BatchBuilder.get_batch_random_demo', 'get_batch_random_demo', (['data_train', 'labels_train', 'batch_size'], {}), '(data_train, labels_train, batch_size)\n', (1144, 1182), False, 'from BatchBuilder import get_batch_random_demo\n')]
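The training script compiles with `loss=None`, so the loss is presumably attached inside `build_model`. For reference, a plain-numpy sketch of the margin-based triplet loss such networks usually minimize; the embeddings and margin here are illustrative.

import numpy as np

def triplet_loss(anchor, positive, negative, margin=0.2):
    d_ap = np.sum((anchor - positive) ** 2, axis=-1)   # squared anchor-positive distance
    d_an = np.sum((anchor - negative) ** 2, axis=-1)   # squared anchor-negative distance
    return np.maximum(d_ap - d_an + margin, 0.0).mean()

print(triplet_loss(np.zeros((4, 10)), np.zeros((4, 10)), np.ones((4, 10))))   # 0.0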
import numpy as np from torch.optim.lr_scheduler import _LRScheduler from torch.utils.data.dataset import Dataset from math import cos, pi import librosa from scipy.io import wavfile import random class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def cycle(iterable): """ convert dataloader to iterator :param iterable: :return: """ while True: for x in iterable: yield x class CosineLR(_LRScheduler): """cosine annealing. """ def __init__(self, optimizer, step_size_min=1e-5, t0=100, tmult=2, curr_epoch=-1, last_epoch=-1): self.step_size_min = step_size_min self.t0 = t0 self.tmult = tmult self.epochs_since_restart = curr_epoch super(CosineLR, self).__init__(optimizer, last_epoch) def get_lr(self): self.epochs_since_restart += 1 if self.epochs_since_restart > self.t0: self.t0 *= self.tmult self.epochs_since_restart = 0 lrs = [self.step_size_min + ( 0.5 * (base_lr - self.step_size_min) * (1 + cos(self.epochs_since_restart * pi / self.t0))) for base_lr in self.base_lrs] return lrs class MelDataset(Dataset): def __init__(self, X, y, crop=-1, mixup=False, freqmask=False, gain=False, crop_mode='original',crop_rate=0.25 ): self.X= X self.y= y self.crop = crop self.mixup = mixup self.freqmask = freqmask self.gain = gain self.crop_mode = crop_mode self.crop_rate = crop_rate def do_additional_crop(self, img): len_img = img.shape[1] img_new = np.zeros([img.shape[0], self.crop], np.float32) rate = np.random.random() * (1 - self.crop_rate) + self.crop_rate if np.random.random() < 0.5: rate = 1 if img.shape[1] <= self.crop: len_crop = int(img.shape[1] * rate) if img.shape[1] - len_crop == 0: shift_crop = 0 else: shift_crop = np.random.randint(0, img.shape[1] - len_crop) img = img[:, shift_crop:shift_crop + len_crop] if self.crop - len_crop == 0: shift = 0 else: shift = np.random.randint(0, self.crop - len_crop) img_new[:, shift:shift + len_crop] = img else: shift = np.random.randint(0, img.shape[1] - self.crop) img_new = img[:, shift:shift + self.crop] len_crop = int(self.crop * rate) if self.crop - len_crop == 0: shift_crop = 0 else: shift_crop = np.random.randint(0, self.crop - len_crop) img_new[:shift_crop] = 0 img_new[shift_crop + len_crop:] = 0 return img_new def do_random_crop(self, img): img_new = np.zeros([img.shape[0], self.crop], np.float32) if img.shape[1] < self.crop: shift = np.random.randint(0, self.crop - img.shape[1]) img_new[:, shift:shift + img.shape[1]] = img elif img.shape[1] == self.crop: img_new = img else: shift = np.random.randint(0, img.shape[1] - self.crop) img_new = img[:, shift:shift + self.crop] return img_new def do_crop(self, img): if self.crop_mode == 'random': return self.do_random_crop(img) elif self.crop_mode == 'additional': return self.do_additional_crop(img) elif self.crop_mode == 'original': return img def do_mixup(self, img, label, alpha=1.): idx = np.random.randint(0, len(self.X)) img2 = np.load("{}.npy".format(self.X[idx][:-4])) img2 = self.do_crop(img2) label2 = self.y[idx].astype(np.float32) rate = np.random.beta(alpha, alpha) img = img * rate + img2 * (1 - rate) label = label * rate + label2 * (1 - rate) return img, label def do_freqmask(self, img, max=32): coord = np.random.randint(0, img.shape[0]) width = np.random.randint(8, max) cut = np.array([coord - width, coord + width]) cut = np.clip(cut, 0, img.shape[0]) 
        img[cut[0]:cut[1]] = 0
        return img

    def do_gain(self, img, max=0.1):
        rate = 1 - max + np.random.random() * max * 2
        return img * rate

    def __getitem__(self, index):
        img = np.load("{}.npy".format(self.X[index][:-4]))
        img = self.do_crop(img)
        label = self.y[index].astype(np.float32)

        if self.mixup and np.random.random() < 0.5:
            img, label = self.do_mixup(img, label)
        if self.gain and np.random.random() < 0.5:
            img = self.do_gain(img)
        if self.freqmask and np.random.random() < 0.5:
            img = self.do_freqmask(img)

        img = librosa.power_to_db(img)
        img = (img - img.mean()) / (img.std() + 1e-7)
        img = img.reshape([1, img.shape[0], img.shape[1]])
        return img, label

    def __len__(self):
        return len(self.X)


def compute_gain(sound, fs, min_db=-80.0, mode='RMSE'):
    if fs == 16000:
        n_fft = 2048
    elif fs == 44100:
        n_fft = 4096
    else:
        raise Exception('Invalid fs {}'.format(fs))
    stride = n_fft // 2

    gain = []
    for i in range(0, len(sound) - n_fft + 1, stride):
        if mode == 'RMSE':
            g = np.mean(sound[i: i + n_fft] ** 2)
        elif mode == 'A_weighting':
            spec = np.fft.rfft(np.hanning(n_fft + 1)[:-1] * sound[i: i + n_fft])
            power_spec = np.abs(spec) ** 2
            a_weighted_spec = power_spec * np.power(10, a_weight(fs, n_fft) / 10)
            g = np.sum(a_weighted_spec)
        else:
            raise Exception('Invalid mode {}'.format(mode))
        gain.append(g)

    gain = np.array(gain)
    gain = np.maximum(gain, np.power(10, min_db / 10))
    gain_db = 10 * np.log10(gain)

    return gain_db


def mix(sound1, sound2, r, fs):
    gain1 = np.max(compute_gain(sound1, fs))  # Decibel
    gain2 = np.max(compute_gain(sound2, fs))
    t = 1.0 / (1 + np.power(10, (gain1 - gain2) / 20.) * (1 - r) / r)
    sound = ((sound1 * t + sound2 * (1 - t)) / np.sqrt(t ** 2 + (1 - t) ** 2))
    sound = sound.astype(np.float32)
    return sound


class WaveDataset(Dataset):
    def __init__(self, X, y,
                 crop=-1,
                 crop_mode='original',
                 padding=0,
                 mixup=False,
                 scaling=-1,
                 gain=-1,
                 fs=44100,
                 ):
        self.X = X
        self.y = y
        self.crop = crop
        self.crop_mode = crop_mode
        self.padding = padding
        self.mixup = mixup
        self.scaling = scaling
        self.gain = gain
        self.fs = fs

    def preprocess(self, sound):
        for f in self.preprocess_funcs:
            sound = f(sound)
        return sound

    def do_padding(self, snd):
        snd_new = np.pad(snd, self.padding, 'constant')
        return snd_new

    def do_crop(self, snd):
        if self.crop_mode == 'random':
            shift = np.random.randint(0, snd.shape[0] - self.crop)
            snd_new = snd[shift:shift + self.crop]
        else:
            snd_new = snd
        return snd_new

    def do_gain(self, snd):
        snd_new = snd * np.power(10, random.uniform(-self.gain, self.gain) / 20.0)
        return snd_new

    def do_scaling(self, snd, interpolate='Nearest'):
        scale = np.power(self.scaling, random.uniform(-1, 1))
        output_size = int(len(snd) * scale)
        ref = np.arange(output_size) / scale
        if interpolate == 'Linear':
            ref1 = ref.astype(np.int32)
            ref2 = np.minimum(ref1 + 1, len(snd) - 1)
            r = ref - ref1
            snd_new = snd[ref1] * (1 - r) + snd[ref2] * r
        elif interpolate == 'Nearest':
            snd_new = snd[ref.astype(np.int32)]
        else:
            raise Exception('Invalid interpolation mode {}'.format(interpolate))
        return snd_new

    def do_mixup(self, snd, label, alpha=1):
        idx2 = np.random.randint(0, len(self.X))
        _, snd2 = wavfile.read("{}".format(self.X[idx2]))
        label2 = self.y[idx2].astype(np.float32)
        if self.scaling != -1:
            snd2 = self.do_scaling(snd2)
        snd2 = self.do_padding(snd2)
        snd2 = self.do_crop(snd2)

        rate = np.random.beta(alpha, alpha)
        # mix the current waveform with the randomly drawn one
        snd_new = mix(snd, snd2, rate, self.fs)
        label_new = label * rate + label2 * (1 - rate)
        return snd_new, label_new

    def __getitem__(self, index):
        _, snd = wavfile.read("{}".format(self.X[index]))
        label = self.y[index].astype(np.float32)
        if self.scaling != -1:
            snd = 
self.do_scaling(snd) snd = self.do_padding(snd) snd = self.do_crop(snd) if self.mixup: snd, label = self.do_mixup(snd, label) if self.gain!=-1: snd = self.do_gain(snd) snd = snd.reshape([1, 1, -1]).astype(np.float32) / 32768.0 return snd, label def __len__(self): return len(self.X) def _one_sample_positive_class_precisions(scores, truth): """Calculate precisions for each true class for a single sample. Args: scores: np.array of (num_classes,) giving the individual classifier scores. truth: np.array of (num_classes,) bools indicating which classes are true. Returns: pos_class_indices: np.array of indices of the true classes for this sample. pos_class_precisions: np.array of precisions corresponding to each of those classes. """ num_classes = scores.shape[0] pos_class_indices = np.flatnonzero(truth > 0) # Only calculate precisions if there are some true classes. if not len(pos_class_indices): return pos_class_indices, np.zeros(0) # Retrieval list of classes for this sample. retrieved_classes = np.argsort(scores)[::-1] # class_rankings[top_scoring_class_index] == 0 etc. class_rankings = np.zeros(num_classes, dtype=np.int) class_rankings[retrieved_classes] = range(num_classes) # Which of these is a true label? retrieved_class_true = np.zeros(num_classes, dtype=np.bool) retrieved_class_true[class_rankings[pos_class_indices]] = True # Num hits for every truncated retrieval list. retrieved_cumulative_hits = np.cumsum(retrieved_class_true) # Precision of retrieval list truncated at each hit, in order of pos_labels. precision_at_hits = ( retrieved_cumulative_hits[class_rankings[pos_class_indices]] / (1 + class_rankings[pos_class_indices].astype(np.float))) return pos_class_indices, precision_at_hits # All-in-one calculation of per-class lwlrap. def calculate_per_class_lwlrap(truth, scores): """Calculate label-weighted label-ranking average precision. Arguments: truth: np.array of (num_samples, num_classes) giving boolean ground-truth of presence of that class in that sample. scores: np.array of (num_samples, num_classes) giving the classifier-under- test's real-valued score for each class for each sample. Returns: per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each class. weight_per_class: np.array of (num_classes,) giving the prior of each class within the truth labels. Then the overall unbalanced lwlrap is simply np.sum(per_class_lwlrap * weight_per_class) """ assert truth.shape == scores.shape num_samples, num_classes = scores.shape # Space to store a distinct precision value for each class on each sample. # Only the classes that are true for each sample will be filled in. precisions_for_samples_by_classes = np.zeros((num_samples, num_classes)) for sample_num in range(num_samples): pos_class_indices, precision_at_hits = ( _one_sample_positive_class_precisions(scores[sample_num, :], truth[sample_num, :])) precisions_for_samples_by_classes[sample_num, pos_class_indices] = ( precision_at_hits) labels_per_class = np.sum(truth > 0, axis=0) weight_per_class = labels_per_class / float(np.sum(labels_per_class)) # Form average of each column, i.e. all the precisions assigned to labels in # a particular class. 
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) / np.maximum(1, labels_per_class)) # overall_lwlrap = simple average of all the actual per-class, per-sample precisions # = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0) # also = weighted mean of per-class lwlraps, weighted by class label prior across samples # = np.sum(per_class_lwlrap * weight_per_class) return per_class_lwlrap, weight_per_class
[ "numpy.sum", "numpy.maximum", "numpy.abs", "numpy.clip", "numpy.argsort", "numpy.random.randint", "librosa.power_to_db", "numpy.mean", "numpy.arange", "numpy.pad", "numpy.power", "numpy.cumsum", "math.cos", "numpy.hanning", "numpy.log10", "numpy.random.beta", "random.uniform", "numpy.flatnonzero", "numpy.zeros", "numpy.random.random", "numpy.array", "numpy.sqrt" ]
[((6190, 6204), 'numpy.array', 'np.array', (['gain'], {}), '(gain)\n', (6198, 6204), True, 'import numpy as np\n'), ((9986, 10011), 'numpy.flatnonzero', 'np.flatnonzero', (['(truth > 0)'], {}), '(truth > 0)\n', (10000, 10011), True, 'import numpy as np\n'), ((10332, 10367), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'np.int'}), '(num_classes, dtype=np.int)\n', (10340, 10367), True, 'import numpy as np\n'), ((10492, 10528), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'np.bool'}), '(num_classes, dtype=np.bool)\n', (10500, 10528), True, 'import numpy as np\n'), ((10679, 10710), 'numpy.cumsum', 'np.cumsum', (['retrieved_class_true'], {}), '(retrieved_class_true)\n', (10688, 10710), True, 'import numpy as np\n'), ((12067, 12103), 'numpy.zeros', 'np.zeros', (['(num_samples, num_classes)'], {}), '((num_samples, num_classes))\n', (12075, 12103), True, 'import numpy as np\n'), ((12472, 12497), 'numpy.sum', 'np.sum', (['(truth > 0)'], {'axis': '(0)'}), '(truth > 0, axis=0)\n', (12478, 12497), True, 'import numpy as np\n'), ((2029, 2076), 'numpy.zeros', 'np.zeros', (['[img.shape[0], self.crop]', 'np.float32'], {}), '([img.shape[0], self.crop], np.float32)\n', (2037, 2076), True, 'import numpy as np\n'), ((3223, 3270), 'numpy.zeros', 'np.zeros', (['[img.shape[0], self.crop]', 'np.float32'], {}), '([img.shape[0], self.crop], np.float32)\n', (3231, 3270), True, 'import numpy as np\n'), ((4179, 4207), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (4193, 4207), True, 'import numpy as np\n'), ((4388, 4422), 'numpy.random.randint', 'np.random.randint', (['(0)', 'img.shape[0]'], {}), '(0, img.shape[0])\n', (4405, 4422), True, 'import numpy as np\n'), ((4439, 4464), 'numpy.random.randint', 'np.random.randint', (['(8)', 'max'], {}), '(8, max)\n', (4456, 4464), True, 'import numpy as np\n'), ((4479, 4519), 'numpy.array', 'np.array', (['[coord - width, coord + width]'], {}), '([coord - width, coord + width])\n', (4487, 4519), True, 'import numpy as np\n'), ((4534, 4563), 'numpy.clip', 'np.clip', (['cut', '(0)', 'img.shape[0]'], {}), '(cut, 0, img.shape[0])\n', (4541, 4563), True, 'import numpy as np\n'), ((5208, 5232), 'librosa.power_to_db', 'librosa.power_to_db', (['img'], {}), '(img)\n', (5227, 5232), False, 'import librosa\n'), ((6233, 6258), 'numpy.power', 'np.power', (['(10)', '(min_db / 10)'], {}), '(10, min_db / 10)\n', (6241, 6258), True, 'import numpy as np\n'), ((6279, 6293), 'numpy.log10', 'np.log10', (['gain'], {}), '(gain)\n', (6287, 6293), True, 'import numpy as np\n'), ((6566, 6596), 'numpy.sqrt', 'np.sqrt', (['(t ** 2 + (1 - t) ** 2)'], {}), '(t ** 2 + (1 - t) ** 2)\n', (6573, 6596), True, 'import numpy as np\n'), ((7277, 7314), 'numpy.pad', 'np.pad', (['snd', 'self.padding', '"""constant"""'], {}), "(snd, self.padding, 'constant')\n", (7283, 7314), True, 'import numpy as np\n'), ((8700, 8728), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (8714, 8728), True, 'import numpy as np\n'), ((10230, 10248), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (10240, 10248), True, 'import numpy as np\n'), ((12703, 12752), 'numpy.sum', 'np.sum', (['precisions_for_samples_by_classes'], {'axis': '(0)'}), '(precisions_for_samples_by_classes, axis=0)\n', (12709, 12752), True, 'import numpy as np\n'), ((12779, 12810), 'numpy.maximum', 'np.maximum', (['(1)', 'labels_per_class'], {}), '(1, labels_per_class)\n', (12789, 12810), True, 'import numpy as np\n'), ((2162, 2180), 'numpy.random.random', 
'np.random.random', ([], {}), '()\n', (2178, 2180), True, 'import numpy as np\n'), ((2752, 2798), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - self.crop)'], {}), '(0, img.shape[1] - self.crop)\n', (2769, 2798), True, 'import numpy as np\n'), ((3328, 3374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - img.shape[1])'], {}), '(0, self.crop - img.shape[1])\n', (3345, 3374), True, 'import numpy as np\n'), ((5765, 5797), 'numpy.mean', 'np.mean', (['(sound[i:i + n_fft] ** 2)'], {}), '(sound[i:i + n_fft] ** 2)\n', (5772, 5797), True, 'import numpy as np\n'), ((7424, 7470), 'numpy.random.randint', 'np.random.randint', (['(0)', '(snd.shape[0] - self.crop)'], {}), '(0, snd.shape[0] - self.crop)\n', (7441, 7470), True, 'import numpy as np\n'), ((7814, 7835), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (7828, 7835), False, 'import random\n'), ((7895, 7917), 'numpy.arange', 'np.arange', (['output_size'], {}), '(output_size)\n', (7904, 7917), True, 'import numpy as np\n'), ((10145, 10156), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (10153, 10156), True, 'import numpy as np\n'), ((12546, 12570), 'numpy.sum', 'np.sum', (['labels_per_class'], {}), '(labels_per_class)\n', (12552, 12570), True, 'import numpy as np\n'), ((2092, 2110), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2108, 2110), True, 'import numpy as np\n'), ((2407, 2452), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - len_crop)'], {}), '(0, img.shape[1] - len_crop)\n', (2424, 2452), True, 'import numpy as np\n'), ((2622, 2664), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - len_crop)'], {}), '(0, self.crop - len_crop)\n', (2639, 2664), True, 'import numpy as np\n'), ((3018, 3060), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.crop - len_crop)'], {}), '(0, self.crop - len_crop)\n', (3035, 3060), True, 'import numpy as np\n'), ((3532, 3578), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.shape[1] - self.crop)'], {}), '(0, img.shape[1] - self.crop)\n', (3549, 3578), True, 'import numpy as np\n'), ((4934, 4952), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4950, 4952), True, 'import numpy as np\n'), ((5036, 5054), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5052, 5054), True, 'import numpy as np\n'), ((5127, 5145), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5143, 5145), True, 'import numpy as np\n'), ((6057, 6080), 'numpy.sum', 'np.sum', (['a_weighted_spec'], {}), '(a_weighted_spec)\n', (6063, 6080), True, 'import numpy as np\n'), ((4677, 4695), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4693, 4695), True, 'import numpy as np\n'), ((5941, 5953), 'numpy.abs', 'np.abs', (['spec'], {}), '(spec)\n', (5947, 5953), True, 'import numpy as np\n'), ((6468, 6504), 'numpy.power', 'np.power', (['(10)', '((gain1 - gain2) / 20.0)'], {}), '(10, (gain1 - gain2) / 20.0)\n', (6476, 6504), True, 'import numpy as np\n'), ((7651, 7688), 'random.uniform', 'random.uniform', (['(-self.gain)', 'self.gain'], {}), '(-self.gain, self.gain)\n', (7665, 7688), False, 'import random\n'), ((1413, 1458), 'math.cos', 'cos', (['(self.epochs_since_restart * pi / self.t0)'], {}), '(self.epochs_since_restart * pi / self.t0)\n', (1416, 1458), False, 'from math import cos, pi\n'), ((5866, 5887), 'numpy.hanning', 'np.hanning', (['(n_fft + 1)'], {}), '(n_fft + 1)\n', (5876, 5887), True, 'import numpy as np\n')]
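A tiny self-check for `calculate_per_class_lwlrap` above: when the scores rank every true label first, each labeled class scores 1.0 and the class weights sum to 1. This assumes the functions above are in scope and a numpy version that still accepts the legacy np.int/np.bool aliases the snippet uses.

import numpy as np

truth = np.array([[1, 0], [0, 1]])
scores = truth.astype(float)                       # perfect ranking
per_class, weights = calculate_per_class_lwlrap(truth, scores)
assert np.allclose(per_class, 1.0)
assert np.isclose(weights.sum(), 1.0)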
import numpy as np
import matplotlib.pyplot as plt


def make_pulses(data, T, pulse):
    # upsample by T: place each symbol every T samples, zeros in between
    widen = np.zeros(len(data) * T, dtype=np.complex64)
    for idx, val in enumerate(widen):
        if idx % T == 0:
            widen[idx] = data[idx // T]

    # pulse-shape by convolving the impulse train with the pulse
    return np.array(np.convolve(widen, pulse, 'full'), dtype=np.complex64)


def raised_cosine(size, T):
    W = 1 / T
    pulse = np.zeros(size, dtype=np.complex64)
    alpha = 0.5
    for idx, t in enumerate(range(-size // T, size // T)):
        val = np.sinc(2 * W * t) * (np.cos(2 * np.pi * alpha * W * t) /
                                   (1 - 16 * (alpha ** 2) * (W ** 2) * (t ** 2)))
        pulse[idx] = val
    return pulse


if __name__ == "__main__":
    data_path = '../data/'

    # Gen noise
    np.random.seed(45)
    noise_size = 10000
    noise1 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
    noise2 = np.array(np.random.choice([0.5, -0.5], size=noise_size))

    # Make noise into pulses
    T = 10
    pulse = np.ones(10)
    noise1 = make_pulses(noise1, T, pulse)
    noise2 = make_pulses(noise2, T, pulse)

    # Save noise for cross correlation later
    noise1.tofile(data_path + "noise_1.bin")
    noise2.tofile(data_path + "noise_2.bin")

    # Make filler so we can send everything at once
    zeros_gap = np.zeros(10000)
    zeros = np.zeros(len(noise1))

    # Data for channel 1
    channel1 = np.concatenate([noise1, zeros_gap, zeros])
    channel2 = np.concatenate([zeros, zeros_gap, noise2])

    channel1 = np.array(channel1, dtype=np.complex64)
    channel2 = np.array(channel2, dtype=np.complex64)

    # Save out data
    channel1.tofile(data_path + "noise_1_transmit.bin")
    channel2.tofile(data_path + "noise_2_transmit.bin")

    # Plot for verification
    plt.plot(channel1)
    plt.plot(channel2)
    plt.show()
[ "numpy.random.seed", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "numpy.zeros", "numpy.ones", "numpy.sinc", "numpy.array", "numpy.cos", "numpy.random.choice", "numpy.convolve", "numpy.concatenate" ]
[((374, 408), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.complex64'}), '(size, dtype=np.complex64)\n', (382, 408), True, 'import numpy as np\n'), ((617, 632), 'matplotlib.pyplot.plot', 'plt.plot', (['pulse'], {}), '(pulse)\n', (625, 632), True, 'import matplotlib.pyplot as plt\n'), ((637, 647), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (645, 647), True, 'import matplotlib.pyplot as plt\n'), ((758, 776), 'numpy.random.seed', 'np.random.seed', (['(45)'], {}), '(45)\n', (772, 776), True, 'import numpy as np\n'), ((993, 1004), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1000, 1004), True, 'import numpy as np\n'), ((1296, 1311), 'numpy.zeros', 'np.zeros', (['(10000)'], {}), '(10000)\n', (1304, 1311), True, 'import numpy as np\n'), ((1387, 1429), 'numpy.concatenate', 'np.concatenate', (['[noise1, zeros_gap, zeros]'], {}), '([noise1, zeros_gap, zeros])\n', (1401, 1429), True, 'import numpy as np\n'), ((1447, 1489), 'numpy.concatenate', 'np.concatenate', (['[zeros, zeros_gap, noise2]'], {}), '([zeros, zeros_gap, noise2])\n', (1461, 1489), True, 'import numpy as np\n'), ((1508, 1546), 'numpy.array', 'np.array', (['channel1'], {'dtype': 'np.complex64'}), '(channel1, dtype=np.complex64)\n', (1516, 1546), True, 'import numpy as np\n'), ((1564, 1602), 'numpy.array', 'np.array', (['channel2'], {'dtype': 'np.complex64'}), '(channel2, dtype=np.complex64)\n', (1572, 1602), True, 'import numpy as np\n'), ((1771, 1789), 'matplotlib.pyplot.plot', 'plt.plot', (['channel1'], {}), '(channel1)\n', (1779, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1812), 'matplotlib.pyplot.plot', 'plt.plot', (['channel2'], {}), '(channel2)\n', (1802, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1825, 1827), True, 'import matplotlib.pyplot as plt\n'), ((266, 299), 'numpy.convolve', 'np.convolve', (['widen', 'pulse', '"""full"""'], {}), "(widen, pulse, 'full')\n", (277, 299), True, 'import numpy as np\n'), ((822, 868), 'numpy.random.choice', 'np.random.choice', (['[0.5, -0.5]'], {'size': 'noise_size'}), '([0.5, -0.5], size=noise_size)\n', (838, 868), True, 'import numpy as np\n'), ((892, 938), 'numpy.random.choice', 'np.random.choice', (['[0.5, -0.5]'], {'size': 'noise_size'}), '([0.5, -0.5], size=noise_size)\n', (908, 938), True, 'import numpy as np\n'), ((499, 517), 'numpy.sinc', 'np.sinc', (['(2 * W * t)'], {}), '(2 * W * t)\n', (506, 517), True, 'import numpy as np\n'), ((518, 551), 'numpy.cos', 'np.cos', (['(2 * np.pi * alpha * W * t)'], {}), '(2 * np.pi * alpha * W * t)\n', (524, 551), True, 'import numpy as np\n')]
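The noise sequences above are saved "for cross correlation later"; a hedged sketch of that later step, locating `noise1` (reused from the run above) inside a received stream by matched filtering. The 50-sample offset is arbitrary.

import numpy as np

rx = np.concatenate([np.zeros(50), noise1.real, np.zeros(50)])
corr = np.correlate(rx, noise1.real, mode='valid')
print(np.argmax(np.abs(corr)))   # 50 -- the offset where the template was inserted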
from nipype.interfaces.ants.base import ANTSCommandInputSpec, ANTSCommand
from nipype.interfaces.ants.segmentation import N4BiasFieldCorrectionOutputSpec
from nipype.interfaces.base import (File, traits, isdefined)
from nipype.utils.filemanip import split_filename
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.fsl as fsl
import os
from nipype.workflows.dmri.fsl.artifacts import _xfm_jacobian, _checkrnum
from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, \
    rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance

__author__ = '<NAME>'
__date__ = "2015-05-08"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"

"""This module overwrites some parts of Nipype that did not work correctly.

At a low level, two functions were broken. To make Nipype use the fixed
versions of these functions, the entire call chain has to be copied here.
Also, the original implementation calculated the read-out times from the EPI
parameters; this implementation requires you to predefine the read-out time
(the 'read_out_time' key in the EPI parameter dictionaries).
"""


def all_peb_pipeline(name='hmc_sdc_ecc',
                     epi_params={'read_out_time': None, 'enc_dir': 'y-'},
                     altepi_params={'read_out_time': None, 'enc_dir': 'y'}):
    """
    Builds a pipeline including three artifact corrections: head-motion
    correction (HMC), susceptibility-derived distortion correction (SDC),
    and Eddy currents-derived distortion correction (ECC).

    .. warning:: this workflow rotates the gradients table (*b*-vectors)
      [Leemans09]_.

    Examples
    --------
    >>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline
    >>> allcorr = all_peb_pipeline()
    >>> allcorr.inputs.inputnode.in_file = 'epi.nii'
    >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii'
    >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval'
    >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec'
    >>> allcorr.run() # doctest: +SKIP
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bvec',
                                                  'in_bval', 'alt_file']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_mask',
                                                   'out_bvec']),
                         name='outputnode')

    avg_b0_0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
                                  output_names=['out_file'],
                                  function=b0_average),
                     name='b0_avg_pre')
    avg_b0_1 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
                                  output_names=['out_file'],
                                  function=b0_average),
                     name='b0_avg_post')
    bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
                     name='bet_dwi_pre')
    bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
                     name='bet_dwi_post')

    hmc = hmc_pipeline()
    sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params)
    ecc = ecc_pipeline()
    unwarp = apply_all_corrections()

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, hmc, [('in_file', 'inputnode.in_file'),
                          ('in_bvec', 'inputnode.in_bvec'),
                          ('in_bval', 'inputnode.in_bval')]),
        (inputnode, avg_b0_0, [('in_file', 'in_dwi'),
                               ('in_bval', 'in_bval')]),
        (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]),
        (bet_dwi0, hmc, [('mask_file', 'inputnode.in_mask')]),
        (hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]),
        (bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]),
        (inputnode, sdc, [('in_bval', 'inputnode.in_bval'),
                          ('alt_file', 'inputnode.alt_file')]),
        (inputnode, ecc, [('in_file', 'inputnode.in_file'),
                          ('in_bval', 'inputnode.in_bval')]),
        (bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]),
        (hmc, ecc, [('outputnode.out_xfms', 'inputnode.in_xfms')]),
        (ecc, avg_b0_1, [('outputnode.out_file', 'in_dwi')]),
        (inputnode, avg_b0_1, [('in_bval', 'in_bval')]),
        (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]),
        (inputnode, unwarp, [('in_file', 'inputnode.in_dwi')]),
        (hmc, unwarp, [('outputnode.out_xfms', 'inputnode.in_hmc')]),
        (ecc, unwarp, [('outputnode.out_xfms', 'inputnode.in_ecc')]),
        (sdc, unwarp, [('outputnode.out_warp', 'inputnode.in_sdc')]),
        (hmc, outputnode, [('outputnode.out_bvec', 'out_bvec')]),
        (unwarp, outputnode, [('outputnode.out_file', 'out_file')]),
        (bet_dwi1, outputnode, [('mask_file', 'out_mask')])
    ])
    return wf


def hmc_pipeline(name='motion_correct'):
    """
    HMC stands for head-motion correction.

    Creates a pipeline that corrects for head motion artifacts in dMRI
    sequences. It takes a series of diffusion weighted images and rigidly
    co-registers them to one reference image. Finally, the `b`-matrix is
    rotated accordingly [Leemans09]_ making use of the rotation matrix
    obtained by FLIRT.

    Search angles have been limited to 4 degrees, based on results in
    [Yendiki13]_.

    A list of rigid transformation matrices is provided, so that transforms
    can be chained. This is useful to correct for artifacts with only one
    interpolation process (as previously discussed `here
    <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_), and
    also to compute nuisance regressors as proposed by [Yendiki13]_.

    .. warning:: This workflow rotates the `b`-vectors, so please be advised
      that not all the dicom converters ensure the consistency between the
      resulting nifti orientation and the gradients table (e.g. dcm2nii
      checks it).

    .. admonition:: References

      .. [Leemans09] <NAME>, and <NAME>, `The B-matrix must be rotated when
        correcting for subject motion in DTI data
        <http://dx.doi.org/10.1002/mrm.21890>`_,
        Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890.

      .. [Yendiki13] <NAME> et al., `Spurious group differences due to head
        motion in a diffusion MRI study
        <http://dx.doi.org/10.1016/j.neuroimage.2013.11.027>`_.
        Neuroimage. 21(88C):79-90. 2013.
        doi: 10.1016/j.neuroimage.2013.11.027

    Example
    -------

    >>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline
    >>> hmc = hmc_pipeline()
    >>> hmc.inputs.inputnode.in_file = 'diffusion.nii'
    >>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec'
    >>> hmc.inputs.inputnode.in_bval = 'diffusion.bval'
    >>> hmc.inputs.inputnode.in_mask = 'mask.nii'
    >>> hmc.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file - input dwi file
        inputnode.in_mask - weights mask of reference image (a file with data
          range in [0.0, 1.0], indicating the weight of each voxel when
          computing the metric)
        inputnode.in_bvec - gradients file (b-vectors)
        inputnode.ref_num (optional, default=0) index of the b0 volume that
          should be taken as reference

    Outputs::

        outputnode.out_file - corrected dwi file
        outputnode.out_bvec - rotated gradient vectors table
        outputnode.out_xfms - list of transformation matrices
    """
    from nipype.workflows.data import get_flirt_schedule
    params = dict(dof=6, bgvalue=0, save_log=True, no_search=True,
                  # cost='mutualinfo', cost_func='mutualinfo', bins=64,
                  schedule=get_flirt_schedule('hmc'))

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'ref_num',
                                                  'in_bvec', 'in_bval',
                                                  'in_mask']),
                        name='inputnode')
    split = pe.Node(niu.Function(function=hmc_split,
                                 input_names=['in_file', 'in_bval', 'ref_num'],
                                 output_names=['out_ref', 'out_mov',
                                               'out_bval', 'volid']),
                    name='SplitDWI')
    flirt = dwi_flirt(flirt_param=params)
    insmat = pe.Node(niu.Function(input_names=['inlist', 'volid'],
                                  output_names=['out'],
                                  function=insert_mat),
                     name='InsertRefmat')
    rot_bvec = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'],
                                    output_names=['out_file'],
                                    function=rotate_bvecs),
                       name='Rotate_Bvec')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_bvec',
                                                   'out_xfms']),
                         name='outputnode')

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, split, [('in_file', 'in_file'),
                            ('in_bval', 'in_bval'),
                            ('ref_num', 'ref_num')]),
        (inputnode, flirt, [('in_mask', 'inputnode.ref_mask')]),
        (split, flirt, [('out_ref', 'inputnode.reference'),
                        ('out_mov', 'inputnode.in_file'),
                        ('out_bval', 'inputnode.in_bval')]),
        (flirt, insmat, [('outputnode.out_xfms', 'inlist')]),
        (split, insmat, [('volid', 'volid')]),
        (inputnode, rot_bvec, [('in_bvec', 'in_bvec')]),
        (insmat, rot_bvec, [('out', 'in_matrix')]),
        (rot_bvec, outputnode, [('out_file', 'out_bvec')]),
        (flirt, outputnode, [('outputnode.out_file', 'out_file')]),
        (insmat, outputnode, [('out', 'out_xfms')])
    ])
    return wf


def ecc_pipeline(name='eddy_correct'):
    """
    ECC stands for Eddy currents correction.

    Creates a pipeline that corrects for artifacts induced by Eddy currents
    in dMRI sequences. It takes a series of diffusion weighted images and
    linearly co-registers them to one reference image (the average of all
    b0s in the dataset).

    DWIs are also modulated by the determinant of the Jacobian as indicated
    by [Jones10]_ and [Rohde04]_.

    A list of rigid transformation matrices can be provided, sourcing from a
    :func:`.hmc_pipeline` workflow, to initialize registrations in a *motion
    free* framework.

    A list of affine transformation matrices is available as output, so that
    transforms can be chained (discussion `here
    <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_).

    .. admonition:: References

      .. [Jones10] Jones DK, `The signal intensity must be modulated by the
        determinant of the Jacobian when correcting for eddy currents in
        diffusion MRI
        <http://cds.ismrm.org/protected/10MProceedings/files/1644_129.pdf>`_,
        Proc. ISMRM 18th Annual Meeting, (2010).

      .. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of
        Motion and Distortion in Diffusion-Weighted MRI
        <http://stbb.nichd.nih.gov/pdf/com_app_cor_mri04.pdf>`_,
        MRM 51:103-114 (2004).

    Example
    -------

    >>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline
    >>> ecc = ecc_pipeline()
    >>> ecc.inputs.inputnode.in_file = 'diffusion.nii'
    >>> ecc.inputs.inputnode.in_bval = 'diffusion.bval'
    >>> ecc.inputs.inputnode.in_mask = 'mask.nii'
    >>> ecc.run() # doctest: +SKIP

    Inputs::

        inputnode.in_file - input dwi file
        inputnode.in_mask - weights mask of reference image (a file with data
          range in [0.0, 1.0], indicating the weight of each voxel when
          computing the metric)
        inputnode.in_bval - b-values table
        inputnode.in_xfms - list of matrices to initialize registration (from
          head-motion correction)

    Outputs::

        outputnode.out_file - corrected dwi file
        outputnode.out_xfms - list of transformation matrices
    """
    from nipype.workflows.data import get_flirt_schedule
    params = dict(dof=12, no_search=True, interp='spline', bgvalue=0,
                  schedule=get_flirt_schedule('ecc'))
    # cost='normmi', cost_func='normmi', bins=64,

    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
                                                  'in_mask', 'in_xfms']),
                        name='inputnode')
    avg_b0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
                                  output_names=['out_file'],
                                  function=b0_average),
                     name='b0_avg')
    pick_dws = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval', 'b'],
                                    output_names=['out_file'],
                                    function=extract_bval),
                       name='ExtractDWI')
    pick_dws.inputs.b = 'diff'

    flirt = dwi_flirt(flirt_param=params, excl_nodiff=True)

    mult = pe.MapNode(fsl.BinaryMaths(operation='mul'), name='ModulateDWIs',
                      iterfield=['in_file', 'operand_value'])
    thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
                       name='RemoveNegative')
    split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
    get_mat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms'],
                                   output_names=['out_files'],
                                   function=recompose_xfm),
                      name='GatherMatrices')
    merge = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval',
                                              'in_corrected'],
                                 output_names=['out_file'],
                                 function=recompose_dwi),
                    name='MergeDWIs')

    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
                                                   'out_xfms']),
                         name='outputnode')

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, avg_b0, [('in_file', 'in_dwi'),
                             ('in_bval', 'in_bval')]),
        (inputnode, pick_dws, [('in_file', 'in_dwi'),
                               ('in_bval', 'in_bval')]),
        (inputnode, merge, [('in_file', 'in_dwi'),
                            ('in_bval', 'in_bval')]),
        (inputnode, flirt, [('in_mask', 'inputnode.ref_mask'),
                            ('in_xfms', 'inputnode.in_xfms'),
                            ('in_bval', 'inputnode.in_bval')]),
        (inputnode, get_mat, [('in_bval', 'in_bval')]),
        (avg_b0, flirt, [('out_file', 'inputnode.reference')]),
        (pick_dws, flirt, [('out_file', 'inputnode.in_file')]),
        (flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]),
        (flirt, mult, [(('outputnode.out_xfms', _xfm_jacobian),
                        'operand_value')]),
        (flirt, split, [('outputnode.out_file', 'in_file')]),
        (split, mult, [('out_files', 'in_file')]),
        (mult, thres, [('out_file', 'in_file')]),
        (thres, merge, [('out_file', 'in_corrected')]),
        (get_mat, outputnode, [('out_files', 'out_xfms')]),
        (merge, outputnode, [('out_file', 'out_file')])
    ])
    return wf


def sdc_peb(name='peb_correction',
            epi_params={'read_out_time': None, 'enc_dir': 'y-'},
            altepi_params={'read_out_time': None, 'enc_dir': 'y'}):
    """
    SDC stands for susceptibility distortion correction. PEB stands for
    phase-encoding-based.

    The phase-encoding-based (PEB) method implements SDC by acquiring
    diffusion images with two different encoding directions [Andersson2003]_.
    The most typical case is acquiring with opposed phase-gradient blips
    (e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*) as in
    [Chiou2000]_, but it is also possible to use orthogonal configurations
    [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*, or equivalently *-y* and *x*).
    This workflow uses the implementation of FSL (`TOPUP
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP>`_).

    Example
    -------

    >>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb
    >>> peb = sdc_peb()
    >>> peb.inputs.inputnode.in_file = 'epi.nii'
    >>> peb.inputs.inputnode.alt_file = 'epi_rev.nii'
    >>> peb.inputs.inputnode.in_bval = 'diffusion.bval'
    >>> peb.inputs.inputnode.in_mask = 'mask.nii'
    >>> peb.run() # doctest: +SKIP

    .. admonition:: References

      .. [Andersson2003] <NAME>L et al., `How to correct susceptibility
        distortions in spin-echo echo-planar images: application to diffusion
        tensor imaging <http://dx.doi.org/10.1016/S1053-8119(03)00336-7>`_.
        Neuroimage. 2003 Oct;20(2):870-88.
        doi: 10.1016/S1053-8119(03)00336-7

      .. [Cordes2000] <NAME> et al., Geometric distortion correction in EPI
        using two images with orthogonal phase-encoding directions, in Proc.
        ISMRM (8), p.1712, Denver, US, 2000.

      .. [Chiou2000] <NAME>, and <NAME>, A simple method to correct
        off-resonance related distortion in echo planar imaging, in Proc.
        ISMRM (8), p.1712, Denver, US, 2000.
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
                                                  'in_mask', 'alt_file',
                                                  'ref_num']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_vsm',
                                                   'out_warp']),
                         name='outputnode')

    b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref')
    b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt')
    b0_comb = pe.Node(niu.Merge(2), name='b0_list')
    b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged')

    topup = pe.Node(fsl.TOPUP(), name='topup')
    topup.inputs.encoding_direction = [epi_params['enc_dir'],
                                       altepi_params['enc_dir']]

    readout = epi_params['read_out_time']
    topup.inputs.readout_times = [readout,
                                  altepi_params['read_out_time']]

    unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'),
                     name='unwarp')

    # scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'],
    #                   output_names=['factor'], function=_get_zoom),
    #                   name='GetZoom')
    # scaling.inputs.enc_dir = epi_params['enc_dir']
    vsm2dfm = vsm2warp()
    vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir']
    vsm2dfm.inputs.inputnode.scaling = readout

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, b0_ref, [('in_file', 'in_file'),
                             (('ref_num', _checkrnum), 't_min')]),
        (inputnode, b0_alt, [('alt_file', 'in_file'),
                             (('ref_num', _checkrnum), 't_min')]),
        (b0_ref, b0_comb, [('roi_file', 'in1')]),
        (b0_alt, b0_comb, [('roi_file', 'in2')]),
        (b0_comb, b0_merge, [('out', 'in_files')]),
        (b0_merge, topup, [('merged_file', 'in_file')]),
        (topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'),
                         ('out_movpar', 'in_topup_movpar'),
                         ('out_enc_file', 'encoding_file')]),
        (inputnode, unwarp, [('in_file', 'in_files')]),
        (unwarp, outputnode, [('out_corrected', 'out_file')]),
        # (b0_ref, scaling, [('roi_file', 'in_file')]),
        # (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]),
        (b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]),
        (topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]),
        (topup, outputnode, [('out_field', 'out_vsm')]),
        (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')])
    ])
    return wf


def hmc_split(in_file, in_bval, ref_num=0, lowbval=25.0):
    """
    Selects the reference and moving volumes from a dwi dataset
    for the purpose of HMC.
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    from nipype.interfaces.base import isdefined

    im = nb.load(in_file)
    data = im.get_data()
    hdr = im.get_header().copy()
    bval = np.loadtxt(in_bval)

    lowbs = np.where(bval <= lowbval)[0]

    volid = lowbs[0]
    if (isdefined(ref_num) and (ref_num < len(lowbs))):
        volid = [ref_num]

    # todo add next two lines in Nipype git
    if isinstance(volid, list) and len(volid) == 1:
        volid = volid[0]

    if volid == 0:
        data = data[..., 1:]
        bval = bval[1:]
    elif volid == (data.shape[-1] - 1):
        data = data[..., :-1]
        bval = bval[:-1]
    else:
        data = np.concatenate((data[..., :volid], data[..., (volid + 1):]),
                              axis=3)
        bval = np.hstack((bval[:volid], bval[(volid + 1):]))

    out_ref = op.abspath('hmc_ref.nii.gz')
    out_mov = op.abspath('hmc_mov.nii.gz')
    out_bval = op.abspath('bval_split.txt')

    refdata = data[..., volid]
    hdr.set_data_shape(refdata.shape)
    nb.Nifti1Image(refdata, im.get_affine(), hdr).to_filename(out_ref)

    hdr.set_data_shape(data.shape)
    nb.Nifti1Image(data, im.get_affine(), hdr).to_filename(out_mov)
    np.savetxt(out_bval, bval)
    return [out_ref, out_mov, out_bval, volid]


class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
    # todo dimensionality in Nipype git
    dimension = traits.Enum(3, 2, argstr='--image-dimensionality %d',
                            usedefault=True,
                            desc='image dimension (2 or 3)')
    input_image = File(argstr='--input-image %s', mandatory=True,
                       desc=('image to apply transformation to (generally a '
                             'coregistered functional)'))
    mask_image = File(argstr='--mask-image %s')
    weight_image = File(argstr='--weight-image %s')
    output_image = traits.Str(argstr='--output %s',
                              desc='output file name', genfile=True,
                              hash_files=False)
    bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
    bspline_order = traits.Int(requires=['bspline_fitting_distance'])
    shrink_factor = traits.Int(argstr="--shrink-factor %d")
    n_iterations = traits.List(traits.Int(), argstr="--convergence %s",
                              requires=['convergence_threshold'])
    convergence_threshold = traits.Float(requires=['n_iterations'])
    save_bias = traits.Bool(False, mandatory=True, usedefault=True,
                            desc=('True if the estimated bias should be saved'
                                  ' to file.'), xor=['bias_image'])
    bias_image = File(desc='Filename for the estimated bias.',
                      hash_files=False)


class N4BiasFieldCorrection(ANTSCommand):
    """N4 is a variant of the popular N3 (nonparameteric nonuniform
    normalization) retrospective bias correction algorithm. Based on the
    assumption that the corruption of the low frequency bias field can be
    modeled as a convolution of the intensity histogram by a Gaussian, the
    basic algorithmic protocol is to iterate between deconvolving the
    intensity histogram by a Gaussian, remapping the intensities, and then
    spatially smoothing this result by a B-spline modeling of the bias field
    itself. The modifications from and improvements obtained over the
    original N3 algorithm are described in [Tustison2010]_.

    .. [Tustison2010] <NAME> et al., N4ITK: Improved N3 Bias Correction,
      IEEE Transactions on Medical Imaging, 29(6):1310-1320, June 2010.

    Examples
    --------

    >>> import copy
    >>> from nipype.interfaces.ants import N4BiasFieldCorrection
    >>> n4 = N4BiasFieldCorrection()
    >>> n4.inputs.dimension = 3
    >>> n4.inputs.input_image = 'structural.nii'
    >>> n4.inputs.bspline_fitting_distance = 300
    >>> n4.inputs.shrink_factor = 3
    >>> n4.inputs.n_iterations = [50,50,30,20]
    >>> n4.inputs.convergence_threshold = 1e-6
    >>> n4.cmdline
    'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
--image-dimension 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'

    >>> n4_2 = copy.deepcopy(n4)
    >>> n4_2.inputs.bspline_order = 5
    >>> n4_2.cmdline
    'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \
--image-dimension 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'

    >>> n4_3 = N4BiasFieldCorrection()
    >>> n4_3.inputs.input_image = 'structural.nii'
    >>> n4_3.inputs.save_bias = True
    >>> n4_3.cmdline
    'N4BiasFieldCorrection --image-dimension 3 --input-image structural.nii \
--output [ structural_corrected.nii, structural_bias.nii ]'
    """

    _cmd = 'N4BiasFieldCorrection'
    input_spec = N4BiasFieldCorrectionInputSpec
    output_spec = N4BiasFieldCorrectionOutputSpec

    def _gen_filename(self, name):
        if name == 'output_image':
            output = self.inputs.output_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_image)
                output = name + '_corrected' + ext
            return output

        if name == 'bias_image':
            output = self.inputs.bias_image
            if not isdefined(output):
                _, name, ext = split_filename(self.inputs.input_image)
                output = name + '_bias' + ext
            return output
        return None

    def _format_arg(self, name, trait_spec, value):
        if ((name == 'output_image') and
                (self.inputs.save_bias or isdefined(self.inputs.bias_image))):
            bias_image = self._gen_filename('bias_image')
            output = self._gen_filename('output_image')
            newval = '[ %s, %s ]' % (output, bias_image)
            return trait_spec.argstr % newval

        if name == 'bspline_fitting_distance':
            if isdefined(self.inputs.bspline_order):
                newval = '[ %g, %d ]' % (value, self.inputs.bspline_order)
            else:
                newval = '[ %g ]' % value
            return trait_spec.argstr % newval

        if ((name == 'n_iterations') and
                (isdefined(self.inputs.convergence_threshold))):
            newval = '[ %s, %g ]' % ('x'.join([str(elt) for elt in value]),
                                     self.inputs.convergence_threshold)
            return trait_spec.argstr % newval

        return super(N4BiasFieldCorrection, self)._format_arg(name,
                                                              trait_spec,
                                                              value)

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        skip += ['save_bias', 'bias_image']
        return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip)

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs['output_image'] = os.path.abspath(
            self._gen_filename('output_image'))

        if self.inputs.save_bias or isdefined(self.inputs.bias_image):
            outputs['bias_image'] = os.path.abspath(
                self._gen_filename('bias_image'))
        return outputs


# todo remove this if N4BiasFieldCorrection works again
def dwi_flirt(name='DWICoregistration', excl_nodiff=False, flirt_param={}):
    """
    Generates a workflow for linear registration of dwi volumes
    """
    inputnode = pe.Node(niu.IdentityInterface(fields=['reference', 'in_file',
                                                  'ref_mask', 'in_xfms',
                                                  'in_bval']),
                        name='inputnode')

    initmat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms',
                                              'excl_nodiff'],
                                  output_names=['init_xfms'],
                                  function=_checkinitxfm),
                      name='InitXforms')
    initmat.inputs.excl_nodiff = excl_nodiff
    dilate = pe.Node(fsl.maths.MathsCommand(nan2zeros=True,
                                            args='-kernel sphere 5 -dilM'),
                     name='MskDilate')
    split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
    pick_ref = pe.Node(niu.Select(), name='Pick_b0')
    n4 = pe.Node(N4BiasFieldCorrection(dimension=3), name='Bias')
    enhb0 = pe.Node(niu.Function(input_names=['in_file', 'in_mask',
                                              'clip_limit'],
                                 output_names=['out_file'],
                                 function=enhance),
                    name='B0Equalize')
    enhb0.inputs.clip_limit = 0.015
    enhdw = pe.MapNode(niu.Function(input_names=['in_file', 'in_mask'],
                                    output_names=['out_file'],
                                    function=enhance),
                       name='DWEqualize', iterfield=['in_file'])
    flirt = pe.MapNode(fsl.FLIRT(**flirt_param), name='CoRegistration',
                       iterfield=['in_file', 'in_matrix_file'])
    thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
                       name='RemoveNegative')
    merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
                                                   'out_xfms']),
                         name='outputnode')

    wf = pe.Workflow(name=name)
    wf.connect([
        (inputnode, split, [('in_file', 'in_file')]),
        (inputnode, dilate, [('ref_mask', 'in_file')]),
        (inputnode, enhb0, [('ref_mask', 'in_mask')]),
        (inputnode, initmat, [('in_xfms', 'in_xfms'),
                              ('in_bval', 'in_bval')]),
        (inputnode, n4, [('reference', 'input_image'),
                         ('ref_mask', 'mask_image')]),
        (dilate, flirt, [('out_file', 'ref_weight'),
                         ('out_file', 'in_weight')]),
        (n4, enhb0, [('output_image', 'in_file')]),
        (split, enhdw, [('out_files', 'in_file')]),
        (dilate, enhdw, [('out_file', 'in_mask')]),
        (enhb0, flirt, [('out_file', 'reference')]),
        (enhdw, flirt, [('out_file', 'in_file')]),
        (initmat, flirt, [('init_xfms', 'in_matrix_file')]),
        (flirt, thres, [('out_file', 'in_file')]),
        (thres, merge, [('out_file', 'in_files')]),
        (merge, outputnode, [('merged_file', 'out_file')]),
        (flirt, outputnode, [('out_matrix_file', 'out_xfms')])
    ])
    return wf
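
# A minimal usage sketch (an editorial addition, not part of the original
# module): wiring up the corrected pipeline with predefined read-out times.
# The file names and the 0.095 s read-out time are hypothetical placeholders
# that must be replaced with values from the actual acquisition protocol;
# running the workflow requires FSL (and ANTs for the N4 node) on the PATH.
if __name__ == '__main__':
    wf = all_peb_pipeline(
        name='hmc_sdc_ecc',
        epi_params={'read_out_time': 0.095, 'enc_dir': 'y-'},
        altepi_params={'read_out_time': 0.095, 'enc_dir': 'y'})
    wf.inputs.inputnode.in_file = 'dwi.nii.gz'
    wf.inputs.inputnode.alt_file = 'dwi_rev.nii.gz'
    wf.inputs.inputnode.in_bval = 'dwi.bval'
    wf.inputs.inputnode.in_bvec = 'dwi.bvec'
    wf.run()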
[ "nipype.interfaces.fsl.ApplyTOPUP", "nipype.interfaces.base.traits.Float", "nipype.interfaces.utility.IdentityInterface", "nipype.interfaces.fsl.ExtractROI", "nipype.interfaces.fsl.Split", "nipype.interfaces.fsl.Merge", "nipype.workflows.dmri.fsl.utils.apply_all_corrections", "nipype.interfaces.base.isdefined", "os.path.abspath", "nipype.interfaces.fsl.Threshold", "numpy.savetxt", "nipype.interfaces.base.traits.Int", "nipype.interfaces.base.traits.Bool", "numpy.loadtxt", "nipype.workflows.data.get_flirt_schedule", "nipype.pipeline.engine.Workflow", "nipype.utils.filemanip.split_filename", "nipype.interfaces.fsl.TOPUP", "nipype.interfaces.base.traits.Enum", "nipype.interfaces.utility.Merge", "numpy.hstack", "nipype.interfaces.utility.Select", "nipype.interfaces.fsl.FLIRT", "nipype.interfaces.utility.Function", "numpy.concatenate", "nipype.interfaces.fsl.BET", "nipype.interfaces.base.traits.Str", "nipype.workflows.dmri.fsl.utils.vsm2warp", "nibabel.load", "nipype.interfaces.fsl.maths.MathsCommand", "nipype.interfaces.base.File", "numpy.where", "nipype.interfaces.fsl.BinaryMaths" ]
[((3100, 3123), 'nipype.workflows.dmri.fsl.utils.apply_all_corrections', 'apply_all_corrections', ([], {}), '()\n', (3121, 3123), False, 'from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance\n'), ((3134, 3156), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (3145, 3156), True, 'import nipype.pipeline.engine as pe\n'), ((8736, 8758), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (8747, 8758), True, 'import nipype.pipeline.engine as pe\n'), ((13563, 13585), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (13574, 13585), True, 'import nipype.pipeline.engine as pe\n'), ((18116, 18126), 'nipype.workflows.dmri.fsl.utils.vsm2warp', 'vsm2warp', ([], {}), '()\n', (18124, 18126), False, 'from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance\n'), ((18245, 18267), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (18256, 18267), True, 'import nipype.pipeline.engine as pe\n'), ((19860, 19876), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (19867, 19876), True, 'import nibabel as nb\n'), ((19946, 19965), 'numpy.loadtxt', 'np.loadtxt', (['in_bval'], {}), '(in_bval)\n', (19956, 19965), True, 'import numpy as np\n'), ((20574, 20602), 'os.path.abspath', 'op.abspath', (['"""hmc_ref.nii.gz"""'], {}), "('hmc_ref.nii.gz')\n", (20584, 20602), True, 'import os.path as op\n'), ((20617, 20645), 'os.path.abspath', 'op.abspath', (['"""hmc_mov.nii.gz"""'], {}), "('hmc_mov.nii.gz')\n", (20627, 20645), True, 'import os.path as op\n'), ((20661, 20689), 'os.path.abspath', 'op.abspath', (['"""bval_split.txt"""'], {}), "('bval_split.txt')\n", (20671, 20689), True, 'import os.path as op\n'), ((20939, 20965), 'numpy.savetxt', 'np.savetxt', (['out_bval', 'bval'], {}), '(out_bval, bval)\n', (20949, 20965), True, 'import numpy as np\n'), ((21131, 21239), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['(3)', '(2)'], {'argstr': '"""--image-dimensionality %d"""', 'usedefault': '(True)', 'desc': '"""image dimension (2 or 3)"""'}), "(3, 2, argstr='--image-dimensionality %d', usedefault=True, desc\n ='image dimension (2 or 3)')\n", (21142, 21239), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21309, 21440), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--input-image %s"""', 'mandatory': '(True)', 'desc': '"""image to apply transformation to (generally a coregistered functional)"""'}), "(argstr='--input-image %s', mandatory=True, desc=\n 'image to apply transformation to (generally a coregistered functional)')\n", (21313, 21440), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21510, 21540), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--mask-image %s"""'}), "(argstr='--mask-image %s')\n", (21514, 21540), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21560, 21592), 'nipype.interfaces.base.File', 'File', ([], {'argstr': '"""--weight-image %s"""'}), "(argstr='--weight-image %s')\n", (21564, 21592), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21612, 21705), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'argstr': '"""--output %s"""', 'desc': 
'"""output file name"""', 'genfile': '(True)', 'hash_files': '(False)'}), "(argstr='--output %s', desc='output file name', genfile=True,\n hash_files=False)\n", (21622, 21705), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21793, 21836), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'argstr': '"""--bspline-fitting %s"""'}), "(argstr='--bspline-fitting %s')\n", (21805, 21836), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21857, 21906), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {'requires': "['bspline_fitting_distance']"}), "(requires=['bspline_fitting_distance'])\n", (21867, 21906), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((21927, 21966), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {'argstr': '"""--shrink-factor %d"""'}), "(argstr='--shrink-factor %d')\n", (21937, 21966), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22134, 22173), 'nipype.interfaces.base.traits.Float', 'traits.Float', ([], {'requires': "['n_iterations']"}), "(requires=['n_iterations'])\n", (22146, 22173), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22190, 22326), 'nipype.interfaces.base.traits.Bool', 'traits.Bool', (['(False)'], {'mandatory': '(True)', 'usedefault': '(True)', 'desc': '"""True if the estimated bias should be saved to file."""', 'xor': "['bias_image']"}), "(False, mandatory=True, usedefault=True, desc=\n 'True if the estimated bias should be saved to file.', xor=['bias_image'])\n", (22201, 22326), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((22406, 22469), 'nipype.interfaces.base.File', 'File', ([], {'desc': '"""Filename for the estimated bias."""', 'hash_files': '(False)'}), "(desc='Filename for the estimated bias.', hash_files=False)\n", (22410, 22469), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((28747, 28769), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', ([], {'name': 'name'}), '(name=name)\n', (28758, 28769), True, 'import nipype.pipeline.engine as pe\n'), ((2117, 2192), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bvec', 'in_bval', 'alt_file']"}), "(fields=['in_file', 'in_bvec', 'in_bval', 'alt_file'])\n", (2138, 2192), True, 'import nipype.interfaces.utility as niu\n'), ((2262, 2328), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_mask', 'out_bvec']"}), "(fields=['out_file', 'out_mask', 'out_bvec'])\n", (2283, 2328), True, 'import nipype.interfaces.utility as niu\n'), ((2398, 2497), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (2410, 2497), True, 'import nipype.interfaces.utility as niu\n'), ((2583, 2682), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (2595, 2682), True, 'import nipype.interfaces.utility as niu\n'), ((2769, 2810), 'nipype.interfaces.fsl.BET', 'fsl.BET', ([], {'frac': '(0.3)', 'mask': '(True)', 'robust': '(True)'}), '(frac=0.3, mask=True, robust=True)\n', (2776, 2810), True, 'import nipype.interfaces.fsl as fsl\n'), 
((2878, 2919), 'nipype.interfaces.fsl.BET', 'fsl.BET', ([], {'frac': '(0.3)', 'mask': '(True)', 'robust': '(True)'}), '(frac=0.3, mask=True, robust=True)\n', (2885, 2919), True, 'import nipype.interfaces.fsl as fsl\n'), ((7792, 7881), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'ref_num', 'in_bvec', 'in_bval', 'in_mask']"}), "(fields=['in_file', 'ref_num', 'in_bvec', 'in_bval',\n 'in_mask'])\n", (7813, 7881), True, 'import nipype.interfaces.utility as niu\n'), ((7941, 8082), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': 'hmc_split', 'input_names': "['in_file', 'in_bval', 'ref_num']", 'output_names': "['out_ref', 'out_mov', 'out_bval', 'volid']"}), "(function=hmc_split, input_names=['in_file', 'in_bval',\n 'ref_num'], output_names=['out_ref', 'out_mov', 'out_bval', 'volid'])\n", (7953, 8082), True, 'import nipype.interfaces.utility as niu\n'), ((8220, 8312), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['inlist', 'volid']", 'output_names': "['out']", 'function': 'insert_mat'}), "(input_names=['inlist', 'volid'], output_names=['out'],\n function=insert_mat)\n", (8232, 8312), True, 'import nipype.interfaces.utility as niu\n'), ((8396, 8501), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bvec', 'in_matrix']", 'output_names': "['out_file']", 'function': 'rotate_bvecs'}), "(input_names=['in_bvec', 'in_matrix'], output_names=['out_file'\n ], function=rotate_bvecs)\n", (8408, 8501), True, 'import nipype.interfaces.utility as niu\n'), ((8589, 8655), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_bvec', 'out_xfms']"}), "(fields=['out_file', 'out_bvec', 'out_xfms'])\n", (8610, 8655), True, 'import nipype.interfaces.utility as niu\n'), ((12161, 12235), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bval', 'in_mask', 'in_xfms']"}), "(fields=['in_file', 'in_bval', 'in_mask', 'in_xfms'])\n", (12182, 12235), True, 'import nipype.interfaces.utility as niu\n'), ((12300, 12399), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval']", 'output_names': "['out_file']", 'function': 'b0_average'}), "(input_names=['in_dwi', 'in_bval'], output_names=['out_file'],\n function=b0_average)\n", (12312, 12399), True, 'import nipype.interfaces.utility as niu\n'), ((12477, 12584), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval', 'b']", 'output_names': "['out_file']", 'function': 'extract_bval'}), "(input_names=['in_dwi', 'in_bval', 'b'], output_names=[\n 'out_file'], function=extract_bval)\n", (12489, 12584), True, 'import nipype.interfaces.utility as niu\n'), ((12761, 12793), 'nipype.interfaces.fsl.BinaryMaths', 'fsl.BinaryMaths', ([], {'operation': '"""mul"""'}), "(operation='mul')\n", (12776, 12793), True, 'import nipype.interfaces.fsl as fsl\n'), ((12901, 12926), 'nipype.interfaces.fsl.Threshold', 'fsl.Threshold', ([], {'thresh': '(0.0)'}), '(thresh=0.0)\n', (12914, 12926), True, 'import nipype.interfaces.fsl as fsl\n'), ((13018, 13042), 'nipype.interfaces.fsl.Split', 'fsl.Split', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (13027, 13042), True, 'import nipype.interfaces.fsl as fsl\n'), ((13084, 13188), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bval', 'in_xfms']", 'output_names': "['out_files']", 'function': 
'recompose_xfm'}), "(input_names=['in_bval', 'in_xfms'], output_names=['out_files'],\n function=recompose_xfm)\n", (13096, 13188), True, 'import nipype.interfaces.utility as niu\n'), ((13273, 13391), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_dwi', 'in_bval', 'in_corrected']", 'output_names': "['out_file']", 'function': 'recompose_dwi'}), "(input_names=['in_dwi', 'in_bval', 'in_corrected'],\n output_names=['out_file'], function=recompose_dwi)\n", (13285, 13391), True, 'import nipype.interfaces.utility as niu\n'), ((13453, 13507), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_xfms']"}), "(fields=['out_file', 'out_xfms'])\n", (13474, 13507), True, 'import nipype.interfaces.utility as niu\n'), ((16923, 17013), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_file', 'in_bval', 'in_mask', 'alt_file', 'ref_num']"}), "(fields=['in_file', 'in_bval', 'in_mask', 'alt_file',\n 'ref_num'])\n", (16944, 17013), True, 'import nipype.interfaces.utility as niu\n'), ((17102, 17167), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_vsm', 'out_warp']"}), "(fields=['out_file', 'out_vsm', 'out_warp'])\n", (17123, 17167), True, 'import nipype.interfaces.utility as niu\n'), ((17235, 17259), 'nipype.interfaces.fsl.ExtractROI', 'fsl.ExtractROI', ([], {'t_size': '(1)'}), '(t_size=1)\n', (17249, 17259), True, 'import nipype.interfaces.fsl as fsl\n'), ((17297, 17321), 'nipype.interfaces.fsl.ExtractROI', 'fsl.ExtractROI', ([], {'t_size': '(1)'}), '(t_size=1)\n', (17311, 17321), True, 'import nipype.interfaces.fsl as fsl\n'), ((17360, 17372), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (17369, 17372), True, 'import nipype.interfaces.utility as niu\n'), ((17413, 17437), 'nipype.interfaces.fsl.Merge', 'fsl.Merge', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (17422, 17437), True, 'import nipype.interfaces.fsl as fsl\n'), ((17478, 17489), 'nipype.interfaces.fsl.TOPUP', 'fsl.TOPUP', ([], {}), '()\n', (17487, 17489), True, 'import nipype.interfaces.fsl as fsl\n'), ((17806, 17848), 'nipype.interfaces.fsl.ApplyTOPUP', 'fsl.ApplyTOPUP', ([], {'in_index': '[1]', 'method': '"""jac"""'}), "(in_index=[1], method='jac')\n", (17820, 17848), True, 'import nipype.interfaces.fsl as fsl\n'), ((19979, 20004), 'numpy.where', 'np.where', (['(bval <= lowbval)'], {}), '(bval <= lowbval)\n', (19987, 20004), True, 'import numpy as np\n'), ((20038, 20056), 'nipype.interfaces.base.isdefined', 'isdefined', (['ref_num'], {}), '(ref_num)\n', (20047, 20056), False, 'from nipype.interfaces.base import isdefined\n'), ((21998, 22010), 'nipype.interfaces.base.traits.Int', 'traits.Int', ([], {}), '()\n', (22008, 22010), False, 'from nipype.interfaces.base import File, traits, isdefined\n'), ((27120, 27212), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['reference', 'in_file', 'ref_mask', 'in_xfms', 'in_bval']"}), "(fields=['reference', 'in_file', 'ref_mask', 'in_xfms',\n 'in_bval'])\n", (27141, 27212), True, 'import nipype.interfaces.utility as niu\n'), ((27299, 27418), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_bval', 'in_xfms', 'excl_nodiff']", 'output_names': "['init_xfms']", 'function': '_checkinitxfm'}), "(input_names=['in_bval', 'in_xfms', 'excl_nodiff'],\n output_names=['init_xfms'], function=_checkinitxfm)\n", (27311, 27418), True, 
'import nipype.interfaces.utility as niu\n'), ((27558, 27627), 'nipype.interfaces.fsl.maths.MathsCommand', 'fsl.maths.MathsCommand', ([], {'nan2zeros': '(True)', 'args': '"""-kernel sphere 5 -dilM"""'}), "(nan2zeros=True, args='-kernel sphere 5 -dilM')\n", (27580, 27627), True, 'import nipype.interfaces.fsl as fsl\n'), ((27688, 27712), 'nipype.interfaces.fsl.Split', 'fsl.Split', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (27697, 27712), True, 'import nipype.interfaces.fsl as fsl\n'), ((27755, 27767), 'nipype.interfaces.utility.Select', 'niu.Select', ([], {}), '()\n', (27765, 27767), True, 'import nipype.interfaces.utility as niu\n'), ((27871, 27983), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_file', 'in_mask', 'clip_limit']", 'output_names': "['out_file']", 'function': 'enhance'}), "(input_names=['in_file', 'in_mask', 'clip_limit'], output_names\n =['out_file'], function=enhance)\n", (27883, 27983), True, 'import nipype.interfaces.utility as niu\n'), ((28111, 28208), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'input_names': "['in_file', 'in_mask']", 'output_names': "['out_file']", 'function': 'enhance'}), "(input_names=['in_file', 'in_mask'], output_names=['out_file'],\n function=enhance)\n", (28123, 28208), True, 'import nipype.interfaces.utility as niu\n'), ((28317, 28341), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {}), '(**flirt_param)\n', (28326, 28341), True, 'import nipype.interfaces.fsl as fsl\n'), ((28453, 28478), 'nipype.interfaces.fsl.Threshold', 'fsl.Threshold', ([], {'thresh': '(0.0)'}), '(thresh=0.0)\n', (28466, 28478), True, 'import nipype.interfaces.fsl as fsl\n'), ((28569, 28593), 'nipype.interfaces.fsl.Merge', 'fsl.Merge', ([], {'dimension': '"""t"""'}), "(dimension='t')\n", (28578, 28593), True, 'import nipype.interfaces.fsl as fsl\n'), ((28638, 28692), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_file', 'out_xfms']"}), "(fields=['out_file', 'out_xfms'])\n", (28659, 28692), True, 'import nipype.interfaces.utility as niu\n'), ((7740, 7765), 'nipype.workflows.data.get_flirt_schedule', 'get_flirt_schedule', (['"""hmc"""'], {}), "('hmc')\n", (7758, 7765), False, 'from nipype.workflows.data import get_flirt_schedule\n'), ((12059, 12084), 'nipype.workflows.data.get_flirt_schedule', 'get_flirt_schedule', (['"""ecc"""'], {}), "('ecc')\n", (12077, 12084), False, 'from nipype.workflows.data import get_flirt_schedule\n'), ((20399, 20465), 'numpy.concatenate', 'np.concatenate', (['(data[..., :volid], data[..., volid + 1:])'], {'axis': '(3)'}), '((data[..., :volid], data[..., volid + 1:]), axis=3)\n', (20413, 20465), True, 'import numpy as np\n'), ((20513, 20556), 'numpy.hstack', 'np.hstack', (['(bval[:volid], bval[volid + 1:])'], {}), '((bval[:volid], bval[volid + 1:]))\n', (20522, 20556), True, 'import numpy as np\n'), ((25700, 25736), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bspline_order'], {}), '(self.inputs.bspline_order)\n', (25709, 25736), False, 'from nipype.interfaces.base import isdefined\n'), ((25973, 26017), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.convergence_threshold'], {}), '(self.inputs.convergence_threshold)\n', (25982, 26017), False, 'from nipype.interfaces.base import isdefined\n'), ((26724, 26757), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bias_image'], {}), '(self.inputs.bias_image)\n', (26733, 26757), False, 'from nipype.interfaces.base import isdefined\n'), ((24806, 24823), 
'nipype.interfaces.base.isdefined', 'isdefined', (['output'], {}), '(output)\n', (24815, 24823), False, 'from nipype.interfaces.base import isdefined\n'), ((24856, 24895), 'nipype.utils.filemanip.split_filename', 'split_filename', (['self.inputs.input_image'], {}), '(self.inputs.input_image)\n', (24870, 24895), False, 'from nipype.utils.filemanip import split_filename\n'), ((25070, 25087), 'nipype.interfaces.base.isdefined', 'isdefined', (['output'], {}), '(output)\n', (25079, 25087), False, 'from nipype.interfaces.base import isdefined\n'), ((25120, 25159), 'nipype.utils.filemanip.split_filename', 'split_filename', (['self.inputs.input_image'], {}), '(self.inputs.input_image)\n', (25134, 25159), False, 'from nipype.utils.filemanip import split_filename\n'), ((25383, 25416), 'nipype.interfaces.base.isdefined', 'isdefined', (['self.inputs.bias_image'], {}), '(self.inputs.bias_image)\n', (25392, 25416), False, 'from nipype.interfaces.base import isdefined\n')]
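# Hedged usage sketch (editorial addition): exercising the patched
# N4BiasFieldCorrection interface from the module above, mirroring its own
# doctests. 'b0.nii.gz' is a hypothetical input file, and ANTs'
# N4BiasFieldCorrection executable must be on the PATH for run() to succeed.
n4 = N4BiasFieldCorrection()
n4.inputs.input_image = 'b0.nii.gz'
n4.inputs.dimension = 3
n4.inputs.save_bias = True  # also write the estimated bias field to disk
print(n4.cmdline)  # inspect the generated command line before running
result = n4.run()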
import os
import glob

import numpy as np
import pandas as pd

# geospatial libraries
from osgeo import gdal, osr

from xml.etree import ElementTree


def read_geo_info(fname):
    """ This function takes as input the name (and path) of a geotiff image
    and reads the geographic information of the image

    Parameters
    ----------
    fname : string
        path and file name of a geotiff image

    Returns
    -------
    spatialRef : string
        osr.SpatialReference in well known text
    geoTransform : tuple, size=(8,1)
        affine transformation coefficients, but also giving the image
        dimensions
    targetprj : osgeo.osr.SpatialReference() object
        coordinate reference system (CRS)
    rows : integer
        number of rows in the image, that is its height
    cols : integer
        number of columns in the image, that is its width
    bands : integer
        number of bands in the image, that is its depth

    See Also
    --------
    read_geo_image : basic function to import geographic imagery data
    """
    assert len(glob.glob(fname)) != 0, ('file does not seem to be present')

    img = gdal.Open(fname)
    spatialRef = img.GetProjection()
    geoTransform = img.GetGeoTransform()
    targetprj = osr.SpatialReference(wkt=img.GetProjection())
    rows = img.RasterYSize
    cols = img.RasterXSize
    bands = img.RasterCount

    geoTransform += (rows, cols,)
    return spatialRef, geoTransform, targetprj, rows, cols, bands


def read_geo_image(fname, boi=np.array([])):
    """ This function takes as input the name (and path) of a geotiff image,
    reads the image and returns the data as an array

    Parameters
    ----------
    fname : string
        geotiff file name and path.
    boi : numpy.array, size=(k,1)
        bands of interest; if a multispectral image is read, a selection can
        be specified

    Returns
    -------
    data : numpy.array, size=(m,n), ndim=2
        data array of the band
    spatialRef : string
        osr.SpatialReference in well known text
    geoTransform : tuple, size=(6,1)
        affine transformation coefficients.
    targetprj : osgeo.osr.SpatialReference() object
        coordinate reference system (CRS)

    See Also
    --------
    make_geo_im : basic function to write out geographic data
    read_geo_info : basic function to get meta data of geographic imagery

    Example
    -------
    >>> import os
    >>> fpath = os.path.join(os.getcwd(), "data.jp2" )

    >>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)

    >>> I_ones = np.zeros(I.shape, dtype=bool)
    >>> make_geo_im(I_ones, geoTransformM, spatialRefM, "ones.tif")
    """
    assert len(glob.glob(fname)) != 0, ('file does not seem to be present')

    img = gdal.Open(fname)
    # imagery can consist of multiple bands
    if len(boi) == 0:
        for counter in range(img.RasterCount):
            band = np.array(img.GetRasterBand(counter + 1).ReadAsArray())
            data = band if counter == 0 else np.dstack(
                (data, band[:, :, np.newaxis]))
    else:
        num_bands = img.RasterCount
        assert (np.max(boi) + 1) <= num_bands, \
            'bands of interest is out of range'
        for counter, band_id in enumerate(boi):
            band = np.array(img.GetRasterBand(band_id + 1).ReadAsArray())
            data = band if counter == 0 else np.dstack(
                (data, band[:, :, np.newaxis]))
    spatialRef = img.GetProjection()
    geoTransform = img.GetGeoTransform()
    targetprj = osr.SpatialReference(wkt=img.GetProjection())
    return data, spatialRef, geoTransform, targetprj

# output functions


def make_geo_im(I, R, crs, fName, meta_descr='project Eratosthenes',
                no_dat=np.nan, sun_angles='az:360-zn:90',
                date_created='-0276-00-00'):
    """ Create georeferenced tiff file (a GeoTIFF)

    Parameters
    ----------
    I : numpy.array, size=(m,n)
        band image
    R : list, size=(1,6)
        GDAL georeference transform of an image
    crs : string
        coordinate reference string
    fName : string
        filename for the image with extension
    no_dat : integer or float
        no data value
    sun_angles : string
        string giving meta data about the illumination angles
    date_created : string
        string giving the acquisition date in YYYY-MM-DD

    Example
    -------
    >>> import os
    >>> fpath = os.path.join(os.getcwd(), "data.jp2")

    >>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)

    >>> I_ones = np.zeros(I.shape, dtype=bool)
    >>> make_geo_im(I_ones, geoTransformM, spatialRefM, "ones.tif")
    """
    drv = gdal.GetDriverByName("GTiff")  # export image

    if I.ndim == 3:
        bands = I.shape[2]
    else:
        bands = 1

    # make it type dependent
    if I.dtype == 'float64':
        ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0],
                        bands=bands, eType=gdal.GDT_Float64)
    elif I.dtype == 'float32':
        ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0],
                        bands=bands, eType=gdal.GDT_Float32)
    elif I.dtype == 'bool':
        ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0],
                        bands=bands, eType=gdal.GDT_Byte)
    else:
        ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0],
                        bands=bands, eType=gdal.GDT_Int32)

    # set metadata in datasource
    ds.SetMetadata({'TIFFTAG_SOFTWARE': 'dhdt v0.1',
                    'TIFFTAG_ARTIST': 'bas altena and team Atlas',
                    'TIFFTAG_COPYRIGHT': 'contains modified Copernicus data',
                    'TIFFTAG_IMAGEDESCRIPTION': meta_descr,
                    'TIFFTAG_RESOLUTIONUNIT': sun_angles,
                    'TIFFTAG_DATETIME': date_created})

    # set georeferencing metadata
    if len(R) != 6:
        R = R[:6]
    ds.SetGeoTransform(R)

    if not isinstance(crs, str):
        crs = crs.ExportToWkt()
    ds.SetProjection(crs)

    if I.ndim == 3:
        for count in np.arange(1, I.shape[2] + 1, 1):
            band = ds.GetRasterBand(int(count))
            band.WriteArray(I[:, :, count - 1], 0, 0)
            if count == 1:
                band.SetNoDataValue(no_dat)
            band = None
    else:
        ds.GetRasterBand(1).WriteArray(I)
        ds.GetRasterBand(1).SetNoDataValue(no_dat)
    ds = None
    del ds


def make_multispectral_vrt(df, fpath=None, fname='multispec.vrt'):
    """ A virtual raster tile (VRT) is a description of datasets written in
    an XML format; among other things, it eases the display and handling of
    multi-spectral data.

    Parameters
    ----------
    df : pandas.DataFrame
        organization of the different spectral bands
    fpath : string
        path of the directory of interest
    fname : string
        file name of the virtual raster tile
    """
    assert isinstance(df, pd.DataFrame), ('please provide a dataframe')
    assert 'filepath' in df, ('please first run "get_S2_image_locations"' +
                              ' to find the proper file locations')

    if fpath is None:
        fpath = os.path.commonpath(df.filepath.tolist())
    ffull = os.path.join(fpath, fname)

    vrt_options = gdal.BuildVRTOptions(resampleAlg=gdal.GRA_NearestNeighbour,
                                       addAlpha=False,
                                       separate=True,
                                       srcNodata=0)
    my_vrt = gdal.BuildVRT(ffull, [f + '.jp2' for f in df['filepath']],
                           options=vrt_options)
    my_vrt = None

    # modify the vrt-file to include band names
    tree = ElementTree.parse(ffull)
    root = tree.getroot()
    for idx, band in enumerate(root.iter("VRTRasterBand")):
        description = ElementTree.SubElement(band, "Description")
        description.text = df.common_name[idx]
    tree.write(ffull)  # update the file on disk
    return
[ "numpy.dstack", "xml.etree.ElementTree.parse", "os.path.join", "numpy.max", "numpy.array", "numpy.arange", "osgeo.gdal.BuildVRTOptions", "xml.etree.ElementTree.SubElement", "glob.glob", "osgeo.gdal.Open", "osgeo.gdal.GetDriverByName", "osgeo.gdal.BuildVRT" ]
[((1163, 1179), 'osgeo.gdal.Open', 'gdal.Open', (['fname'], {}), '(fname)\n', (1172, 1179), False, 'from osgeo import gdal, osr\n'), ((1538, 1550), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1546, 1550), True, 'import numpy as np\n'), ((2892, 2908), 'osgeo.gdal.Open', 'gdal.Open', (['fname'], {}), '(fname)\n', (2901, 2908), False, 'from osgeo import gdal, osr\n'), ((4848, 4877), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (4868, 4877), False, 'from osgeo import gdal, osr\n'), ((7354, 7380), 'os.path.join', 'os.path.join', (['fpath', 'fname'], {}), '(fpath, fname)\n', (7366, 7380), False, 'import os\n'), ((7399, 7506), 'osgeo.gdal.BuildVRTOptions', 'gdal.BuildVRTOptions', ([], {'resampleAlg': 'gdal.GRA_NearestNeighbour', 'addAlpha': '(False)', 'separate': '(True)', 'srcNodata': '(0)'}), '(resampleAlg=gdal.GRA_NearestNeighbour, addAlpha=False,\n separate=True, srcNodata=0)\n', (7419, 7506), False, 'from osgeo import gdal, osr\n'), ((7633, 7719), 'osgeo.gdal.BuildVRT', 'gdal.BuildVRT', (['ffull', "[(f + '.jp2') for f in df['filepath']]"], {'options': 'vrt_options'}), "(ffull, [(f + '.jp2') for f in df['filepath']], options=\n vrt_options)\n", (7646, 7719), False, 'from osgeo import gdal, osr\n'), ((7816, 7840), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['ffull'], {}), '(ffull)\n', (7833, 7840), False, 'from xml.etree import ElementTree\n'), ((6233, 6264), 'numpy.arange', 'np.arange', (['(1)', '(I.shape[2] + 1)', '(1)'], {}), '(1, I.shape[2] + 1, 1)\n', (6242, 6264), True, 'import numpy as np\n'), ((7949, 7992), 'xml.etree.ElementTree.SubElement', 'ElementTree.SubElement', (['band', '"""Description"""'], {}), "(band, 'Description')\n", (7971, 7992), False, 'from xml.etree import ElementTree\n'), ((1091, 1107), 'glob.glob', 'glob.glob', (['fname'], {}), '(fname)\n', (1100, 1107), False, 'import glob\n'), ((2820, 2836), 'glob.glob', 'glob.glob', (['fname'], {}), '(fname)\n', (2829, 2836), False, 'import glob\n'), ((3139, 3180), 'numpy.dstack', 'np.dstack', (['(data, band[:, :, np.newaxis])'], {}), '((data, band[:, :, np.newaxis]))\n', (3148, 3180), True, 'import numpy as np\n'), ((3297, 3308), 'numpy.max', 'np.max', (['boi'], {}), '(boi)\n', (3303, 3308), True, 'import numpy as np\n'), ((3523, 3564), 'numpy.dstack', 'np.dstack', (['(data, band[:, :, np.newaxis])'], {}), '((data, band[:, :, np.newaxis]))\n', (3532, 3564), True, 'import numpy as np\n')]
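# Hedged example (editorial addition) for make_multispectral_vrt from the
# module above. It assumes a dataframe with 'filepath' and 'common_name'
# columns, as the assert in that function suggests is produced by
# "get_S2_image_locations"; the band file stems below are hypothetical
# (note that '.jp2' is appended internally by the function).
import pandas as pd
df = pd.DataFrame({
    'filepath': ['/data/S2/T05VMG_B04', '/data/S2/T05VMG_B8A'],
    'common_name': ['red', 'nir'],
})
make_multispectral_vrt(df, fname='multispec.vrt')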
import logging import math import re from collections import defaultdict from dataclasses import dataclass from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple import cv2 import numpy as np import scipy.spatial from lib.image_processing import ( Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image, ) from lib.instrument_tex import Detectable, FontSize from lib.parse_formula_tex import TexSymbol, TexToken logger = logging.getLogger("texsymdetect") PageNumber = int MathMl = str @dataclass(frozen=True) class Id: """ To uniquely identify a symbol in the symbol search functionality (i.e., not confuse two symbols with each other), one needs both the MathML for the symbol, and the size it was rendered at. """ mathml: str level: FontSize @dataclass class TokenInstance: id_: Id location: Rectangle @dataclass class SymbolInstance: id_: Id location: Rectangle @dataclass class TokenTemplate: symbol: Id images: List[np.array] @dataclass class Component: symbol_id: Id center: Point " Position of center of component, relative to center of anchor component. " @dataclass class SymbolTemplate: anchor: Id " Leftmost member of the composite template. " members: List[Component] " All members of the composite template except for the anchor. " def create_symbol_template( symbol_image: np.array, token_images: Dict[MathMl, Dict[FontSize, List[np.array]]], token_mathmls: Iterable[str], require_blank_border_around_tokens: bool = True, ) -> Optional[SymbolTemplate]: # Unpack token images into a 1-D list. token_image_list: List[np.array] = [] mathmls: List[MathMl] = [] font_sizes: List[FontSize] = [] for mathml, sizes in token_images.items(): if mathml not in token_mathmls: continue for font_size, images in sizes.items(): for image in images: token_image_list.append(image) font_sizes.append(font_size) mathmls.append(mathml) # Search in image for tokens. rects = find_in_image( token_image_list, symbol_image, require_blank_border=require_blank_border_around_tokens, ) # Unroll tokens into a 1-D list. rects_unrolled: List[Rectangle] = [] mathmls_unrolled: List[MathMl] = [] font_sizes_unrolled: List[FontSize] = [] for mathml, font_size, rect_list in zip(mathmls, font_sizes, rects): for rect in rect_list: rects_unrolled.append(rect) mathmls_unrolled.append(mathml) font_sizes_unrolled.append(font_size) # Find positions of child symbols in the composite symbol image. components: List[Component] = [] # Add tokens to the template left-to-right. for (mathml, font_size, rect) in sorted( zip(mathmls_unrolled, font_sizes_unrolled, rects_unrolled), key=lambda t: t[2].left, ): if mathml in token_mathmls: center = Point(rect.left + rect.width / 2.0, rect.top + rect.height / 2.0) component = Component(Id(mathml, font_size), center) if component not in components: components.append(component) # Composite symbol needs at least one component. if not components: return None # Select 'anchor' for the template as the leftmost component. components.sort(key=lambda c: c.center.x) anchor = components.pop(0) # Normalize the positions of components relative to the anchor. for component in components: component.center.x -= anchor.center.x component.center.y -= anchor.center.y # assert ( # False # ), "May want to filter out overlapping tokens... for instance, by blanking out the part of the image that matches." 
return SymbolTemplate(anchor.symbol_id, components) def extract_templates( page_images: Dict[PageNumber, np.array], detectables: Sequence[Detectable], ) -> Tuple[Dict[Detectable, List[np.array]], Dict[Detectable, SymbolTemplate]]: """ Given images of pages from a paper that has been modified to include appearances of many tokens and symbols (i.e., 'detectables'), extract templates for those tokens and symbols that can be used to identify them in other documents. Returns a collection of token templates (images), and symbol templates (a flexible template format). Note that both tokens and symbols must be passed in as detectables; symbols cannot be found without first finding their component tokens. All detectables should be provided in the order that they appear in the TeX, which should include all tokens first, followed by all symbols. """ sorted_page_images = [page_images[pn] for pn in sorted(page_images.keys())] def dequeue_page() -> Optional[np.array]: " Remove image of the next page from the list of all pages in the document. " if not sorted_page_images: return None image = sorted_page_images.pop(0) return image page_image = dequeue_page() next_page_image = dequeue_page() # Scan all pages until the marker is found that suggests that the original LaTeX # document has ended, and the detectables (i.e., colorized tokens and symbols) # are about to appear. while True: if not _contains_start_graphic(page_image): page_image = next_page_image next_page_image = dequeue_page() continue # Once the marker has been found, skip forward one more page so that # symbols and tokens will be detected on the page after the marker. page_image = next_page_image next_page_image = dequeue_page() break # Templates are extracted for detecting both tokens and symbols. Templates # for tokens are images of single letters or marks. Templates for symbols # are groups of tokens and the expected (but somewhat flexible) spatial # relationships between them. token_images: Dict[Detectable, List[np.array]] = defaultdict(list) token_images_lookup: Dict[MathMl, Dict[FontSize, List[np.array]]] = defaultdict( dict ) symbol_templates: Dict[Detectable, SymbolTemplate] = {} for d in detectables: # Find a bounding box around the token / symbol. red, green, blue = d.color rects = find_boxes_with_rgb(page_image, red, green, blue) if next_page_image is not None: if not rects: rects = find_boxes_with_rgb(next_page_image, red, green, blue) if not rects: logger.warning("Could not find detectable %s.", d) continue page_image = next_page_image next_page_image = dequeue_page() else: rects.extend(find_boxes_with_rgb(next_page_image, red, green, blue)) if len(rects) > 1: logger.warning( "Unexpectedly more than one instance of detectable %s. " + "There may have been a problem in the coloring code.", d, ) if not rects: logger.warning("Could not find detectable %s.", d) box = rects[0] logger.debug(f"Found symbol at {box}.") # Extract a cropped, black-and-white image of the token or symbol. cropped_bw = page_image[ box.top : box.top + box.height, box.left : box.left + box.width ] cropped_bw[ np.where( (cropped_bw[:, :, 0] != 255) | (cropped_bw[:, :, 1] != 255) | (cropped_bw[:, :, 2] != 255) ) ] = [0, 0, 0] cropped_bw = cv2.cvtColor(cropped_bw, cv2.COLOR_BGR2GRAY) # For simple symbols, extract images. if isinstance(d.entity, TexToken): # Only save a template if it has a different appearance from the other templates # saved for a symbol. 
This is important as a bunch of templates for the symbol # at the same size are created to try to make sure that templates are saved for # every way that extra space might have been introduced between characters in the # symbol when the PDF was rendered to an image. already_saved = False for img in token_images[d]: if np.array_equal(img, cropped_bw): already_saved = True break if not already_saved: token_images[d].append(cropped_bw) lookup_dict = token_images_lookup[d.entity.mathml] if d.font_size not in lookup_dict: lookup_dict[d.font_size] = [] lookup_dict[d.font_size].append(cropped_bw) # Note that, if the caller of this function did their job in ordering the list of # detectables, symbols will be processed only after all tokens have been processed. if isinstance(d.entity, TexSymbol): token_mathmls = [t.mathml for t in d.entity.tokens] template = create_symbol_template( cropped_bw, token_images_lookup, token_mathmls ) if template: symbol_templates[d] = template return token_images, symbol_templates class TokenIndex: " Index of appearances of all tokens on a page. " def __init__(self, tokens: Iterable[TokenInstance]) -> None: self._tokens: List[TokenInstance] = list(tokens) # Build a KD search tree over symbols to support faster spatial querying. token_centers = [ ( t.location.left + t.location.width / 2.0, t.location.top + t.location.height / 2.0, ) for t in tokens ] if not tokens: token_centers = np.empty(shape=(0, 2)) self._tree = scipy.spatial.KDTree(token_centers) def get_instances(self, id_: Id = None) -> List[TokenInstance]: " Get all tokens with a specific key. " if not id_: return list(self._tokens) return [t for t in self._tokens if t.id_ == id_] def find( self, id_: Id, center: Point, tolerance: Optional[Point] = None, ) -> List[TokenInstance]: """ Get all tokens near a specific point matching a specification for the token (its key and level). Matching tokens are returned if: * its center x falls within [center[0] - tolerance[0], center[0] + tolerance[0]] * its center y falls within [center[1] - tolerance[1], center[1] + tolerance[1]] """ tolerance = tolerance or Point(1.0, 1.0) # Initial query for candidate symbols is made using the KDTree 'query_ball_point' method, # as it will in many cases filter symbols according to position in two-dimensional space # than an iteratively searching over a list of all symbols. radius = math.sqrt(tolerance.x * tolerance.x + tolerance.y * tolerance.y) nearby_points = self._tree.query_ball_point(x=[center.x, center.y], r=radius) matches = [] for token_i in nearby_points: # Rule out symbols that are not the requested symbol. token = self._tokens[token_i] if token.id_ != id_: continue # Rule out symbols that are not within the tolerated distance of the query point. token_center_x = token.location.left + token.location.width / 2.0 token_center_y = token.location.top + token.location.height / 2.0 if ( abs(token_center_x - center.x) > tolerance.x or abs(token_center_y - center.y) > tolerance.y ): continue matches.append(token) return matches def detect_tokens( page_images: Dict[PageNumber, np.array], token_images: Dict[Detectable, List[np.array]], require_blank_border: bool = True, ) -> Dict[PageNumber, TokenIndex]: """ Detect appearances of tokens in images of pages. If 'require_blank_border' is set, filter the detected tokens to just those that are surrounded with whitespace. This option is intended to help reduce the number of false positives. See the implementation comments below for more details. 
""" tokens: Dict[PageNumber, TokenIndex] = {} # Unpack token images into a 1-D list. token_image_list = [] token_list = [] for (token, images) in token_images.items(): for image in images: token_image_list.append(image) token_list.append(token) for page_no, page_image in sorted(page_images.items(), key=lambda t: t[0]): logger.debug("Detecting tokens on page %d.", page_no) page_image_gray = cv2.cvtColor(page_image, cv2.COLOR_BGR2GRAY) rects = find_in_image( token_image_list, page_image_gray, require_blank_border=require_blank_border, ) token_instances: List[TokenInstance] = [] for (token, rect_list) in zip(token_list, rects): for rect in rect_list: token_instances.append( TokenInstance( id_=Id(token.entity.mathml, token.font_size), location=rect ) ) tokens[page_no] = TokenIndex(token_instances) return tokens def detect_symbols( token_instances: Dict[PageNumber, TokenIndex], symbol_templates: Dict[Detectable, SymbolTemplate], ) -> Dict[PageNumber, List[SymbolInstance]]: symbol_instances: Dict[PageNumber, List[SymbolInstance]] = defaultdict(list) for page_no, token_index in token_instances.items(): logger.debug("Scanning page %d for symbols.", page_no) for detectable, template in symbol_templates.items(): for rect in find_symbols(template, token_index): instance = SymbolInstance( Id(detectable.entity.mathml, detectable.font_size), rect ) # Deduplicate symbols, in case two symbols are actually the same symbol (as # may happen if two symbols had different TeX, but the same MathML). if instance not in symbol_instances[page_no]: symbol_instances[page_no].append(instance) return symbol_instances def find_symbols(template: SymbolTemplate, index: TokenIndex) -> Iterator[Rectangle]: """ Search for appearances of a symbol given an index of tokens. """ # Search for anchors---that is, leftmost glyphs in a symbol, relative # to which all other tokens in a composite symbol will be searched. anchor_candidates = index.get_instances(template.anchor) # For each anchor found, attempt to fill out the rest of the composite symbol template. for a in anchor_candidates: template_incomplete = False member_matches: List[TokenInstance] = [] anchor_center_x = a.location.left + a.location.width / 2.0 anchor_center_y = a.location.top + a.location.height / 2.0 # For each expected member of the composite symbol (i.e., all simple symbols the composite # symbol should be made up of), search for appearances of the member at the expected # location relative to the anchor. for member in template.members: expected_center = Point( anchor_center_x + member.center.x, anchor_center_y + member.center.y ) # Note that the tolerance for the position of a member symbol is higher the further away # that member is from the anchor, as it is assumed that TeX might insert or remove space # between members, which will accumulate the further away the member is from the anchor. tolerance = Point( math.ceil(abs(member.center.x) / 5.0) + 1, math.ceil(abs(member.center.y) / 5.0) + 1, ) member_candidates = index.find( id_=member.symbol_id, center=expected_center, tolerance=tolerance, ) # If multiple symbols could fill the member slot in the composite symbol, select the # leftmost symbol that has not yet been used to fill a slot. member_found = False member_candidates.sort(key=lambda c: c.location.left) for m in member_candidates: if m not in member_matches: member_matches.append(m) member_found = True break # If any member slot of the template cannot be filled, a composite symbol cannot be # created. Advance to the next potential anchor. 
if not member_found: template_incomplete = True break # Create an instance of the composite symbol if the template has been completed. if not template_incomplete: tokens = [a] + member_matches left = min([t.location.left for t in tokens]) top = min([t.location.top for t in tokens]) right = max([t.location.left + t.location.width for t in tokens]) bottom = max([t.location.top + t.location.height for t in tokens]) yield Rectangle(left, top, right - left, bottom - top)
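
The two-stage lookup in TokenIndex.find (a coarse KD-tree radius query followed by an exact per-axis tolerance check) can be exercised on its own. A minimal sketch with made-up token centers; the names below are illustrative and not part of the module:

import scipy.spatial

centers = [(10.0, 10.0), (11.0, 10.5), (40.0, 5.0)]  # token centers as (x, y)
tree = scipy.spatial.KDTree(centers)

# Coarse pass: everything within the radius that circumscribes the tolerance box.
query, tol = (10.5, 10.0), (1.0, 1.0)
radius = (tol[0] ** 2 + tol[1] ** 2) ** 0.5
candidates = tree.query_ball_point(x=list(query), r=radius)

# Exact pass: keep only the points inside the axis-aligned tolerance box.
matches = [
    i for i in candidates
    if abs(centers[i][0] - query[0]) <= tol[0]
    and abs(centers[i][1] - query[1]) <= tol[1]
]
print(matches)  # -> [0, 1]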
[ "lib.image_processing.Point", "lib.image_processing._contains_start_graphic", "numpy.array_equal", "lib.image_processing.Rectangle", "math.sqrt", "cv2.cvtColor", "numpy.empty", "logging.getLogger", "lib.image_processing.find_boxes_with_rgb", "collections.defaultdict", "numpy.where", "lib.image_processing.find_in_image", "dataclasses.dataclass" ]
[((494, 527), 'logging.getLogger', 'logging.getLogger', (['"""texsymdetect"""'], {}), "('texsymdetect')\n", (511, 527), False, 'import logging\n'), ((562, 584), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (571, 584), False, 'from dataclasses import dataclass\n'), ((2173, 2280), 'lib.image_processing.find_in_image', 'find_in_image', (['token_image_list', 'symbol_image'], {'require_blank_border': 'require_blank_border_around_tokens'}), '(token_image_list, symbol_image, require_blank_border=\n require_blank_border_around_tokens)\n', (2186, 2280), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6140, 6157), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6151, 6157), False, 'from collections import defaultdict\n'), ((6230, 6247), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6241, 6247), False, 'from collections import defaultdict\n'), ((13804, 13821), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13815, 13821), False, 'from collections import defaultdict\n'), ((6458, 6507), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['page_image', 'red', 'green', 'blue'], {}), '(page_image, red, green, blue)\n', (6477, 6507), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((7854, 7898), 'cv2.cvtColor', 'cv2.cvtColor', (['cropped_bw', 'cv2.COLOR_BGR2GRAY'], {}), '(cropped_bw, cv2.COLOR_BGR2GRAY)\n', (7866, 7898), False, 'import cv2\n'), ((11107, 11171), 'math.sqrt', 'math.sqrt', (['(tolerance.x * tolerance.x + tolerance.y * tolerance.y)'], {}), '(tolerance.x * tolerance.x + tolerance.y * tolerance.y)\n', (11116, 11171), False, 'import math\n'), ((12951, 12995), 'cv2.cvtColor', 'cv2.cvtColor', (['page_image', 'cv2.COLOR_BGR2GRAY'], {}), '(page_image, cv2.COLOR_BGR2GRAY)\n', (12963, 12995), False, 'import cv2\n'), ((13012, 13108), 'lib.image_processing.find_in_image', 'find_in_image', (['token_image_list', 'page_image_gray'], {'require_blank_border': 'require_blank_border'}), '(token_image_list, page_image_gray, require_blank_border=\n require_blank_border)\n', (13025, 13108), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((3075, 3140), 'lib.image_processing.Point', 'Point', (['(rect.left + rect.width / 2.0)', '(rect.top + rect.height / 2.0)'], {}), '(rect.left + rect.width / 2.0, rect.top + rect.height / 2.0)\n', (3080, 3140), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((5429, 5464), 'lib.image_processing._contains_start_graphic', '_contains_start_graphic', (['page_image'], {}), '(page_image)\n', (5452, 5464), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((7648, 7753), 'numpy.where', 'np.where', (['((cropped_bw[:, :, 0] != 255) | (cropped_bw[:, :, 1] != 255) | (cropped_bw[\n :, :, 2] != 255))'], {}), '((cropped_bw[:, :, 0] != 255) | (cropped_bw[:, :, 1] != 255) | (\n cropped_bw[:, :, 2] != 255))\n', (7656, 7753), True, 'import numpy as np\n'), ((9996, 10018), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 2)'}), '(shape=(0, 2))\n', (10004, 10018), True, 'import numpy as np\n'), ((10810, 10825), 'lib.image_processing.Point', 'Point', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (10815, 10825), 
False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((15562, 15637), 'lib.image_processing.Point', 'Point', (['(anchor_center_x + member.center.x)', '(anchor_center_y + member.center.y)'], {}), '(anchor_center_x + member.center.x, anchor_center_y + member.center.y)\n', (15567, 15637), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6599, 6653), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['next_page_image', 'red', 'green', 'blue'], {}), '(next_page_image, red, green, blue)\n', (6618, 6653), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((8513, 8544), 'numpy.array_equal', 'np.array_equal', (['img', 'cropped_bw'], {}), '(img, cropped_bw)\n', (8527, 8544), True, 'import numpy as np\n'), ((17452, 17500), 'lib.image_processing.Rectangle', 'Rectangle', (['left', 'top', '(right - left)', '(bottom - top)'], {}), '(left, top, right - left, bottom - top)\n', (17461, 17500), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n'), ((6925, 6979), 'lib.image_processing.find_boxes_with_rgb', 'find_boxes_with_rgb', (['next_page_image', 'red', 'green', 'blue'], {}), '(next_page_image, red, green, blue)\n', (6944, 6979), False, 'from lib.image_processing import Point, Rectangle, _contains_start_graphic, find_boxes_with_rgb, find_in_image\n')]
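
find_in_image is imported from lib.image_processing, whose implementation is not shown in the excerpt above. A plausible core for such a helper is OpenCV template matching; treat the sketch below as an assumption about the approach (the real helper also applies the blank-border filter), and match_template_gray is a hypothetical name:

import cv2
import numpy as np

def match_template_gray(template, page, threshold=0.95):
    """Return (x, y) top-left corners where `template` scores above `threshold`."""
    scores = cv2.matchTemplate(page, template, cv2.TM_CCOEFF_NORMED)
    ys, xs = np.where(scores >= threshold)
    return list(zip(xs.tolist(), ys.tolist()))

rng = np.random.default_rng(0)
page = rng.integers(0, 255, size=(50, 50), dtype=np.uint8)
template = page[10:18, 10:16].copy()
print(match_template_gray(template, page))  # the true location (10, 10) scores 1.0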
import logging
from typing import List

import numpy as np
import tensorflow as tf

try:
    import tensorflow_probability as tfp

    distributions = tfp.distributions
except ImportError:
    # Fall back to the distributions bundled with TensorFlow when
    # tensorflow_probability is not installed.
    distributions = tf.distributions
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_X_y
from sklearn.utils.validation import check_array, check_is_fitted

from spn.algorithms.LearningWrappers import learn_classifier, learn_parametric
from spn.algorithms.MPE import mpe
from spn.gpu.TensorFlow import optimize_tf
from spn.structure.Base import Context, get_nodes_by_type
from spn.structure.leaves.parametric.Parametric import Categorical, Gaussian, Parametric

logger = logging.getLogger(__name__)


class SPNClassifier(BaseEstimator, ClassifierMixin):
    """
    :class:`SPNClassifier` wraps the SPN structure learning, tensorflow weight optimization and MPE
    procedures into a single class that follows the sklearn estimator interface.

    Therefore, :class:`SPNClassifier` is usable in the sklearn framework as estimator in
    :meth:`sklearn.model_selection.cross_val_score`, :meth:`sklearn.model_selection.GridSearchCV` and more.
    """

    def __init__(
        self,
        parametric_types: List[Parametric] = None,
        n_jobs=-1,
        tf_optimize_weights=False,
        tf_n_epochs=100,
        tf_batch_size: int = None,
        tf_optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
        tf_pre_optimization_hook=None,
        tf_post_optimization_hook=None,
    ):
        """
        Create an :class:`SPNClassifier`.

        Parameters:
            parametric_types : List
                Parametric types of leaf nodes. If None, all are assumed to be Gaussian
            n_jobs : int
                Number of parallel jobs for learning the SPN structure
            tf_optimize_weights : bool
                Optimize weights in tensorflow
            tf_n_epochs : int
                Number of tensorflow optimization epochs
            tf_batch_size : int
                Batch size for tensorflow optimization
            tf_optimizer
                Tensorflow optimizer to use for optimization
            tf_pre_optimization_hook
                Hook that takes an SPN and returns an SPN before the optimization step
            tf_post_optimization_hook
                Hook that takes an SPN and returns an SPN after the optimization step
        """
        self.n_jobs = n_jobs
        self.tf_optimize_weights = tf_optimize_weights
        self.tf_n_epochs = tf_n_epochs
        self.tf_optimizer = tf_optimizer
        self.tf_batch_size = tf_batch_size
        self.parametric_types = parametric_types
        self.tf_pre_optimization_hook = tf_pre_optimization_hook
        self.tf_post_optimization_hook = tf_post_optimization_hook

    def fit(self, X, y):
        """
        Fit the :class:`SPNClassifier` object.
Parameters
        ----------
        X : np.ndarray
            Training variables
        y : np.ndarray
            Training labels

        Returns
        -------
        SPNClassifier
            Fitted classifier
        """
        # Check that X and y have correct shape
        X, y = check_X_y(X, y, multi_output=True)

        # Merge X and y
        train_data = np.c_[X, y].astype(np.float32)

        # If no parametric types were given, assume that all leaves are Gaussian
        if self.parametric_types is None:
            parametric_types = [Gaussian] * X.shape[1] + [Categorical]
        else:
            parametric_types = self.parametric_types

        # Learn classifier
        self._spn = learn_classifier(
            train_data,
            ds_context=Context(parametric_types=parametric_types).add_domains(train_data),
            spn_learn_wrapper=learn_parametric,
            label_idx=X.shape[1],
            cpus=self.n_jobs,
        )

        # If pre optimization hook has been defined, run now
        if self.tf_pre_optimization_hook:
            self._spn = self.tf_pre_optimization_hook(self._spn)

        # If optimization flag is set: optimize weights in tf
        if self.tf_optimize_weights:
            self._spn, self.loss = optimize_tf(
                spn=self._spn,
                data=train_data,
                optimizer=self.tf_optimizer,
                batch_size=self.tf_batch_size,
                epochs=self.tf_n_epochs,
                return_loss=True,
            )

        # If post optimization hook has been defined, run now
        if self.tf_post_optimization_hook:
            self._spn = self.tf_post_optimization_hook(self._spn)

        self.X_ = X
        self.y_ = y

        # Return the classifier
        return self

    def predict(self, X):
        """
        Make a prediction of the given data.

        Parameters
        ----------
        X : np.ndarray
            Test data

        Returns
        -------
        np.ndarray
            Label predictions for the given test data
        """
        # Check that fit had been called
        check_is_fitted(self, ["X_", "y_"])

        # Input validation
        X = check_array(X)

        # Classify: MPE fills the NaN label column with the most probable class
        n_test = X.shape[0]
        y_empty = np.full((n_test, 1), fill_value=np.nan)
        data = np.c_[X, y_empty]
        data_filled = mpe(self._spn, data)
        y_pred = data_filled[:, -1]
        return y_pred

    def get_params(self, deep=True):
        """Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
        return {
            "parametric_types": self.parametric_types,
            "n_jobs": self.n_jobs,
            "tf_optimize_weights": self.tf_optimize_weights,
            "tf_n_epochs": self.tf_n_epochs,
            "tf_batch_size": self.tf_batch_size,
            "tf_optimizer": self.tf_optimizer,
            "tf_pre_optimization_hook": self.tf_pre_optimization_hook,
            "tf_post_optimization_hook": self.tf_post_optimization_hook,
        }

    def set_params(self, **parameters):
        """Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self


def classification_categorical_to_tf_graph(
    node, data_placeholder=None, log_space=True, variable_dict=None, dtype=np.float32
):
    """
    Fix categorical to tf graph for classification problem.

    For a binary class label, there will be two categorical leaf nodes in the SPN: one that
    one-hot encodes the first class as [0, 1] and one that encodes the second class as [1, 0].
    Since tf optimizes the log likelihood, these one-hot represented probabilities will be
    projected into logspace, which results in log([1,0])=[0, -inf] and therefore NaNs in
    further computations. Therefore, this custom method adds a small epsilon, such that the
    zero probability value in the one-hot vector will not degrade to negative infinity.
""" with tf.compat.v1.variable_scope("%s_%s" % (node.__class__.__name__, node.id)): p = np.array(node.p, dtype=dtype) # Epsilon to make sure there are no zero values eps = 1e-20 p += eps # Renormalize such that the sum over all probabilities is one p /= np.sum(p) assert np.all(p > 0), "Probabilities in the class leaf nodes have to be greater than zero but were %s" % p softmaxInverse = np.log(p / np.max(p)).astype(dtype) probs = tf.nn.softmax(tf.constant(softmaxInverse)) variable_dict[node] = probs if log_space: return distributions.Categorical(probs=probs).log_prob(data_placeholder[:, node.scope[0]]) return distributions.Categorical(probs=probs).prob(data_placeholder[:, node.scope[0]])
[ "numpy.full", "spn.gpu.TensorFlow.optimize_tf", "numpy.sum", "tensorflow.compat.v1.variable_scope", "sklearn.utils.check_X_y", "numpy.all", "tensorflow.constant", "sklearn.utils.validation.check_is_fitted", "tensorflow.compat.v1.train.AdamOptimizer", "numpy.max", "numpy.array", "spn.structure.Base.Context", "spn.algorithms.MPE.mpe", "logging.getLogger", "sklearn.utils.validation.check_array" ]
[((685, 712), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (702, 712), False, 'import logging\n'), ((1378, 1431), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1410, 1431), True, 'import tensorflow as tf\n'), ((3133, 3167), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y'], {'multi_output': '(True)'}), '(X, y, multi_output=True)\n', (3142, 3167), False, 'from sklearn.utils import check_X_y\n'), ((4971, 5006), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', "['X_', 'y_']"], {}), "(self, ['X_', 'y_'])\n", (4986, 5006), False, 'from sklearn.utils.validation import check_array, check_is_fitted\n'), ((5047, 5061), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (5058, 5061), False, 'from sklearn.utils.validation import check_array, check_is_fitted\n'), ((5128, 5167), 'numpy.full', 'np.full', (['(n_test, 1)'], {'fill_value': 'np.nan'}), '((n_test, 1), fill_value=np.nan)\n', (5135, 5167), True, 'import numpy as np\n'), ((5223, 5243), 'spn.algorithms.MPE.mpe', 'mpe', (['self._spn', 'data'], {}), '(self._spn, data)\n', (5226, 5243), False, 'from spn.algorithms.MPE import mpe\n'), ((6941, 7014), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (["('%s_%s' % (node.__class__.__name__, node.id))"], {}), "('%s_%s' % (node.__class__.__name__, node.id))\n", (6968, 7014), True, 'import tensorflow as tf\n'), ((7028, 7057), 'numpy.array', 'np.array', (['node.p'], {'dtype': 'dtype'}), '(node.p, dtype=dtype)\n', (7036, 7057), True, 'import numpy as np\n'), ((7236, 7245), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (7242, 7245), True, 'import numpy as np\n'), ((7262, 7275), 'numpy.all', 'np.all', (['(p > 0)'], {}), '(p > 0)\n', (7268, 7275), True, 'import numpy as np\n'), ((4114, 4264), 'spn.gpu.TensorFlow.optimize_tf', 'optimize_tf', ([], {'spn': 'self._spn', 'data': 'train_data', 'optimizer': 'self.tf_optimizer', 'batch_size': 'self.tf_batch_size', 'epochs': 'self.tf_n_epochs', 'return_loss': '(True)'}), '(spn=self._spn, data=train_data, optimizer=self.tf_optimizer,\n batch_size=self.tf_batch_size, epochs=self.tf_n_epochs, return_loss=True)\n', (4125, 4264), False, 'from spn.gpu.TensorFlow import optimize_tf\n'), ((7454, 7481), 'tensorflow.constant', 'tf.constant', (['softmaxInverse'], {}), '(softmaxInverse)\n', (7465, 7481), True, 'import tensorflow as tf\n'), ((3620, 3662), 'spn.structure.Base.Context', 'Context', ([], {'parametric_types': 'parametric_types'}), '(parametric_types=parametric_types)\n', (3627, 3662), False, 'from spn.structure.Base import Context, get_nodes_by_type\n'), ((7399, 7408), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (7405, 7408), True, 'import numpy as np\n')]
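
Because SPNClassifier implements fit/predict/get_params/set_params, it plugs straight into sklearn model selection. A usage sketch, assuming spflow and its dependencies are installed and that the file above is importable as spn_classifier (a hypothetical module name):

from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score

from spn_classifier import SPNClassifier  # hypothetical import path

X, y = load_iris(return_X_y=True)
clf = SPNClassifier(n_jobs=1)  # Gaussian leaves plus a Categorical label by default
print(cross_val_score(clf, X, y, cv=3).mean())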
""" render_rgb.py renders obj file to rgb image Aviable function: - clear_mash: delete all the mesh in the secene - scene_setting_init: set scene configurations - node_setting_init: set node configurations - render: render rgb image for one obj file and one viewpoint - render_obj_by_vp_lists: wrapper function for render() render one obj file by multiple viewpoints - render_objs_by_one_vp: wrapper function for render() render multiple obj file by one viewpoint - init_all: a wrapper function, initialize all configurations = set_image_path: reset defualt image output folder author baiyu """ import sys import os import pickle import numpy as np import bpy from mathutils import Matrix import argparse abs_path = os.path.abspath(__file__) sys.path.append(os.path.dirname(abs_path)) from render_helper import * from settings import * from data_config import camera_setting_path, total_view_nums def clear_mesh(): """ clear all meshes in the secene """ for block in bpy.data.meshes: if block.users == 0: bpy.data.meshes.remove(block) for block in bpy.data.materials: if block.users == 0: bpy.data.materials.remove(block) for block in bpy.data.textures: if block.users == 0: bpy.data.textures.remove(block) for block in bpy.data.images: if block.users == 0: bpy.data.images.remove(block) bpy.ops.object.select_all(action='DESELECT') for obj in bpy.data.objects: if obj.type == 'MESH' or obj.type == 'EMPTY': obj.select = True bpy.ops.object.delete() def scene_setting_init(use_gpu): """initialize blender setting configurations """ sce = bpy.context.scene.name bpy.data.scenes[sce].render.engine = g_engine_type bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent #output bpy.data.scenes[sce].render.image_settings.color_mode = g_color_mode bpy.data.scenes[sce].render.image_settings.color_depth = g_color_depth bpy.data.scenes[sce].render.image_settings.file_format = g_file_format bpy.data.scenes[sce].render.use_file_extension = g_use_file_extension #dimensions bpy.data.scenes[sce].render.resolution_x = g_resolution_x bpy.data.scenes[sce].render.resolution_y = g_resolution_y bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage if use_gpu: bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences for device in cycles_prefs.devices: if device.type == 'CUDA': device.use = True bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA' bpy.types.CyclesRenderSettings.device = 'GPU' bpy.data.scenes[sce].cycles.device = 'GPU' def node_setting_init(): """node settings for render rgb images mainly for compositing the background images """ bpy.context.scene.use_nodes = True tree = bpy.context.scene.node_tree links = tree.links for node in tree.nodes: tree.nodes.remove(node) image_node = tree.nodes.new('CompositorNodeImage') scale_node = tree.nodes.new('CompositorNodeScale') alpha_over_node = tree.nodes.new('CompositorNodeAlphaOver') render_layer_node = tree.nodes.new('CompositorNodeRLayers') img_file_output_node = tree.nodes.new('CompositorNodeOutputFile') depth_file_output_node = tree.nodes.new("CompositorNodeOutputFile") scale_node.space = g_scale_space img_file_output_node.format.color_mode = g_rgb_color_mode img_file_output_node.format.color_depth = g_rgb_color_depth img_file_output_node.format.file_format = g_rgb_file_format img_file_output_node.base_path = 
g_syn_data_folder depth_file_output_node.format.color_mode = g_depth_color_mode depth_file_output_node.format.color_depth = g_depth_color_depth depth_file_output_node.format.file_format = g_depth_file_format depth_file_output_node.base_path = g_syn_data_folder links.new(image_node.outputs[0], scale_node.inputs[0]) links.new(scale_node.outputs[0], alpha_over_node.inputs[1]) links.new(render_layer_node.outputs[0], alpha_over_node.inputs[2]) links.new(alpha_over_node.outputs[0], img_file_output_node.inputs[0]) links.new(render_layer_node.outputs['Depth'], depth_file_output_node.inputs[0]) def render(viewpoint, viewpoint_id, rendering_dir): """render rgb image and depth maps render a object rgb image by a given camera viewpoint and choose random image as background, only render one image at a time. Args: viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance) viewpoint_id: the index of viewpoint rendering_dir: path to store camera info """ vp = viewpoint cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance) cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt) cam_obj = bpy.data.objects['Camera'] cam_obj.location[0] = cam_location[0] cam_obj.location[1] = cam_location[1] cam_obj.location[2] = cam_location[2] cam_obj.rotation_euler[0] = cam_rot[0] cam_obj.rotation_euler[1] = cam_rot[1] cam_obj.rotation_euler[2] = cam_rot[2] if g_background_image_path == 'TRANSPARENT': bpy.context.scene.render.alpha_mode = g_background_image_path else: background_images = os.listdir(g_background_image_path) image_name = random.choice(background_images) image_path = os.path.join(g_background_image_path, image_name) image_node = bpy.context.scene.node_tree.nodes[0] image_node.image = bpy.data.images.load(image_path) img_file_output_node = bpy.context.scene.node_tree.nodes[4] img_file_output_node.file_slots[0].path = 'color_###.png' # blender placeholder # depth_file_output_node = bpy.context.scene.node_tree.nodes[5] depth_file_output_node.file_slots[0].path = 'depth_###.exr' # blender placeholder # #start rendering bpy.context.scene.frame_set(viewpoint_id + 1) bpy.ops.render.render(write_still=True) # write camera info cam_K_file = os.path.join(cam_K_path, 'cam_K.txt') if (not os.path.isfile(cam_K_file)) or (len(os.listdir(cam_RT_path))<total_view_nums): K, RT = get_3x4_P_matrix_from_blender(cam_obj) np.savetxt(cam_K_file, K) np.savetxt(os.path.join(cam_RT_path, 'cam_RT_{0:03d}.txt'.format(viewpoint_id + 1)), RT) print('Camera parameters written.') def render_obj_by_vp_lists(rendering_dir, viewpoints): """ render one obj file by a given viewpoint list a wrapper function for render() Args: rendering_dir: a string variable indicate the rendering path of the model. 
viewpoints: an iterable object of vp parameters (contains azimuth, elevation, tilt angles and distance)
    """

    if isinstance(viewpoints, tuple):
        # wrap a single viewpoint so the iter() below succeeds
        viewpoints = [viewpoints]

    try:
        vp_lists = iter(viewpoints)
    except TypeError:
        print("viewpoints is not an iterable object")

    for vp_id, vp in enumerate(vp_lists):
        set_image_path(rendering_dir)
        set_depth_path(rendering_dir)
        render(vp, vp_id, rendering_dir)


def render_objs_by_one_vp(obj_pathes, viewpoint):
    """ render multiple obj files by a given viewpoint

    Args:
        obj_pathes: an iterable object contains multiple obj file pathes
        viewpoint: a namedtuple object contains azimuth, elevation, tilt angles and distance
    """

    if isinstance(obj_pathes, str):
        obj_lists = [obj_pathes]
    else:
        obj_lists = obj_pathes

    try:
        obj_lists = iter(obj_lists)
    except TypeError:
        print("obj_pathes is not an iterable object")

    for obj_path in obj_lists:
        rendering_dir = os.path.join(output_folder, obj_path.split('/')[4])
        if not os.path.exists(rendering_dir):
            os.makedirs(rendering_dir)
        clear_mesh()
        bpy.ops.import_scene.obj(filepath=obj_path)
        set_image_path(rendering_dir)
        set_depth_path(rendering_dir)
        render(viewpoint, 1, rendering_dir)


def camera_setting_init():
    """ camera settings for renderer """
    bpy.data.objects['Camera'].rotation_mode = g_rotation_mode


def light_setting_init():
    """ light settings for renderer """
    # Make light just directional, disable shadows.
    world = bpy.data.worlds['World']
    world.use_nodes = True

    # changing these values does affect the render.
    bg = world.node_tree.nodes['Background']
    bg.inputs[1].default_value = 10.0


def init_all():
    """init everything we need for rendering
    an image
    """
    scene_setting_init(g_gpu_render_enable)
    camera_setting_init()
    node_setting_init()
    light_setting_init()


def set_image_path(new_path):
    """ set image output path to new_path

    Args:
        new rendered image output path
    """
    file_output_node = bpy.context.scene.node_tree.nodes[4]
    file_output_node.base_path = new_path


def set_depth_path(new_path):
    """ set depth output path to new_path

    Args:
        new rendered depth output path
    """
    file_output_node = bpy.context.scene.node_tree.nodes[5]
    file_output_node.base_path = new_path


#---------------------------------------------------------------
# 3x4 P matrix from Blender camera
#---------------------------------------------------------------

# BKE_camera_sensor_size
def get_sensor_size(sensor_fit, sensor_x, sensor_y):
    if sensor_fit == 'VERTICAL':
        return sensor_y
    return sensor_x


# BKE_camera_sensor_fit
def get_sensor_fit(sensor_fit, size_x, size_y):
    if sensor_fit == 'AUTO':
        if size_x >= size_y:
            return 'HORIZONTAL'
        else:
            return 'VERTICAL'
    return sensor_fit


# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
# as well as
# https://blender.stackexchange.com/a/120063/3581
def get_calibration_matrix_K_from_blender(camd):
    if camd.type != 'PERSP':
        raise ValueError('Non-perspective cameras not supported')
    scene = bpy.context.scene
    f_in_mm = camd.lens
    scale = scene.render.resolution_percentage / 100
    resolution_x_in_px = scale * scene.render.resolution_x
    resolution_y_in_px = scale * scene.render.resolution_y
    sensor_size_in_mm = get_sensor_size(camd.sensor_fit, camd.sensor_width, camd.sensor_height)
    sensor_fit = get_sensor_fit(
        camd.sensor_fit,
        scene.render.pixel_aspect_x * resolution_x_in_px,
        scene.render.pixel_aspect_y * resolution_y_in_px
    )
    pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
    if sensor_fit == 'HORIZONTAL':
        view_fac_in_px = resolution_x_in_px
else: view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px pixel_size_mm_per_px = sensor_size_in_mm / f_in_mm / view_fac_in_px s_u = 1 / pixel_size_mm_per_px s_v = 1 / pixel_size_mm_per_px / pixel_aspect_ratio # Parameters of intrinsic calibration matrix K u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio skew = 0 # only use rectangular pixels K = Matrix( ((s_u, skew, u_0), ( 0, s_v, v_0), ( 0, 0, 1))) return K # Returns camera rotation and translation matrices from Blender. # # There are 3 coordinate systems involved: # 1. The World coordinates: "world" # - right-handed # 2. The Blender camera coordinates: "bcam" # - x is horizontal # - y is up # - right-handed: negative z look-at direction # 3. The desired computer vision camera coordinates: "cv" # - x is horizontal # - y is down (to align to the actual pixel coordinates # used in digital images) # - right-handed: positive z look-at direction def get_3x4_RT_matrix_from_blender(cam): # bcam stands for blender camera R_blender2shapenet = Matrix( ((1, 0, 0), (0, 0, -1), (0, 1, 0))) R_bcam2cv = Matrix( ((1, 0, 0), (0, -1, 0), (0, 0, -1))) # Transpose since the rotation is object rotation, # and we want coordinate rotation # R_world2bcam = cam.rotation_euler.to_matrix().transposed() # T_world2bcam = -1*R_world2bcam * location # # Use matrix_world instead to account for all constraints location, rotation = cam.matrix_world.decompose()[0:2] R_world2bcam = rotation.to_matrix().transposed() # Convert camera location to translation vector used in coordinate changes # T_world2bcam = -1*R_world2bcam*cam.location # Use location from matrix_world to account for constraints: T_world2bcam = -1*R_world2bcam * location # Build the coordinate transform matrix from world to computer vision camera R_world2cv = R_bcam2cv*R_world2bcam*R_blender2shapenet T_world2cv = R_bcam2cv*T_world2bcam # put into 3x4 matrix RT = Matrix(( R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv[1],), R_world2cv[2][:] + (T_world2cv[2],) )) return RT def get_3x4_P_matrix_from_blender(cam): K = get_calibration_matrix_K_from_blender(cam.data) RT = get_3x4_RT_matrix_from_blender(cam) return K, RT ### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA def parse_args(): argv = sys.argv if "--" not in argv: argv = [] # as if no args are passed else: argv = argv[argv.index("--") + 1:] # get all args after "--" parser = argparse.ArgumentParser(description='Blender renderer.') parser.add_argument("dict", type=str, help="model-view file for rendering.") args = parser.parse_args(argv) return args if __name__ == '__main__': args = parse_args() init_all() result_list = pickle.load(open(args.dict, 'rb')) cam_K_path = os.path.join(camera_setting_path, 'cam_K') cam_RT_path = os.path.join(camera_setting_path, 'cam_RT') if not os.path.exists(cam_K_path): os.makedirs(cam_K_path) if not os.path.exists(cam_RT_path): os.makedirs(cam_RT_path) for model in result_list: cat = model.path.split('/')[3] output_folder = os.path.join(g_syn_data_folder, cat) if not os.path.exists(output_folder): os.makedirs(output_folder) rendering_dir = os.path.join(output_folder, model.path.split('/')[4]) if not os.path.exists(rendering_dir): os.makedirs(rendering_dir) if len(os.listdir(rendering_dir)) == 40: print('Rendering has been done with this model.') continue clear_mesh() bpy.ops.import_scene.obj(filepath=model.path) render_obj_by_vp_lists(rendering_dir, model.vps)
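
The K and RT returned by get_3x4_P_matrix_from_blender compose into a 3x4 projection matrix P = K [R|T]. A standalone numpy sketch with illustrative numbers (no Blender required) showing how such a P maps a world point to pixel coordinates:

import numpy as np

K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])                             # illustrative intrinsics
RT = np.hstack([np.eye(3), np.array([[0.0], [0.0], [4.0]])])  # camera 4 units away
P = K @ RT                                                    # 3x4 projection matrix

Xw = np.array([0.1, -0.2, 0.0, 1.0])                          # homogeneous world point
u, v, w = P @ Xw
print(u / w, v / w)                                           # -> 335.0 210.0 (pixels)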
[ "argparse.ArgumentParser", "bpy.context.scene.frame_set", "os.path.isfile", "bpy.data.textures.remove", "bpy.ops.import_scene.obj", "os.path.join", "bpy.ops.object.select_all", "os.path.abspath", "os.path.dirname", "numpy.savetxt", "os.path.exists", "bpy.data.meshes.remove", "bpy.ops.object.delete", "bpy.data.materials.remove", "bpy.ops.render.render", "bpy.data.images.remove", "os.listdir", "os.makedirs", "bpy.data.images.load", "mathutils.Matrix" ]
[((770, 795), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (785, 795), False, 'import os\n'), ((812, 837), 'os.path.dirname', 'os.path.dirname', (['abs_path'], {}), '(abs_path)\n', (827, 837), False, 'import os\n'), ((1457, 1501), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (1482, 1501), False, 'import bpy\n'), ((1623, 1646), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (1644, 1646), False, 'import bpy\n'), ((6285, 6330), 'bpy.context.scene.frame_set', 'bpy.context.scene.frame_set', (['(viewpoint_id + 1)'], {}), '(viewpoint_id + 1)\n', (6312, 6330), False, 'import bpy\n'), ((6335, 6374), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (6356, 6374), False, 'import bpy\n'), ((6417, 6454), 'os.path.join', 'os.path.join', (['cam_K_path', '"""cam_K.txt"""'], {}), "(cam_K_path, 'cam_K.txt')\n", (6429, 6454), False, 'import os\n'), ((11610, 11662), 'mathutils.Matrix', 'Matrix', (['((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1))'], {}), '(((s_u, skew, u_0), (0, s_v, v_0), (0, 0, 1)))\n', (11616, 11662), False, 'from mathutils import Matrix\n'), ((12369, 12411), 'mathutils.Matrix', 'Matrix', (['((1, 0, 0), (0, 0, -1), (0, 1, 0))'], {}), '(((1, 0, 0), (0, 0, -1), (0, 1, 0)))\n', (12375, 12411), False, 'from mathutils import Matrix\n'), ((12456, 12499), 'mathutils.Matrix', 'Matrix', (['((1, 0, 0), (0, -1, 0), (0, 0, -1))'], {}), '(((1, 0, 0), (0, -1, 0), (0, 0, -1)))\n', (12462, 12499), False, 'from mathutils import Matrix\n'), ((13371, 13495), 'mathutils.Matrix', 'Matrix', (['(R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv[1],), \n R_world2cv[2][:] + (T_world2cv[2],))'], {}), '((R_world2cv[0][:] + (T_world2cv[0],), R_world2cv[1][:] + (T_world2cv\n [1],), R_world2cv[2][:] + (T_world2cv[2],)))\n', (13377, 13495), False, 'from mathutils import Matrix\n'), ((13961, 14017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Blender renderer."""'}), "(description='Blender renderer.')\n", (13984, 14017), False, 'import argparse\n'), ((14315, 14357), 'os.path.join', 'os.path.join', (['camera_setting_path', '"""cam_K"""'], {}), "(camera_setting_path, 'cam_K')\n", (14327, 14357), False, 'import os\n'), ((14376, 14419), 'os.path.join', 'os.path.join', (['camera_setting_path', '"""cam_RT"""'], {}), "(camera_setting_path, 'cam_RT')\n", (14388, 14419), False, 'import os\n'), ((5674, 5709), 'os.listdir', 'os.listdir', (['g_background_image_path'], {}), '(g_background_image_path)\n', (5684, 5709), False, 'import os\n'), ((5785, 5834), 'os.path.join', 'os.path.join', (['g_background_image_path', 'image_name'], {}), '(g_background_image_path, image_name)\n', (5797, 5834), False, 'import os\n'), ((5920, 5952), 'bpy.data.images.load', 'bpy.data.images.load', (['image_path'], {}), '(image_path)\n', (5940, 5952), False, 'import bpy\n'), ((6609, 6634), 'numpy.savetxt', 'np.savetxt', (['cam_K_file', 'K'], {}), '(cam_K_file, K)\n', (6619, 6634), True, 'import numpy as np\n'), ((8227, 8270), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'obj_path'}), '(filepath=obj_path)\n', (8251, 8270), False, 'import bpy\n'), ((14431, 14457), 'os.path.exists', 'os.path.exists', (['cam_K_path'], {}), '(cam_K_path)\n', (14445, 14457), False, 'import os\n'), ((14467, 14490), 'os.makedirs', 'os.makedirs', (['cam_K_path'], {}), '(cam_K_path)\n', (14478, 14490), False, 'import os\n'), 
((14502, 14529), 'os.path.exists', 'os.path.exists', (['cam_RT_path'], {}), '(cam_RT_path)\n', (14516, 14529), False, 'import os\n'), ((14539, 14563), 'os.makedirs', 'os.makedirs', (['cam_RT_path'], {}), '(cam_RT_path)\n', (14550, 14563), False, 'import os\n'), ((14658, 14694), 'os.path.join', 'os.path.join', (['g_syn_data_folder', 'cat'], {}), '(g_syn_data_folder, cat)\n', (14670, 14694), False, 'import os\n'), ((15105, 15150), 'bpy.ops.import_scene.obj', 'bpy.ops.import_scene.obj', ([], {'filepath': 'model.path'}), '(filepath=model.path)\n', (15129, 15150), False, 'import bpy\n'), ((1094, 1123), 'bpy.data.meshes.remove', 'bpy.data.meshes.remove', (['block'], {}), '(block)\n', (1116, 1123), False, 'import bpy\n'), ((1203, 1235), 'bpy.data.materials.remove', 'bpy.data.materials.remove', (['block'], {}), '(block)\n', (1228, 1235), False, 'import bpy\n'), ((1314, 1345), 'bpy.data.textures.remove', 'bpy.data.textures.remove', (['block'], {}), '(block)\n', (1338, 1345), False, 'import bpy\n'), ((1422, 1451), 'bpy.data.images.remove', 'bpy.data.images.remove', (['block'], {}), '(block)\n', (1444, 1451), False, 'import bpy\n'), ((6467, 6493), 'os.path.isfile', 'os.path.isfile', (['cam_K_file'], {}), '(cam_K_file)\n', (6481, 6493), False, 'import os\n'), ((8127, 8156), 'os.path.exists', 'os.path.exists', (['rendering_dir'], {}), '(rendering_dir)\n', (8141, 8156), False, 'import os\n'), ((8170, 8196), 'os.makedirs', 'os.makedirs', (['rendering_dir'], {}), '(rendering_dir)\n', (8181, 8196), False, 'import os\n'), ((14710, 14739), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (14724, 14739), False, 'import os\n'), ((14753, 14779), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (14764, 14779), False, 'import os\n'), ((14874, 14903), 'os.path.exists', 'os.path.exists', (['rendering_dir'], {}), '(rendering_dir)\n', (14888, 14903), False, 'import os\n'), ((14917, 14943), 'os.makedirs', 'os.makedirs', (['rendering_dir'], {}), '(rendering_dir)\n', (14928, 14943), False, 'import os\n'), ((6503, 6526), 'os.listdir', 'os.listdir', (['cam_RT_path'], {}), '(cam_RT_path)\n', (6513, 6526), False, 'import os\n'), ((14959, 14984), 'os.listdir', 'os.listdir', (['rendering_dir'], {}), '(rendering_dir)\n', (14969, 14984), False, 'import os\n')]
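
camera_location and camera_rot_XYZEuler come from render_helper (imported with * above), which is not part of this excerpt. A common convention for such a helper is a plain spherical-to-Cartesian conversion; treat the sketch below as an assumption about render_helper, not its actual code:

import math

def camera_location_sketch(azimuth, elevation, distance):
    # angles in radians; z points 'up', matching the usual Blender world frame
    x = distance * math.cos(elevation) * math.cos(azimuth)
    y = distance * math.cos(elevation) * math.sin(azimuth)
    z = distance * math.sin(elevation)
    return (x, y, z)

print(camera_location_sketch(math.radians(30), math.radians(20), 2.0))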
'''read ENVI/raw binary format: dimensions from the header, data from the .bin
file; then segment the image using flood-fill segmentation'''
import os
import sys
import pickle
import numpy as np
from flood import flood
import matplotlib.pyplot as plt
from dist import normalize, to_list, centroid


def read_hdr(hdr):  # read the image dimensions
    cols, rows, bands = 0, 0, 0
    for line in open(hdr).readlines():
        chunks = line.strip().split('=')
        try:  # pull off two chunks delimited by '='
            f, g = [x.strip() for x in chunks[0:2]]
            if f == 'samples':
                cols = g
            if f == 'lines':
                rows = g
            if f == 'bands':
                bands = g
        except ValueError:
            pass  # line did not contain a 'key = value' pair
    return [int(x) for x in [cols, rows, bands]]  # string to int


def read_float(fn):  # read the raw binary file
    return np.fromfile(fn, dtype=np.float32) / 255.  # put data in range [0, 1]


'''pixel @ (row, col) = (i, j):
    npx = nrow * ncol  # number of pixels in image
    red value: dat[          i * ncol + j]
    grn value: dat[    npx + i * ncol + j]
    blu value: dat[2 * npx + i * ncol + j]'''


def plot(dat, rows, cols, bands, file_name):  # plot "raw binary" image
    dat = dat.reshape((bands, rows * cols))
    rgb = np.zeros((rows, cols, bands))
    for i in range(bands):
        rgb[:, :, i] = dat[i, :].reshape((rows, cols))
    plt.imshow(rgb)
    # plt.show()  # uncomment to zoom in interactively, e.g. to determine line numbers
    plt.savefig(file_name)
    plt.close()


class image:
    def __init__(self, fn=None):
        if fn:
            self.fn = fn
            self.load()

    def load(self):
        self.cols, self.rows, self.bands = read_hdr(self.fn[:-4] + '.hdr')
        self.dat, self.npx = read_float(self.fn), self.rows * self.cols
        plot(self.dat, self.rows, self.cols, self.bands, self.fn[:-4] + '.png')

    def png(self):
        if type(self.dat) == list:
            self.dat = np.array(self.dat)
        plot(self.dat, self.rows, self.cols, self.bands, self.fn + '.png')

    def gather_points(self):  # list points for each label
        self.points = [[] for i in range(self.next_label)]
        for i in range(self.rows):
            for j in range(self.cols):
                ix = i * self.cols + j  # linear index
                if self.labels[ix] > 0:  # skip background
                    label = self.labels[ix]  # label this point
                    self.points[label] += [[i, j]]

        c = {}  # count the number of pixels per segment
        for point in self.points:
            n = len(point)
            c[n] = (c[n] + 1) if (n in c) else 1

        counts = [[k, c[k]] for k in c]  # sort the counts
        counts.sort()

        ffn = self.fn + '_seg_count.png'
        if not os.path.exists(ffn):
            print('+w ' + ffn)
            plt.figure(figsize=(8, 8))
            fig = plt.barh([str(x[0]) for x in counts],
                           [x[1] for x in counts])
            plt.title("Pixel-count vs. number of segments with that count " +
                      "(total segments: " + str(len(self.points)) + ")")
            plt.xlabel("Number of segments with a given pixel count")
            plt.ylabel("Pixel-count for a segment (total pixel counts = " +
                       str(len(counts)) + ")")
            plt.tight_layout()
            plt.savefig(ffn)
            plt.close()

    def segment(self, flood_lines=None, use_normalize=False):
        print('segment ' + self.fn)
        self.name = self.fn[:-4]
        a = os.system('mkdir -p ' + self.name)
        self.rgb = [[self.dat[i],  # format data into list of rgb tuples
                     self.dat[self.npx + i],
                     self.dat[2 * self.npx + i]] for i in range(0, self.npx)]

        c = {}  # count rgb values
        for x in self.rgb:
            x = str(x)
            c[x] = c[x] + 1 if x in c else 1

        ffn = self.fn + '_rgb_count.png'
        if not os.path.exists(ffn):
            plt.figure()
            plt.bar(c.keys(), np.log(list(c.values())) / np.log(10.))
            plt.title("Log of count of color values")
            print('+w ' + ffn)
            plt.savefig(ffn)
            plt.close()

        counts = [[c[k], k] for k in c]
        counts.sort()
        self.max_color = counts[-1][1]  # assume most-prevalent col is bg

        if sys.getrecursionlimit() < self.npx:  # increase recursion limit
            sys.setrecursionlimit(self.npx)

        # labels for segmentation
        self.labels = [0 for i in range(self.npx)]  # 0 == unlabelled!
        self.next_label = 1

        r_i = flood_lines if flood_lines else range(self.rows)
        for i in r_i:
            for j in range(self.cols):
                flood(self, i, j)

        self.gather_points()  # list (i,j) points by segment

        fn = None
        is_truth = (self.name == 'truth')  # is this truth data?
        truth = None
        if is_truth:
            truth = [x for x in open('truth_chars.txt').read()]

        for pi in range(len(self.points)):  # plot image rep. of each truth
            point = self.points[pi]
            if pi > 0:  # 0 is bg / unlabelled
                try:
                    ns = truth[pi - 1] if is_truth else str(pi)
                    fn = self.name + os.path.sep + ns + '.png'
                    if not os.path.exists(fn):
                        plt.figure()
                        plt.scatter([x[1] for x in point],
                                    [-x[0] for x in point])
                        plt.title(ns)
                        print('+w ' + fn)
                        if use_normalize:
                            plt.xlim([-.5, self.cols - .5])
                            plt.ylim([-(self.rows - .5), .5])
                        plt.xlabel('col ix')
                        plt.ylabel('-row ix')
                        plt.savefig(fn)
                        plt.close()

                    fn = self.name + os.path.sep + ns + '.centroid'
                    if not os.path.exists(fn):
                        print(' +w ' + fn)
                        xL, yL = to_list(point)
                        cX, cY = centroid(xL, yL)
                        open(fn, 'wb').write((str(cX) + ' ' + str(cY)).encode())

                    # nb run cleanup.py before changing truth inputs
                    fn = self.name + os.path.sep + ns + '.p'
                    if not os.path.exists(fn):
                        print(' +w ' + fn)
                        pickle.dump(point, open(fn, 'wb'))
                except:
                    pass  # don't plot / save the background


if __name__ == "__main__":  # example image data to demonstrate floodfill
    args = sys.argv

    if len(args) < 2:
        dat = [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
               0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,
               0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0]
        a = image()
        a.dat, a.rows, a.cols, a.bands = dat, 4, 4, 3
        a.npx = a.rows * a.cols
        a.fn = '4x4.bin'
        a.png()
        a.segment(use_normalize=False)
    else:
        # flood-fill only the listed rows of truth.bin: image.__init__ takes
        # just the file name, and the row list is segment()'s flood_lines arg
        a = image('truth.bin')
        a.segment(flood_lines=[745, 838, 932])
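
flood is imported from a module that is not shown here. The sketch below is a stack-based stand-in consistent with how segment() drives it (labels start at 0 for unlabelled, max_color is the stringified background colour, next_label grows by one per region); being iterative, it would also remove the need for sys.setrecursionlimit(). The real flood() may differ, for example in connectivity or in its background test:

def flood_iterative(img, i, j):
    ix = i * img.cols + j
    if img.labels[ix] != 0 or str(img.rgb[ix]) == img.max_color:
        return  # already labelled, or background: nothing to do
    stack = [(i, j)]
    while stack:
        y, x = stack.pop()
        if y < 0 or y >= img.rows or x < 0 or x >= img.cols:
            continue
        k = y * img.cols + x
        if img.labels[k] != 0 or str(img.rgb[k]) == img.max_color:
            continue
        img.labels[k] = img.next_label
        stack.extend([(y + 1, x), (y - 1, x), (y, x + 1), (y, x - 1)])  # 4-connected
    img.next_label += 1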
[ "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "sys.setrecursionlimit", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.imshow", "matplotlib.pyplot.close", "os.path.exists", "dist.to_list", "dist.centroid", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "os.system", "matplotlib.pyplot.ylabel", "sys.getrecursionlimit", "matplotlib.pyplot.xlim", "numpy.log", "numpy.fromfile", "matplotlib.pyplot.scatter", "numpy.zeros", "numpy.array", "flood.flood", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((1278, 1307), 'numpy.zeros', 'np.zeros', (['(rows, cols, bands)'], {}), '((rows, cols, bands))\n', (1286, 1307), True, 'import numpy as np\n'), ((1394, 1409), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {}), '(rgb)\n', (1404, 1409), True, 'import matplotlib.pyplot as plt\n'), ((1414, 1424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1422, 1424), True, 'import matplotlib.pyplot as plt\n'), ((1490, 1512), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (1501, 1512), True, 'import matplotlib.pyplot as plt\n'), ((1517, 1528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1526, 1528), True, 'import matplotlib.pyplot as plt\n'), ((881, 914), 'numpy.fromfile', 'np.fromfile', (['fn'], {'dtype': 'np.float32'}), '(fn, dtype=np.float32)\n', (892, 914), True, 'import numpy as np\n'), ((3562, 3596), 'os.system', 'os.system', (["('mkdir -p ' + self.name)"], {}), "('mkdir -p ' + self.name)\n", (3571, 3596), False, 'import os\n'), ((1967, 1985), 'numpy.array', 'np.array', (['self.dat'], {}), '(self.dat)\n', (1975, 1985), True, 'import numpy as np\n'), ((2790, 2809), 'os.path.exists', 'os.path.exists', (['ffn'], {}), '(ffn)\n', (2804, 2809), False, 'import os\n'), ((2854, 2880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2864, 2880), True, 'import matplotlib.pyplot as plt\n'), ((3153, 3210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of segments with a given pixel count"""'], {}), "('Number of segments with a given pixel count')\n", (3163, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3364), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3362, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3393), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ffn'], {}), '(ffn)\n', (3388, 3393), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3417), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3415, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4000), 'os.path.exists', 'os.path.exists', (['ffn'], {}), '(ffn)\n', (3995, 4000), False, 'import os\n'), ((4014, 4026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4024, 4026), True, 'import matplotlib.pyplot as plt\n'), ((4109, 4150), 'matplotlib.pyplot.title', 'plt.title', (['"""Log of count of color values"""'], {}), "('Log of count of color values')\n", (4118, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4210), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ffn'], {}), '(ffn)\n', (4205, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4223, 4234), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4232, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4384, 4407), 'sys.getrecursionlimit', 'sys.getrecursionlimit', ([], {}), '()\n', (4405, 4407), False, 'import sys\n'), ((4460, 4491), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['self.npx'], {}), '(self.npx)\n', (4481, 4491), False, 'import sys\n'), ((4767, 4784), 'flood.flood', 'flood', (['self', 'i', 'j'], {}), '(self, i, j)\n', (4772, 4784), False, 'from flood import flood\n'), ((4084, 4096), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (4090, 4096), True, 'import numpy as np\n'), ((5373, 5391), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (5387, 5391), False, 'import os\n'), ((5417, 5429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5427, 5429), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5514), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['[x[1] for x in point]', '[(-x[0]) for x in point]'], {}), '([x[1] for x in point], [(-x[0]) for x in point])\n', (5465, 5514), True, 'import matplotlib.pyplot as plt\n'), ((5573, 5586), 'matplotlib.pyplot.title', 'plt.title', (['ns'], {}), '(ns)\n', (5582, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""col ix"""'], {}), "('col ix')\n", (5827, 5837), True, 'import matplotlib.pyplot as plt\n'), ((5862, 5883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""-row ix"""'], {}), "('-row ix')\n", (5872, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5908, 5923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {}), '(fn)\n', (5919, 5923), True, 'import matplotlib.pyplot as plt\n'), ((5948, 5959), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5957, 5959), True, 'import matplotlib.pyplot as plt\n'), ((6056, 6074), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (6070, 6074), False, 'import os\n'), ((6153, 6167), 'dist.to_list', 'to_list', (['point'], {}), '(point)\n', (6160, 6167), False, 'from dist import normalize, to_list, centroid\n'), ((6201, 6217), 'dist.centroid', 'centroid', (['xL', 'yL'], {}), '(xL, yL)\n', (6209, 6217), False, 'from dist import normalize, to_list, centroid\n'), ((6503, 6521), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (6517, 6521), False, 'import os\n'), ((5699, 5732), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.5, self.cols - 0.5]'], {}), '([-0.5, self.cols - 0.5])\n', (5707, 5732), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5794), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-(self.rows - 0.5), 0.5]'], {}), '([-(self.rows - 0.5), 0.5])\n', (5767, 5794), True, 'import matplotlib.pyplot as plt\n')]
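
The dist module (normalize, to_list, centroid) is also not shown. Below are minimal stand-ins consistent with how segment() calls them on [[row, col], ...] point lists; the coordinate order is an assumption:

def to_list(points):
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return xs, ys

def centroid(xs, ys):
    return sum(xs) / len(xs), sum(ys) / len(ys)

print(centroid(*to_list([[0, 0], [2, 4]])))  # -> (1.0, 2.0)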
import sys import os from settings import beaver_broker_ip, beaver_broker_port, autotestdir, beaver_datanode_file, gflagsfile, config_path, log_dir, index_forsearch, pb_forsearch import psutil import time import numpy as np import requests #MEM_MAX = psutil.virtual_memory().total MEM_MAX = 0.8*32*1024*1024*1024 # memory size of tikv node, not current PC #------------------knob controller------------------ # disable_auto_compactions def set_disable_auto_compactions(ip, port, val): cmd="./tikv-ctl --host "+ip+":"+port+" modify-tikv-config -m kvdb -n default.disable_auto_compactions -v "+str(val) res=os.popen(cmd).read() # will return "success" return(res) knob_set=\ { "--max_concurrency_tasks_per_search": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": [4, 6, 8], # if type==enum, list all valid values "type": "enum", # int / enum "default": 0 # default value }, "--max_per_search_ram": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": [198], # if type==enum, list all valid values "type": "enum", # int / enum "default": 0 # default value }, "--max_per_sub_search_ram": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": [99], # if type==enum, list all valid values "type": "enum", # int / enum "default": 0 # default value }, "--block_ids_per_batch": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": [16, 18, 20], # if type==enum, list all valid values "type": "enum", # int / enum "default": 0 # default value }, "--lease_timeout": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": [4, 8, 16, 32, 64], # if type==enum, list all valid values "type": "enum", # int / enum "default": 0 # default value }, "--enable_query_cache": { "changebyyml": True, "set_func": None, "minval": 0, # if type==int, indicate min possible value "maxval": 0, # if type==int, indicate max possible value "enumval": ['false', 'true'], # if type==enum, list all valid values "type": "bool", # int / enum "default": 0 # default value }, } #------------------metric controller------------------ def read_write_throughput(ip, port): return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function def read_write_latency(ip, port): return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function def read_get_throughput(ip, port): return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function def read_get_latency(ip, port): return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. No need to read in this function def read_scan_throughput(ip, port): return(0) # DEPRECATED FUNCTION: throughput is instant and could be read from go-ycsb. No need to read in this function def read_scan_latency(ip, port): return(0) # DEPRECATED FUNCTION: latency is instant and could be read from go-ycsb. 
No need to read in this function def read_store_size(ip, port): return(0) def read_compaction_cpu(ip, port): cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $3}'" res=os.popen(cmd).read() if len(res) == 0: return 0 else: return(res) def read_compaction_mem(ip, port): cmd="ps -aux|grep beaver_datanode|grep -v 'grep'|grep -v '/bin/sh'|awk -F' *' '{print $4}'" res=os.popen(cmd).read() if len(res) == 0: return 0 else: return(res) def read_search_latency(ip, port): url = "http://"+ip+":"+port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60" data = pb_forsearch testnum = 20 num = 100 restime = [] # costime = [] for i in range(num + testnum): start_api = beaverrequest(url, data) if i >= testnum: # restime.append(start_api[1]) restime.append(start_api[0]["timecost"]) sortedRestime = sorted(restime) newrestime = sortedRestime[:-10] return sum(newrestime) / len(newrestime) def beaverrequest(url, data): r = requests.post(url, data=data) return [r.json(), r.elapsed.total_seconds(), r.status_code] metric_set=\ {"write_throughput": { "read_func": read_write_throughput, "lessisbetter": 0, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, "write_latency": { "read_func": read_write_latency, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #instant }, "get_throughput": { "read_func": read_get_throughput, "lessisbetter": 0, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, "get_latency": { "read_func": read_get_latency, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #instant }, "scan_throughput": { "read_func": read_scan_throughput, "lessisbetter": 0, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, "scan_latency": { "read_func": read_scan_latency, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #instant }, "store_size": { "read_func": read_store_size, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #instant }, "compaction_cpu": { "read_func": read_compaction_cpu, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, "compaction_mem": { "read_func": read_compaction_mem, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, "search_latency": { "read_func": read_search_latency, "lessisbetter": 1, # whether less value of this metric is better(1: yes) "calc": "ins", #incremental }, } #------------------workload controller------------------ def run_workload(wl_type): return(None) def load_workload(wl_type): return(None) #------------------common functions------------------ def set_tikvyml(knob_sessname, knob_val): ymldir=os.path.join(autotestdir,"conf","beaver_test.gflags_new") tmpdir=os.path.join(autotestdir,"conf","beaver_test.gflags") if not os.path.exists(os.path.dirname(tmpdir)): os.makedirs(os.path.dirname(tmpdir)) os.popen("cp "+gflagsfile+" "+tmpdir).read() with open(tmpdir, 'r') as read_file, open(ymldir, 'w') as write_file: dic={} for line in read_file: value = line.strip().split("=") if len(value) > 1: dic[value[0]] = value[1] if(knob_set[knob_sessname]['type']=='enum'): idx=knob_val knob_val=knob_set[knob_sessname]['enumval'][idx] if(knob_set[knob_sessname]['type']=='bool'): if(knob_val==0): knob_val='false' else: knob_val='true' if(knob_sessname=='--max_shard_size'): 
knob_val=str(knob_val)+"g" if(knob_sessname=='--max_per_search_ram' or knob_sessname=='--max_per_sub_search_ram'): knob_val=str(knob_val)+"m" if(knob_sessname in dic): dic[knob_sessname] = knob_val else: return('failed') print("set_beaver_datanode_gflags:: ",knob_sessname, knob_val) for kkk in dic: write_file.write(kkk+"="+str(dic[kkk])+'\n') # os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir) os.remove(tmpdir) os.rename(ymldir, tmpdir) time.sleep(0.5) return('success') # if(knob_name=='block-size'): # knob_val=str(knob_val)+"KB" # if(knob_name=='write-buffer-size' or knob_name=='max-bytes-for-level-base' or knob_name=='target-file-size-base'): # knob_val=str(knob_val)+"MB" # if(knob_name in tmpcontent[knob_sess[0]][knob_sess[1]]): # TODO: only support 2 level of knob_sess currently # tmpcontent[knob_sess[0]][knob_sess[1]][knob_name]=knob_val # else: # return('failed') # print("set_tikvyml:: ",knob_sessname, knob_sess, knob_name, knob_val) # ymlf=open(ymldir, 'w') # yaml.dump(tmpcontent, ymlf, Dumper=yaml.RoundTripDumper) # os.popen("rm "+tmpdir+" && "+"mv "+ymldir+" "+tmpdir) # time.sleep(0.5) # return('success') def set_knob(knob_name, knob_val): changebyyml=knob_set[knob_name]["changebyyml"] if(changebyyml): res=set_tikvyml(knob_name, knob_val) else: func=knob_set[knob_name]["set_func"] res=func(beaver_broker_ip, beaver_broker_port, knob_val) return res def read_knob(knob_name, knob_cache): res=knob_cache[knob_name] return res def read_metric(metric_name, rres=None): if(rres!=None): rl=rres.split('\n') rl.reverse() if(metric_name=="write_latency"): i=0 while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))): i+=1 dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0] dat=int(dat) return(dat) elif(metric_name=="get_latency"): i=0 while(not rl[i].startswith('READ ')): i+=1 dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0] dat=int(dat) return(dat) elif(metric_name=="scan_latency"): i=0 while(not rl[i].startswith('SCAN ')): i+=1 dat=rl[i][rl[i].find("Avg(us):") + 9:].split(",")[0] dat=int(dat) return(dat) elif(metric_name=="write_throughput"): i=0 while((not rl[i].startswith('UPDATE ')) and (not rl[i].startswith('INSERT '))): i+=1 dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0] dat=float(dat) return(dat) elif(metric_name=="get_throughput"): i=0 while(not rl[i].startswith('READ ')): i+=1 dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0] dat=float(dat) return(dat) elif(metric_name=="scan_throughput"): i=0 while(not rl[i].startswith('SCAN ')): i+=1 dat=rl[i][rl[i].find("OPS:") + 5:].split(",")[0] dat=float(dat) return(dat) func=metric_set[metric_name]["read_func"] res=func(beaver_broker_ip, beaver_broker_port) return res def init_knobs(): # if there are knobs whose range is related to PC memory size, initialize them here pass def calc_metric(metric_after, metric_before, metric_list): num_metrics = len(metric_list) new_metric = np.zeros([1, num_metrics]) for i, x in enumerate(metric_list): if(metric_set[x]["calc"]=="inc"): new_metric[0][i]=metric_after[0][i]-metric_before[0][i] elif(metric_set[x]["calc"]=="ins"): new_metric[0][i]=metric_after[0][i] return(new_metric) # def restart_db(): # #cmd="cd /home/tidb/tidb-ansible/ && ansible-playbook unsafe_cleanup_data.yml" # dircmd="cd "+ autotestdir + " && " # clrcmd="ansible-playbook unsafe_cleanup_data.yml" # depcmd="ansible-playbook deploy.yml" # runcmd="ansible-playbook start.yml" # ntpcmd="ansible-playbook -i hosts.ini deploy_ntp.yml -u tidb -b" #need sleep 10s after ntpcmd # 
print("-------------------------------------------------------") # clrres = os.popen(dircmd+clrcmd).read() # if("Congrats! All goes well" in clrres): # print("unsafe_cleanup_data finished, res == "+clrres.split('\n')[-2]) # else: # print(clrres) # print("unsafe_cleanup_data failed") # exit() # print("-------------------------------------------------------") # ntpres = os.popen(dircmd + ntpcmd).read() # time.sleep(10) # if ("Congrats! All goes well" in ntpres): # print("set ntp finished, res == " + ntpres.split('\n')[-2]) # else: # print(ntpres) # print("set ntp failed") # exit() # print("-------------------------------------------------------") # depres = os.popen(dircmd + depcmd).read() # if ("Congrats! All goes well" in depres): # print("deploy finished, res == "+depres.split('\n')[-2]) # else: # print(depres) # print("deploy failed") # exit() # print("-------------------------------------------------------") # runres = os.popen(dircmd + runcmd).read() # if ("Congrats! All goes well" in runres): # print("start finished, res == "+runres.split('\n')[-2]) # else: # print(runres) # print("start failed") # exit() # print("-------------------------------------------------------") def restart_beaver_datanode(): dircmd="cd "+ autotestdir + " && " stopcmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'|xargs kill" querycmd="ps -ef|grep beaver_datanode|grep -v 'grep'|awk -F' *' '{print $2}'" beaver_conf=os.path.join(autotestdir,"conf","beaver_datanode.gflags") test_conf=os.path.join(autotestdir,"conf","beaver_test.gflags") startcmd=beaver_datanode_file+" --flagfile="+beaver_conf+" --config_path="+config_path+" --log_dir="+log_dir+" > /dev/null 2>&1" print("-----------------------------stop beaver datanode--------------------------") stopres = os.popen(stopcmd).read() if len(os.popen(querycmd).read()) != 0: for i in range(5): time.sleep(2) psres = os.popen(querycmd).read() if len(psres) == 0 : print("Beaver has been closed successfully!") break else: print("Waiting beaver to close, pid is %s" % psres) if i == 4: print("Beaver close failed!") exit() else: print("Beaver closed successfully!") print("-----------------------------replace config file--------------------------") if os.path.exists(beaver_conf): os.remove(beaver_conf) replaceres = os.popen("cp "+test_conf+" "+beaver_conf).read() if len(replaceres) == 0: print("replace config file finished!") else: print(replaceres) print("replace config file failed!") exit() print("-----------------------------start beaver datanode--------------------------") startres = os.popen(startcmd) beaver_url = "http://"+beaver_broker_ip+":"+beaver_broker_port+"/_search?index="+index_forsearch+"&sid=test&rpc_timeout=60" for i in range(20): time.sleep(10) curlres = requests.post(beaver_url, data=pb_forsearch).json() if "result" in curlres and curlres['result'] == False: print("Waiting beaver datanode to be available...") else: print("Beaver datanode is available!") break if i == 19: print(curlres) print("Beaver start failed!") exit() print("---------------------------------------------------------------------------")
[ "os.remove", "os.rename", "os.popen", "numpy.zeros", "os.path.exists", "os.path.dirname", "time.sleep", "requests.post", "os.path.join" ]
[((6072, 6101), 'requests.post', 'requests.post', (['url'], {'data': 'data'}), '(url, data=data)\n', (6085, 6101), False, 'import requests\n'), ((8870, 8929), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags_new"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags_new')\n", (8882, 8929), False, 'import os\n'), ((8939, 8994), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags')\n", (8951, 8994), False, 'import os\n'), ((10239, 10256), 'os.remove', 'os.remove', (['tmpdir'], {}), '(tmpdir)\n', (10248, 10256), False, 'import os\n'), ((10261, 10286), 'os.rename', 'os.rename', (['ymldir', 'tmpdir'], {}), '(ymldir, tmpdir)\n', (10270, 10286), False, 'import os\n'), ((10291, 10306), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (10301, 10306), False, 'import time\n'), ((13447, 13473), 'numpy.zeros', 'np.zeros', (['[1, num_metrics]'], {}), '([1, num_metrics])\n', (13455, 13473), True, 'import numpy as np\n'), ((15787, 15846), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_datanode.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_datanode.gflags')\n", (15799, 15846), False, 'import os\n'), ((15859, 15914), 'os.path.join', 'os.path.join', (['autotestdir', '"""conf"""', '"""beaver_test.gflags"""'], {}), "(autotestdir, 'conf', 'beaver_test.gflags')\n", (15871, 15914), False, 'import os\n'), ((16762, 16789), 'os.path.exists', 'os.path.exists', (['beaver_conf'], {}), '(beaver_conf)\n', (16776, 16789), False, 'import os\n'), ((17165, 17183), 'os.popen', 'os.popen', (['startcmd'], {}), '(startcmd)\n', (17173, 17183), False, 'import os\n'), ((16799, 16821), 'os.remove', 'os.remove', (['beaver_conf'], {}), '(beaver_conf)\n', (16808, 16821), False, 'import os\n'), ((17344, 17358), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (17354, 17358), False, 'import time\n'), ((633, 646), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (641, 646), False, 'import os\n'), ((5174, 5187), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (5182, 5187), False, 'import os\n'), ((5404, 5417), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (5412, 5417), False, 'import os\n'), ((9019, 9042), 'os.path.dirname', 'os.path.dirname', (['tmpdir'], {}), '(tmpdir)\n', (9034, 9042), False, 'import os\n'), ((9065, 9088), 'os.path.dirname', 'os.path.dirname', (['tmpdir'], {}), '(tmpdir)\n', (9080, 9088), False, 'import os\n'), ((16149, 16166), 'os.popen', 'os.popen', (['stopcmd'], {}), '(stopcmd)\n', (16157, 16166), False, 'import os\n'), ((16257, 16270), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (16267, 16270), False, 'import time\n'), ((16839, 16886), 'os.popen', 'os.popen', (["('cp ' + test_conf + ' ' + beaver_conf)"], {}), "('cp ' + test_conf + ' ' + beaver_conf)\n", (16847, 16886), False, 'import os\n'), ((9098, 9141), 'os.popen', 'os.popen', (["('cp ' + gflagsfile + ' ' + tmpdir)"], {}), "('cp ' + gflagsfile + ' ' + tmpdir)\n", (9106, 9141), False, 'import os\n'), ((17377, 17421), 'requests.post', 'requests.post', (['beaver_url'], {'data': 'pb_forsearch'}), '(beaver_url, data=pb_forsearch)\n', (17390, 17421), False, 'import requests\n'), ((16185, 16203), 'os.popen', 'os.popen', (['querycmd'], {}), '(querycmd)\n', (16193, 16203), False, 'import os\n'), ((16291, 16309), 'os.popen', 'os.popen', (['querycmd'], {}), '(querycmd)\n', (16299, 16309), False, 'import os\n')]
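A quick illustrative sketch of how read_metric above parses a go-ycsb style summary string when rres is passed. The sample line only mimics the fields the parser looks for (a 'READ ' prefix, 'OPS:' and 'Avg(us):'); it is not guaranteed to match real go-ycsb output exactly.

sample = "READ   - Takes(s): 10.0, Count: 5000, OPS: 500.0, Avg(us): 812, 99th(us): 2000"
print(read_metric("get_latency", rres=sample))     # -> 812 (int, microseconds)
print(read_metric("get_throughput", rres=sample))  # -> 500.0 (float, ops/s)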
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
import scipy as sp
import os
import matplotlib.pyplot as plt
import random

from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours

import asyncio
import threading
import json

import tornado.ioloop
import tornado.httpserver
from tornado.web import RequestHandler
import requests

from lib.priorityqueue import PriorityQueue
from lib.dynamics import DiseaseModel
from lib.mobilitysim import MobilitySimulator
from lib.parallel import *

SIMPLIFIED_OPT = True

def format_opt_to_sim(opt_params, n_betas):
    '''
    Convert bayes_opt parameter format into our format
    '''
    if SIMPLIFIED_OPT:
        return {
            'betas': [opt_params['beta'] for _ in range(n_betas)],
            'alpha': opt_params['alpha'],
            'mu': opt_params['mu']
        }
    else:
        sim_params = {
            'betas': [None for _ in range(n_betas)],
            'alpha': None,
            'mu': None
        }
        for k, v in opt_params.items():
            if 'betas' in k:
                sim_params['betas'][int(k[5:])] = v
            else:
                sim_params[k] = v
        return sim_params

def format_sim_to_opt(sim_params):
    '''
    Convert our format into bayes_opt format
    '''
    if SIMPLIFIED_OPT:
        return {
            'beta': sim_params['betas'][0],
            'alpha': sim_params['alpha'],
            'mu': sim_params['mu']  # was `opt_params['mu']`, a NameError in this scope
        }
    else:
        opt_params = {'betas' + str(i): p for i, p in enumerate(sim_params['betas'])}
        opt_params.update({
            'alpha': sim_params['alpha'],
            'mu': sim_params['mu']
        })
        return opt_params

def convert_timings_to_daily(timings, time_horizon):
    '''
    Converts batch of size N of timings of M individuals in a time horizon
    of `time_horizon` in hours into daily aggregate cases

    Argument: timings : np.array of shape (N, M)
    Returns:  np.array of shape (N, time_horizon // 24)
    '''
    if len(timings.shape) == 1:
        timings = np.expand_dims(timings, axis=0)

    arr = np.array([
        np.sum((timings >= t * 24) & (timings < (t + 1) * 24), axis=1)
        for t in range(0, int(time_horizon // 24))]).T
    return arr

def convert_timings_to_cumulative_daily(timings, time_horizon):
    '''
    Converts batch of size N of timings of M individuals in a time horizon
    of `time_horizon` in hours into daily cumulative aggregate cases

    Argument: timings : np.array of shape (N, M)
    Returns:  np.array of shape (N, time_horizon // 24)
    '''
    if len(timings.shape) == 1:
        timings = np.expand_dims(timings, axis=0)

    cumulative = np.array([
        np.sum((timings < (t + 1) * 24), axis=1)
        for t in range(0, int(time_horizon // 24))]).T
    return cumulative

def loss_daily(predicted_confirmed_times, targets_daily, time_horizon, power=2.0):
    '''
    Daily loss: total squared error between average predicted daily cases
    and true daily cases
    '''
    # predicted_confirmed_daily = convert_timings_to_daily(predicted_confirmed_times, time_horizon)
    predicted_confirmed_daily = convert_timings_to_cumulative_daily(predicted_confirmed_times, time_horizon)
    ave_predicted_confirmed_daily = predicted_confirmed_daily.mean(axis=0)
    loss = np.power(np.abs(ave_predicted_confirmed_daily - targets_daily), power).mean()
    return loss

def multimodal_loss_daily(preds, weights, targets, time_horizon, power=2.0):
    '''
    Multimodal Daily loss: Same as loss_daily but considering several weighted metrics (e.g.
positive, recovered, deceased) ''' loss = 0 for w, pred, target in zip(weights, preds, targets): # pred = convert_timings_to_daily(pred, time_horizon) pred = convert_timings_to_cumulative_daily(pred, time_horizon) ave_pred = pred.mean(axis=0) loss += w * np.power(np.abs(ave_pred - target), power).mean() return loss def make_loss_function(mob_settings, distributions, targets, time_horizon, param_bounds, initial_seeds, testing_params, random_repeats, num_site_types, cpu_count, measure_list, loss, num_people, site_loc, home_loc, c, extra_params=None): ''' Returns function executable by optimizer with desired loss ''' with open(f'logger_{c}.txt', 'w+') as logfile: logfile.write(f'Log run: seed = {c}\n\n') def f(opt_params): # convert bayes_opt parameter format into our format sim_params = format_opt_to_sim(opt_params, n_betas=num_site_types) # launch in parallel summary = launch_parallel_simulations( mob_settings=mob_settings, distributions=distributions, random_repeats=random_repeats, cpu_count=cpu_count, params=sim_params, initial_seeds=initial_seeds, testing_params=testing_params, measure_list=measure_list, max_time=time_horizon, num_people=num_people, site_loc=site_loc, home_loc=home_loc, verbose=False) if loss == 'loss_daily': return summary.state_started_at['posi'] elif loss == 'multimodal_loss_daily': return (summary.state_started_at['posi'], summary.state_started_at['resi'], summary.state_started_at['dead']) else: raise ValueError('Unknown loss function') if loss == 'loss_daily': def loss_function(**kwargv): predicted_confirmed_times = f(kwargv) l = loss_daily( predicted_confirmed_times=predicted_confirmed_times, targets_daily=targets, time_horizon=time_horizon, power=2.0) ave_pred = convert_timings_to_cumulative_daily( predicted_confirmed_times, time_horizon).mean(axis=0) loginfo = f'{-l} ' + str(kwargv) + '\n' with open(f'logger_{c}.txt', 'a') as logfile: logfile.write(loginfo) # bayes_opt maximizes return - l return loss_function elif loss == 'multimodal_loss_daily': # here `extra_params` are weights if extra_params: weights = extra_params['weights'] else: weights = np.ones(len(targets)) def loss_function(**kwargv): preds = f(kwargv) l = multimodal_loss_daily( preds=preds, weights=weights, targets=targets, time_horizon=time_horizon, power=2.0) # bayes_opt maximizes return - l return loss_function else: raise ValueError('Unknown loss function')
[ "numpy.abs", "numpy.sum", "numpy.expand_dims" ]
[((2226, 2257), 'numpy.expand_dims', 'np.expand_dims', (['timings'], {'axis': '(0)'}), '(timings, axis=0)\n', (2240, 2257), True, 'import numpy as np\n'), ((2841, 2872), 'numpy.expand_dims', 'np.expand_dims', (['timings'], {'axis': '(0)'}), '(timings, axis=0)\n', (2855, 2872), True, 'import numpy as np\n'), ((2288, 2350), 'numpy.sum', 'np.sum', (['((timings >= t * 24) & (timings < (t + 1) * 24))'], {'axis': '(1)'}), '((timings >= t * 24) & (timings < (t + 1) * 24), axis=1)\n', (2294, 2350), True, 'import numpy as np\n'), ((2910, 2948), 'numpy.sum', 'np.sum', (['(timings < (t + 1) * 24)'], {'axis': '(1)'}), '(timings < (t + 1) * 24, axis=1)\n', (2916, 2948), True, 'import numpy as np\n'), ((3538, 3591), 'numpy.abs', 'np.abs', (['(ave_predicted_confirmed_daily - targets_daily)'], {}), '(ave_predicted_confirmed_daily - targets_daily)\n', (3544, 3591), True, 'import numpy as np\n'), ((4120, 4145), 'numpy.abs', 'np.abs', (['(ave_pred - target)'], {}), '(ave_pred - target)\n', (4126, 4145), True, 'import numpy as np\n')]
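A minimal sketch of the timing-to-daily conversion used by the losses above; the infection times are made up for illustration.

timings = np.array([[3.0, 20.0, 30.0, 70.0]])            # hours, one simulation of four cases
print(convert_timings_to_daily(timings, 96))            # [[2 1 1 0]]  new cases per day
print(convert_timings_to_cumulative_daily(timings, 96)) # [[2 3 4 4]]  cumulative cases per day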
import os import numpy as np import pytest from ananse.network import Network from .test_02_utils import write_file @pytest.fixture def binding_fname(): return "tests/example_data/binding2.tsv" @pytest.fixture def network(): genome = "tests/data/genome.fa" if not os.path.exists(genome): write_file(genome, [">chr1", "N"]) return Network(genome=genome, gene_bed="ananse/db/hg38.genes.bed") def test_unique_enhancer(network, binding_fname): regions = network.unique_enhancers(binding_fname) regions = regions.as_df() assert regions.shape[0] == 6 assert sorted(list(regions["Chromosome"].unique())) == ["chr1", "chr10", "chr17"] assert sorted(list(regions["Start"].unique())) == [7677184, 7687827] def test_distance_weight(network): dw = network.distance_weight( include_promoter=True, promoter_region=20, full_weight_region=50, maximum_distance=100, alpha=5, ) assert list(dw.columns) == ["weight", "dist"] dw = dw.set_index("dist") assert dw.loc[0, "weight"] == 1 assert dw.loc[25, "weight"] == 1 assert dw.loc[50, "weight"] == 1 assert dw.loc[51, "weight"] < 1 assert np.isclose(dw.loc[100, "weight"], 0, atol=1e-4) assert dw.shape[0] == 101 dw = network.distance_weight( include_promoter=False, promoter_region=20, full_weight_region=50, maximum_distance=100, alpha=5, ) assert list(dw.columns) == ["weight", "dist"] dw = dw.set_index("dist") assert dw.loc[0, "weight"] == 0 assert dw.loc[20, "weight"] == 0 assert dw.loc[21, "weight"] == 1 assert dw.shape[0] == 101
[ "numpy.isclose", "os.path.exists", "ananse.network.Network" ]
[((361, 420), 'ananse.network.Network', 'Network', ([], {'genome': 'genome', 'gene_bed': '"ananse/db/hg38.genes.bed"'}), "(genome=genome, gene_bed='ananse/db/hg38.genes.bed')", (368, 375), False, 'from ananse.network import Network\n'), ((1202, 1251), 'numpy.isclose', 'np.isclose', (["dw.loc[100, 'weight']", '(0)'], {'atol': '(0.0001)'}), "(dw.loc[100, 'weight'], 0, atol=0.0001)", (1212, 1222), True, 'import numpy as np\n'), ((282, 304), 'os.path.exists', 'os.path.exists', (['genome'], {}), '(genome)\n', (296, 304), False, 'import os\n')]
"""Miscellaneous utility functions."""

from functools import reduce

from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
import spacy
import re
import cv2
import time
from keras_bert.tokenizer import Tokenizer
from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary
from keras_bert import extract_embeddings
import os

def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    # return lambda x: reduce(lambda v, f: f(v), funcs, x)
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.')

def letterbox_image(image, size):
    '''resize image with unchanged aspect ratio using padding'''
    iw, ih = image.size
    w, h = size
    scale = min(w/iw, h/ih)
    nw = int(iw*scale)
    nh = int(ih*scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image

def rand(a=0, b=1):
    return np.random.rand()*(b-a) + a

def get_bert_input(text, vocabs, max_len=512):
    tokenizer = Tokenizer(vocabs, cased=False)
    token, segment = tokenizer.encode(text, max_len=max_len)
    # `encode` already pads to max_len, so the padding below is a harmless
    # safeguard. (The original appended each list to itself and padded the
    # segment list against len(token), which corrupted both sequences.)
    token.extend([0] * (max_len - len(token)))
    segment.extend([0] * (max_len - len(segment)))
    return [token, segment]

def seq_to_list(s):
    '''
    note: 2018.10.3
    used to preprocess sentences
    '''
    t_str = s.lower()
    for i in [r'\?', r'\!', r'\'', r'\"', r'\$', r'\:', r'\@', r'\(', r'\)', r'\,', r'\.', r'\;', r'\n']:
        t_str = re.sub(i, '', t_str)
    for i in [r'\-', r'\/']:
        t_str = re.sub(i, ' ', t_str)
    q_list = re.sub(r'\?', '', t_str.lower()).split(' ')
    q_list = list(filter(lambda x: len(x) > 0, q_list))
    return q_list

def qlist_to_vec(max_length, q_list, embed):
    '''
    note: 2018.10.3
    used to preprocess sentences
    '''
    glove_matrix = []
    glove_dict = {}
    q_len = len(q_list)
    if q_len > max_length:
        q_len = max_length
    for i in range(max_length):
        if i < q_len:
            w = q_list[i]
            if w not in glove_dict:
                glove_dict[w] = embed(u'%s' % w).vector
            glove_matrix.append(glove_dict[w])
        else:
            glove_matrix.append(np.zeros(300, dtype=float))
    return np.array(glove_matrix)

def get_random_data(annotation_line, input_shape, embed, config, train_mode=True, max_boxes=1):
    '''random preprocessing for real-time data augmentation'''
    SEG_DIR = config['seg_gt_path']
    line = annotation_line.split()
    h, w = input_shape
    stop = len(line)
    for i in range(1, len(line)):
        if (line[i] == '~'):
            stop = i
            break
    # print(line[1:stop])
    box_ = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:stop]])
    box = np.zeros([1, 5])
    seg_id = box_[0][-1]
    box[0] = box_[0][:-1]
    seg_map = np.load(os.path.join(SEG_DIR, str(seg_id) + '.npy'))
    seg_map_ori = np.array(seg_map).astype(np.float32)
    seg_map = Image.fromarray(seg_map_ori)
    # print(np.shape(box))
    # print(box)
    #####################################
    # sentence processing: max length is set to 20, and one sentence is chosen
    # at random for training
    sentences = []
    sent_stop = stop + 1
    for i in range(stop + 1, len(line)):
        if line[i] == '~':
            sentences.append(line[sent_stop:i])
            sent_stop = i + 1
    sentences.append(line[sent_stop:len(line)])
    choose_index = np.random.choice(len(sentences))
    sentence = sentences[choose_index]
    # print(qlist)
    if config['use_bert']:
        vocabs = load_vocabulary(config['bert_path'] + '/vocab.txt')
        word_vec = get_bert_input(sentence, vocabs, 512)
    else:
        word_vec = qlist_to_vec(config['word_len'], sentence, embed)
    # print(word_vec)
    # print(np.shape(word_vec))
####################################### image = Image.open(os.path.join(config['image_path'],line[0])) iw, ih = image.size scale = min(w / iw, h / ih) nw = int(iw * scale) nh = int(ih * scale) dx = (w - nw) // 2 dy = (h - nh) // 2 ori_image = image image = image.resize((nw, nh), Image.BICUBIC) new_image = Image.new('RGB', (w, h), (128, 128, 128)) new_image.paste(image, (dx, dy)) image_data = np.array(new_image) / 255. seg_map = seg_map.resize((nw, nh)) new_map = Image.new('L', (w, h), (0)) new_map.paste(seg_map, (dx, dy)) seg_map_data = np.array(new_map) seg_map_data = cv2.resize(seg_map_data, ( seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] // config['seg_out_stride']),interpolation=cv2.INTER_NEAREST) seg_map_data = np.reshape(seg_map_data, [np.shape(seg_map_data)[0], np.shape(seg_map_data)[1], 1]) # print(new_image.size) # correct boxes box_data = np.zeros((max_boxes, 5)) if len(box) > 0: if len(box) > max_boxes: box = box[:max_boxes] box[:, [0, 2]] = box[:, [0, 2]] * scale + dx box[:, [1, 3]] = box[:, [1, 3]] * scale + dy box_data[:len(box)] = box box_data = box_data[:, 0:4] #delete classfy if not train_mode: word_vec=[qlist_to_vec(config['word_len'], sent,embed) for sent in sentences] return image_data, box_data,word_vec,ori_image,sentences,np.expand_dims(seg_map_ori ,-1) return image_data, box_data,word_vec,seg_map_data def lr_step_decay(lr_start=0.001, steps=[30, 40]): def get_lr(epoch): decay_rate = len(steps) for i, e in enumerate(steps): if epoch < e: decay_rate = i break lr = lr_start / (10 ** (decay_rate)) return lr return get_lr #powre decay def lr_power_decay(lr_start=2.5e-4,lr_power=0.9, warm_up_lr=0.,step_all=45*1414,warm_up_step=1000): # step_per_epoch=3286 def warm_up(base_lr, lr, cur_step, end_step): return base_lr + (lr - base_lr) * cur_step / end_step def get_learningrate(epoch): if epoch<warm_up_step: lr = warm_up(warm_up_lr, lr_start, epoch, warm_up_step) else: lr = lr_start * ((1 - float(epoch-warm_up_step) / (step_all-warm_up_step)) ** lr_power) return lr # print("learning rate is", lr) return get_learningrate
[ "PIL.Image.new", "os.path.join", "numpy.random.rand", "numpy.zeros", "numpy.expand_dims", "numpy.shape", "numpy.array", "keras_bert.loader.load_vocabulary", "re.sub", "PIL.Image.fromarray", "keras_bert.tokenizer.Tokenizer", "cv2.resize" ]
[((1069, 1108), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (1078, 1108), False, 'from PIL import Image\n'), ((1302, 1332), 'keras_bert.tokenizer.Tokenizer', 'Tokenizer', (['vocabs'], {'cased': '(False)'}), '(vocabs, cased=False)\n', (1311, 1332), False, 'from keras_bert.tokenizer import Tokenizer\n'), ((2576, 2598), 'numpy.array', 'np.array', (['glove_matrix'], {}), '(glove_matrix)\n', (2584, 2598), True, 'import numpy as np\n'), ((3085, 3101), 'numpy.zeros', 'np.zeros', (['[1, 5]'], {}), '([1, 5])\n', (3093, 3101), True, 'import numpy as np\n'), ((3275, 3303), 'PIL.Image.fromarray', 'Image.fromarray', (['seg_map_ori'], {}), '(seg_map_ori)\n', (3290, 3303), False, 'from PIL import Image\n'), ((4426, 4467), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', '(128, 128, 128)'], {}), "('RGB', (w, h), (128, 128, 128))\n", (4435, 4467), False, 'from PIL import Image\n'), ((4603, 4628), 'PIL.Image.new', 'Image.new', (['"""L"""', '(w, h)', '(0)'], {}), "('L', (w, h), 0)\n", (4612, 4628), False, 'from PIL import Image\n'), ((4687, 4704), 'numpy.array', 'np.array', (['new_map'], {}), '(new_map)\n', (4695, 4704), True, 'import numpy as np\n'), ((4724, 4894), 'cv2.resize', 'cv2.resize', (['seg_map_data', "(seg_map_data.shape[0] // config['seg_out_stride'], seg_map_data.shape[0] //\n config['seg_out_stride'])"], {'interpolation': 'cv2.INTER_NEAREST'}), "(seg_map_data, (seg_map_data.shape[0] // config['seg_out_stride'],\n seg_map_data.shape[0] // config['seg_out_stride']), interpolation=cv2.\n INTER_NEAREST)\n", (4734, 4894), False, 'import cv2\n'), ((5061, 5085), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (5069, 5085), True, 'import numpy as np\n'), ((1829, 1849), 're.sub', 're.sub', (['i', '""""""', 't_str'], {}), "(i, '', t_str)\n", (1835, 1849), False, 'import re\n'), ((1895, 1916), 're.sub', 're.sub', (['i', '""" """', 't_str'], {}), "(i, ' ', t_str)\n", (1901, 1916), False, 'import re\n'), ((3839, 3890), 'keras_bert.loader.load_vocabulary', 'load_vocabulary', (["(config['bert_path'] + '/vocab.txt')"], {}), "(config['bert_path'] + '/vocab.txt')\n", (3854, 3890), False, 'from keras_bert.loader import load_trained_model_from_checkpoint, load_vocabulary\n'), ((4139, 4182), 'os.path.join', 'os.path.join', (["config['image_path']", 'line[0]'], {}), "(config['image_path'], line[0])\n", (4151, 4182), False, 'import os\n'), ((4522, 4541), 'numpy.array', 'np.array', (['new_image'], {}), '(new_image)\n', (4530, 4541), True, 'import numpy as np\n'), ((1211, 1227), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1225, 1227), True, 'import numpy as np\n'), ((3226, 3243), 'numpy.array', 'np.array', (['seg_map'], {}), '(seg_map)\n', (3234, 3243), True, 'import numpy as np\n'), ((5526, 5557), 'numpy.expand_dims', 'np.expand_dims', (['seg_map_ori', '(-1)'], {}), '(seg_map_ori, -1)\n', (5540, 5557), True, 'import numpy as np\n'), ((2538, 2564), 'numpy.zeros', 'np.zeros', (['(300)'], {'dtype': 'float'}), '(300, dtype=float)\n', (2546, 2564), True, 'import numpy as np\n'), ((4935, 4957), 'numpy.shape', 'np.shape', (['seg_map_data'], {}), '(seg_map_data)\n', (4943, 4957), True, 'import numpy as np\n'), ((4962, 4984), 'numpy.shape', 'np.shape', (['seg_map_data'], {}), '(seg_map_data)\n', (4970, 4984), True, 'import numpy as np\n')]
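A small sketch of the sentence pipeline above; qlist_to_vec needs a spaCy model with 300-d vectors, and the model name here is an assumption.

print(seq_to_list("What's in the image?"))  # -> ['whats', 'in', 'the', 'image']
# embed = spacy.load("en_core_web_md")      # assumed model with 300-d vectors
# qlist_to_vec(20, seq_to_list("What's in the image?"), embed).shape  # -> (20, 300), zero-padded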
#!/usr/bin/env python import argparse from functools import partial from pathlib import Path from requests_futures.sessions import FuturesSession import pandas as pd import numpy as np # see https://stackoverflow.com/a/50039149 import resource resource.setrlimit(resource.RLIMIT_NOFILE, (110000, 110000)) __version__ = '0.3' HEADERS = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive' } def get_html(response, verbose=False): try: result = response.result() if verbose: print('Response from {} has status code {}.'.format(result.url, result.status_code)) assert result.status_code // 100 == 2 return result.content.decode() except: if verbose: print('Error occured for {}'.format(response)) return None def get_htmls(urls, max_workers=8, verbose=False, timeout=60): session = FuturesSession(max_workers=max_workers) if verbose: n = len(urls) print('Submitting {} jobs...'.format(n)) responses = [session.get(url, headers=HEADERS, timeout=timeout) for url in urls] if verbose: print('Executing {} jobs...'.format(n)) # if verbose, run a for loop to show progress explicitly if verbose: result = [] for i, response in enumerate(responses): print('{} done, {} to go...'.format(i, n - i)) result.append(get_html(response, verbose=verbose)) return result else: return [get_html(response, verbose=verbose) for response in responses] def get_htmls_archive(urls, max_workers=8, verbose=False, timeout=60): urls = ['https://web.archive.org/web/' + url for url in urls] return get_htmls(urls, max_workers=max_workers, verbose=verbose, timeout=timeout) def main(path, output, verbose, worker, timeout): df = pd.read_hdf(path) # if output already existed, updates: if Path(output).is_file(): df_old = pd.read_hdf(output) # merging dfs df_merged = df.merge(df_old[['html']], how='outer', left_index=True, right_index=True) df = df_merged # merging might have changed the orders df.sort_values('time_added', inplace=True) na_idx = df.html.isna() n = np.count_nonzero(na_idx) print('{} out of {} urls are new, fetching...'.format(n, df.shape[0])) # fetch html n_workers = worker if worker else n df.loc[na_idx, 'html'] = get_htmls(df[na_idx].index, max_workers=n_workers, verbose=verbose, timeout=timeout) else: n = df.shape[0] print('{} urls to fetch...'.format(n)) n_workers = worker if worker else n df['html'] = get_htmls(df.index, max_workers=n_workers, verbose=verbose, timeout=timeout) # no response df['archive'] = df.html.isna() n = np.count_nonzero(df.archive) print('{} out of {} urls cannot be fetched, try fetching from archive.org...'.format(n, df.shape[0])) n_workers = worker if worker else n df.loc[df.archive, 'html'] = get_htmls_archive(df[df.archive].index, max_workers=n_workers, verbose=verbose, timeout=timeout) df.to_hdf( output, 'df', format='table', complevel=9, ) def cli(): parser = argparse.ArgumentParser(description="Save url content in HDF5.") parser.add_argument('input', help='Input urls in HDF5.') parser.add_argument('-o', '--output', help='Output HDF5. Update file if exists.') parser.add_argument('-p', '--worker', type=int, help='No. of workers used. If not specified, use as many as needed.') parser.add_argument('-t', '--timeout', type=float, default=60., help='Timeout specified for requests. 
Default: 60.') parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__)) parser.add_argument('-V', '--verbose', action='store_true', help='verbose to stdout.') args = parser.parse_args() main(args.input, args.output, args.verbose, args.worker, args.timeout) if __name__ == "__main__": cli()
[ "numpy.count_nonzero", "pandas.read_hdf", "argparse.ArgumentParser", "resource.setrlimit", "requests_futures.sessions.FuturesSession", "pathlib.Path" ]
[((247, 307), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(110000, 110000)'], {}), '(resource.RLIMIT_NOFILE, (110000, 110000))\n', (265, 307), False, 'import resource\n'), ((1187, 1226), 'requests_futures.sessions.FuturesSession', 'FuturesSession', ([], {'max_workers': 'max_workers'}), '(max_workers=max_workers)\n', (1201, 1226), False, 'from requests_futures.sessions import FuturesSession\n'), ((2128, 2145), 'pandas.read_hdf', 'pd.read_hdf', (['path'], {}), '(path)\n', (2139, 2145), True, 'import pandas as pd\n'), ((3116, 3144), 'numpy.count_nonzero', 'np.count_nonzero', (['df.archive'], {}), '(df.archive)\n', (3132, 3144), True, 'import numpy as np\n'), ((3544, 3608), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save url content in HDF5."""'}), "(description='Save url content in HDF5.')\n", (3567, 3608), False, 'import argparse\n'), ((2237, 2256), 'pandas.read_hdf', 'pd.read_hdf', (['output'], {}), '(output)\n', (2248, 2256), True, 'import pandas as pd\n'), ((2543, 2567), 'numpy.count_nonzero', 'np.count_nonzero', (['na_idx'], {}), '(na_idx)\n', (2559, 2567), True, 'import numpy as np\n'), ((2196, 2208), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (2200, 2208), False, 'from pathlib import Path\n')]
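Illustrative use of get_htmls above (placeholder urls); failed fetches come back as None, and get_htmls_archive simply prefixes each url with the Wayback Machine endpoint before retrying.

urls = ["https://example.com", "https://example.org"]
htmls = get_htmls(urls, max_workers=2, verbose=True, timeout=10)
retry = [u for u, h in zip(urls, htmls) if h is None]   # candidates for get_htmls_archive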
import os, fnmatch, sys, time
import dill as pickle

import scipy.interpolate as interp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

import bead_util as bu
import calib_util as cu
import configuration as config

dirname = '/data/old_trap/20201202/power/init'

files, _ = bu.find_all_fnames(dirname, sort_time=True)

fb_set = []
power = []
for fname in files:
    df = bu.DataFile()
    df.load(fname)
    fb_set.append(np.mean(df.pos_fb[2]))
    power.append(np.abs(np.mean(df.power)))

plt.plot(fb_set, power)
plt.show()
[ "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "bead_util.find_all_fnames", "numpy.mean", "bead_util.DataFile" ]
[((325, 368), 'bead_util.find_all_fnames', 'bu.find_all_fnames', (['dirname'], {'sort_time': '(True)'}), '(dirname, sort_time=True)\n', (343, 368), True, 'import bead_util as bu\n'), ((549, 572), 'matplotlib.pyplot.plot', 'plt.plot', (['fb_set', 'power'], {}), '(fb_set, power)\n', (557, 572), True, 'import matplotlib.pyplot as plt\n'), ((573, 583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (581, 583), True, 'import matplotlib.pyplot as plt\n'), ((426, 439), 'bead_util.DataFile', 'bu.DataFile', ([], {}), '()\n', (437, 439), True, 'import bead_util as bu\n'), ((480, 501), 'numpy.mean', 'np.mean', (['df.pos_fb[2]'], {}), '(df.pos_fb[2])\n', (487, 501), True, 'import numpy as np\n'), ((527, 544), 'numpy.mean', 'np.mean', (['df.power'], {}), '(df.power)\n', (534, 544), True, 'import numpy as np\n')]
import numpy as np from scipy.linalg import expm from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold from pymanopt.tools.multi import multiprod, multisym, multitransp class Stiefel(EuclideanEmbeddedSubmanifold): """ Factory class for the Stiefel manifold. Instantiation requires the dimensions n, p to be specified. Optional argument k allows the user to optimize over the product of k Stiefels. Elements are represented as n x p matrices (if k == 1), and as k x n x p matrices if k > 1 (Note that this is different to manopt!). """ def __init__(self, n, p, k=1): self._n = n self._p = p self._k = k # Check that n is greater than or equal to p if n < p or p < 1: raise ValueError("Need n >= p >= 1. Values supplied were n = %d " "and p = %d." % (n, p)) if k < 1: raise ValueError("Need k >= 1. Value supplied was k = %d." % k) if k == 1: name = "Stiefel manifold St(%d, %d)" % (n, p) elif k >= 2: name = "Product Stiefel manifold St(%d, %d)^%d" % (n, p, k) dimension = int(k * (n * p - p * (p + 1) / 2)) super().__init__(name, dimension) @property def typicaldist(self): return np.sqrt(self._p * self._k) def inner(self, X, G, H): # Inner product (Riemannian metric) on the tangent space # For the stiefel this is the Frobenius inner product. return np.tensordot(G, H, axes=G.ndim) def dist(self, X, Y): raise NotImplementedError( "The manifold '{:s}' currently provides no implementation of " "the 'dist' method".format(self._get_class_name())) def proj(self, X, U): return U - multiprod(X, multisym(multiprod(multitransp(X), U))) # TODO(nkoep): Implement the weingarten map instead. def ehess2rhess(self, X, egrad, ehess, H): XtG = multiprod(multitransp(X), egrad) symXtG = multisym(XtG) HsymXtG = multiprod(H, symXtG) return self.proj(X, ehess - HsymXtG) # Retract to the Stiefel using the qr decomposition of X + G. def retr(self, X, G): if self._k == 1: # Calculate 'thin' qr decomposition of X + G q, r = np.linalg.qr(X + G) # Unflip any flipped signs XNew = np.dot(q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5))) else: XNew = X + G for i in range(self._k): q, r = np.linalg.qr(XNew[i]) XNew[i] = np.dot( q, np.diag(np.sign(np.sign(np.diag(r)) + 0.5))) return XNew def norm(self, X, G): # Norm on the tangent space of the Stiefel is simply the Euclidean # norm. return np.linalg.norm(G) # Generate random Stiefel point using qr of random normally distributed # matrix. def rand(self): if self._k == 1: X = np.random.randn(self._n, self._p) q, r = np.linalg.qr(X) return q X = np.zeros((self._k, self._n, self._p)) for i in range(self._k): X[i], r = np.linalg.qr(np.random.randn(self._n, self._p)) return X def randvec(self, X): U = np.random.randn(*np.shape(X)) U = self.proj(X, U) U = U / np.linalg.norm(U) return U def transp(self, x1, x2, d): return self.proj(x2, d) def exp(self, X, U): # TODO: Simplify these expressions. if self._k == 1: W = expm(np.bmat([[X.T.dot(U), -U.T.dot(U)], [np.eye(self._p), X.T.dot(U)]])) Z = np.bmat([[expm(-X.T.dot(U))], [np.zeros((self._p, self._p))]]) Y = np.bmat([X, U]).dot(W).dot(Z) else: Y = np.zeros(np.shape(X)) for i in range(self._k): W = expm(np.bmat([[X[i].T.dot(U[i]), -U[i].T.dot(U[i])], [np.eye(self._p), X[i].T.dot(U[i])]])) Z = np.bmat([[expm(-X[i].T.dot(U[i]))], [np.zeros((self._p, self._p))]]) Y[i] = np.bmat([X[i], U[i]]).dot(W).dot(Z) return Y def zerovec(self, X): if self._k == 1: return np.zeros((self._n, self._p)) return np.zeros((self._k, self._n, self._p))
[ "numpy.eye", "numpy.random.randn", "numpy.tensordot", "numpy.linalg.qr", "numpy.zeros", "pymanopt.tools.multi.multiprod", "numpy.shape", "numpy.bmat", "pymanopt.tools.multi.multitransp", "numpy.linalg.norm", "pymanopt.tools.multi.multisym", "numpy.diag", "numpy.sqrt" ]
[((1309, 1335), 'numpy.sqrt', 'np.sqrt', (['(self._p * self._k)'], {}), '(self._p * self._k)\n', (1316, 1335), True, 'import numpy as np\n'), ((1510, 1541), 'numpy.tensordot', 'np.tensordot', (['G', 'H'], {'axes': 'G.ndim'}), '(G, H, axes=G.ndim)\n', (1522, 1541), True, 'import numpy as np\n'), ((2011, 2024), 'pymanopt.tools.multi.multisym', 'multisym', (['XtG'], {}), '(XtG)\n', (2019, 2024), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2043, 2063), 'pymanopt.tools.multi.multiprod', 'multiprod', (['H', 'symXtG'], {}), '(H, symXtG)\n', (2052, 2063), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2812, 2829), 'numpy.linalg.norm', 'np.linalg.norm', (['G'], {}), '(G)\n', (2826, 2829), True, 'import numpy as np\n'), ((3085, 3122), 'numpy.zeros', 'np.zeros', (['(self._k, self._n, self._p)'], {}), '((self._k, self._n, self._p))\n', (3093, 3122), True, 'import numpy as np\n'), ((4341, 4378), 'numpy.zeros', 'np.zeros', (['(self._k, self._n, self._p)'], {}), '((self._k, self._n, self._p))\n', (4349, 4378), True, 'import numpy as np\n'), ((1971, 1985), 'pymanopt.tools.multi.multitransp', 'multitransp', (['X'], {}), '(X)\n', (1982, 1985), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((2303, 2322), 'numpy.linalg.qr', 'np.linalg.qr', (['(X + G)'], {}), '(X + G)\n', (2315, 2322), True, 'import numpy as np\n'), ((2982, 3015), 'numpy.random.randn', 'np.random.randn', (['self._n', 'self._p'], {}), '(self._n, self._p)\n', (2997, 3015), True, 'import numpy as np\n'), ((3035, 3050), 'numpy.linalg.qr', 'np.linalg.qr', (['X'], {}), '(X)\n', (3047, 3050), True, 'import numpy as np\n'), ((3356, 3373), 'numpy.linalg.norm', 'np.linalg.norm', (['U'], {}), '(U)\n', (3370, 3373), True, 'import numpy as np\n'), ((4297, 4325), 'numpy.zeros', 'np.zeros', (['(self._n, self._p)'], {}), '((self._n, self._p))\n', (4305, 4325), True, 'import numpy as np\n'), ((2535, 2556), 'numpy.linalg.qr', 'np.linalg.qr', (['XNew[i]'], {}), '(XNew[i])\n', (2547, 2556), True, 'import numpy as np\n'), ((3191, 3224), 'numpy.random.randn', 'np.random.randn', (['self._n', 'self._p'], {}), '(self._n, self._p)\n', (3206, 3224), True, 'import numpy as np\n'), ((3299, 3310), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3307, 3310), True, 'import numpy as np\n'), ((3836, 3847), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3844, 3847), True, 'import numpy as np\n'), ((1821, 1835), 'pymanopt.tools.multi.multitransp', 'multitransp', (['X'], {}), '(X)\n', (1832, 1835), False, 'from pymanopt.tools.multi import multiprod, multisym, multitransp\n'), ((3719, 3747), 'numpy.zeros', 'np.zeros', (['(self._p, self._p)'], {}), '((self._p, self._p))\n', (3727, 3747), True, 'import numpy as np\n'), ((3640, 3655), 'numpy.eye', 'np.eye', (['self._p'], {}), '(self._p)\n', (3646, 3655), True, 'import numpy as np\n'), ((3767, 3782), 'numpy.bmat', 'np.bmat', (['[X, U]'], {}), '([X, U])\n', (3774, 3782), True, 'import numpy as np\n'), ((4118, 4146), 'numpy.zeros', 'np.zeros', (['(self._p, self._p)'], {}), '((self._p, self._p))\n', (4126, 4146), True, 'import numpy as np\n'), ((2415, 2425), 'numpy.diag', 'np.diag', (['r'], {}), '(r)\n', (2422, 2425), True, 'import numpy as np\n'), ((3994, 4009), 'numpy.eye', 'np.eye', (['self._p'], {}), '(self._p)\n', (4000, 4009), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.bmat', 'np.bmat', (['[X[i], U[i]]'], {}), '([X[i], U[i]])\n', (4180, 4194), True, 'import numpy as np\n'), ((2638, 2648), 'numpy.diag', 'np.diag', 
(['r'], {}), '(r)\n', (2645, 2648), True, 'import numpy as np\n')]
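A short sketch of the Stiefel class above: the qr-based retraction keeps the columns orthonormal.

st = Stiefel(5, 3)
X = st.rand()                            # random point with orthonormal columns
U = st.randvec(X)                        # unit-norm tangent vector at X
Y = st.retr(X, 0.1 * U)                  # retract the step back onto the manifold
print(np.allclose(Y.T @ Y, np.eye(3)))   # True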
############################################################### import os import cv2 import math import numpy as np from scipy.ndimage import interpolation as inter from scipy.ndimage import rotate ############################################################### C_percentage = 0 ACCEPTED_EXTENSIONS = (".jpeg", ".jpg", ".png", ".tif", ".tiff", ".bmp", ".dib", ".jpe", ".jp2", ".webp", ".pbm", ".pgm", ".ppm", ".sr", ".ras") ############################################################### def euclidian_distance(first, second): return math.sqrt(sum([pow(max(x, y) - min(x, y), 2) for x, y in zip(first, second)])) def color_difference(first, second, precision = 100): return euclidian_distance(first, second) > precision def precision(arr, angle): hit = np.sum(inter.rotate(arr, angle, reshape = False, order = 0), axis = 1) prec = np.sum((hit[1:]-hit[:-1])**2) return prec def rotateImage(image, angle): image_center = tuple(np.array(image.shape[1::-1]) / 2) rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0) result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(255,255,255)) return result def crop(abs_folder_in, abs_folder_out, debug): global C_percentage images_list = [i for i in os.listdir(abs_folder_in) if i.endswith(ACCEPTED_EXTENSIONS) and i[:2] != "ad"] if debug: print("\n".join(images_list)) images_list = sorted(images_list, key = lambda i: int(i[:-4])) for c, image_path in enumerate(images_list, 1): original_image = cv2.imread(os.path.join(abs_folder_in, image_path), 0) sheet = cv2.resize(original_image, (0, 0), fx = 0.125, fy = 0.125) ret, sheet = cv2.threshold(sheet, 127, 255, cv2.THRESH_BINARY) wd, ht = sheet.shape pix = np.array(sheet, np.uint8) bin_img = 1 - (pix / 255.0) limit, delta = 10, 1 angles = np.arange(-limit, limit+delta, delta) scores = [precision(bin_img, angle) for angle in angles] best = angles[scores.index(max(scores))] original_image = rotateImage(cv2.imread(os.path.join(abs_folder_in, image_path)), best) w, h, z = original_image.shape K = 500 / max(w, h) resized_image = cv2.resize(original_image, (0, 0), fx = K, fy = K) w, h, z = resized_image.shape mx, my = int(w / 2), int(h / 2) startx = 0 starty = 0 endx = w endy = h for i in range(1, w): if color_difference(resized_image[i, my], resized_image[i - 1, my]): startx = i break for i in range(w - 2, 0, -1): if color_difference(resized_image[i, my], resized_image[i + 1, my]): endx = i break for i in range(1, h): if color_difference(resized_image[mx, i], resized_image[mx, i - 1]): starty = i break for i in range(h - 2, 0, -1): if color_difference(resized_image[mx, i], resized_image[mx, i + 1]): endy = i break if endx <= startx: endx = w if endy <= starty: endy = h startx, starty, endx, endy = int(startx * (1 / K)), int(starty * (1 / K)), int(endx * (1 / K)), int(endy * (1 / K)) jump = int(1 / K * 10) if debug: print("Angle : ", best) print("K : ", K, " jump : ", jump) print("(", startx, ", ", starty, ") -> (", endx, ", ", endy, ")") print("Saving...") if (endx-jump) - (startx+jump) < (w*K)/3 or (endy-jump) - (starty+jump) < (h*K)/3: cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image) else: cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image[startx + jump : endx - jump, starty + jump : endy - jump]) if debug: print("Done ", c, " of ", len(images_list)) C_percentage += 1 / len(images_list) C_percentage = 0 def get_percentage(): global C_percentage return C_percentage 
###############################################################
[ "os.listdir", "numpy.sum", "cv2.threshold", "scipy.ndimage.interpolation.rotate", "cv2.warpAffine", "numpy.array", "numpy.arange", "os.path.join", "cv2.getRotationMatrix2D", "cv2.resize" ]
[((847, 880), 'numpy.sum', 'np.sum', (['((hit[1:] - hit[:-1]) ** 2)'], {}), '((hit[1:] - hit[:-1]) ** 2)\n', (853, 880), True, 'import numpy as np\n'), ((997, 1046), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (1020, 1046), False, 'import cv2\n'), ((1060, 1199), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(255, 255, 255)'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))\n', (1074, 1199), False, 'import cv2\n'), ((772, 820), 'scipy.ndimage.interpolation.rotate', 'inter.rotate', (['arr', 'angle'], {'reshape': '(False)', 'order': '(0)'}), '(arr, angle, reshape=False, order=0)\n', (784, 820), True, 'from scipy.ndimage import interpolation as inter\n'), ((1653, 1707), 'cv2.resize', 'cv2.resize', (['original_image', '(0, 0)'], {'fx': '(0.125)', 'fy': '(0.125)'}), '(original_image, (0, 0), fx=0.125, fy=0.125)\n', (1663, 1707), False, 'import cv2\n'), ((1733, 1782), 'cv2.threshold', 'cv2.threshold', (['sheet', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(sheet, 127, 255, cv2.THRESH_BINARY)\n', (1746, 1782), False, 'import cv2\n'), ((1826, 1851), 'numpy.array', 'np.array', (['sheet', 'np.uint8'], {}), '(sheet, np.uint8)\n', (1834, 1851), True, 'import numpy as np\n'), ((1934, 1973), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + delta)', 'delta'], {}), '(-limit, limit + delta, delta)\n', (1943, 1973), True, 'import numpy as np\n'), ((2273, 2319), 'cv2.resize', 'cv2.resize', (['original_image', '(0, 0)'], {'fx': 'K', 'fy': 'K'}), '(original_image, (0, 0), fx=K, fy=K)\n', (2283, 2319), False, 'import cv2\n'), ((949, 977), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (957, 977), True, 'import numpy as np\n'), ((1314, 1339), 'os.listdir', 'os.listdir', (['abs_folder_in'], {}), '(abs_folder_in)\n', (1324, 1339), False, 'import os\n'), ((1593, 1632), 'os.path.join', 'os.path.join', (['abs_folder_in', 'image_path'], {}), '(abs_folder_in, image_path)\n', (1605, 1632), False, 'import os\n'), ((2134, 2173), 'os.path.join', 'os.path.join', (['abs_folder_in', 'image_path'], {}), '(abs_folder_in, image_path)\n', (2146, 2173), False, 'import os\n')]
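A minimal sketch of the deskew scoring used in crop(): score candidate angles with precision() on a binarized image and rotate by the best one. The image here is synthetic, for illustration only.

img = np.full((100, 100), 255, np.uint8)
img[40:45, 10:90] = 0                                   # one dark "text line"
angles = np.arange(-10, 11, 1)
scores = [precision(1 - img / 255.0, a) for a in angles]
best = int(angles[int(np.argmax(scores))])             # ~0 for an unrotated line
deskewed = rotateImage(cv2.cvtColor(img, cv2.COLOR_GRAY2BGR), best)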
from logging import error import numpy as np class Plane: def __init__(self, origin: np.array, vector1: np.array, vector2: np.array) -> None: self.origin = origin self.vector1 = vector1 self.vector2 = vector2 def getBarycentricCoordinates(self, point: np.array, direction: np.array): a = np.array([self.vector1, self.vector2, -direction]).T b = point - self.origin sol = np.linalg.solve(a, b) return np.array([sol[0], sol[1]]) def convertBarycentricCoordinates(self, x, y): return self.origin + x * self.vector1 + y * self.vector2 class Sphere: """https://math.stackexchange.com/questions/268064/move-a-point-up-and-down-along-a-sphere""" def __init__(self, radius) -> None: self.radius = radius self.current_pos = [90, -90] def rotate(self, x, y): self.current_pos[0] = (self.current_pos[0] + x) % 360 self.current_pos[1] = (self.current_pos[1] + y) % 360 theta, phi = np.deg2rad(self.current_pos[0]), np.deg2rad(self.current_pos[1]) return np.array( [ self.radius * np.sin(theta) * np.cos(phi), self.radius * np.sin(theta) * np.sin(phi), self.radius * np.cos(theta), ] ) class RotationMatrix3D: def __init__(self) -> None: pass def __call__( self, object_to_rotate: np.ndarray, axis: int, angle: float ) -> np.ndarray: if axis == 0: rotation_matrix = np.array( [ [1, 0, 0], [0, np.cos(angle), -np.sin(angle)], [0, np.sin(angle), np.cos(angle)], ] ) elif axis == 1: rotation_matrix = np.array( [ [np.cos(angle), 0, np.sin(angle)], [0, 1, 0], [-np.sin(angle), 0, np.cos(angle)], ] ) elif axis == 2: rotation_matrix = np.array( [ [np.cos(angle), -np.sin(angle), 0], [np.sin(angle), np.cos(angle), 0], [0, 0, 1], ] ) else: raise error("Invalid argument for axis, options are 0, 1, 2") return np.matmul(rotation_matrix, object_to_rotate) def getQuadrant(x: float, y: float): if x == 0 and y == 0: return -1 if x >= 0 and y >= 0: return 1 elif x > 0 and y < 0: return 2 elif x <= 0 and y <= 0: return 3 elif x < 0 and y > 0: return 4
[ "logging.error", "numpy.deg2rad", "numpy.sin", "numpy.array", "numpy.cos", "numpy.matmul", "numpy.linalg.solve" ]
[((430, 451), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (445, 451), True, 'import numpy as np\n'), ((468, 494), 'numpy.array', 'np.array', (['[sol[0], sol[1]]'], {}), '([sol[0], sol[1]])\n', (476, 494), True, 'import numpy as np\n'), ((2348, 2392), 'numpy.matmul', 'np.matmul', (['rotation_matrix', 'object_to_rotate'], {}), '(rotation_matrix, object_to_rotate)\n', (2357, 2392), True, 'import numpy as np\n'), ((331, 381), 'numpy.array', 'np.array', (['[self.vector1, self.vector2, -direction]'], {}), '([self.vector1, self.vector2, -direction])\n', (339, 381), True, 'import numpy as np\n'), ((1008, 1039), 'numpy.deg2rad', 'np.deg2rad', (['self.current_pos[0]'], {}), '(self.current_pos[0])\n', (1018, 1039), True, 'import numpy as np\n'), ((1041, 1072), 'numpy.deg2rad', 'np.deg2rad', (['self.current_pos[1]'], {}), '(self.current_pos[1])\n', (1051, 1072), True, 'import numpy as np\n'), ((1158, 1169), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1164, 1169), True, 'import numpy as np\n'), ((1217, 1228), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1223, 1228), True, 'import numpy as np\n'), ((1260, 1273), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1266, 1273), True, 'import numpy as np\n'), ((2276, 2331), 'logging.error', 'error', (['"""Invalid argument for axis, options are 0, 1, 2"""'], {}), "('Invalid argument for axis, options are 0, 1, 2')\n", (2281, 2331), False, 'from logging import error\n'), ((1142, 1155), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1148, 1155), True, 'import numpy as np\n'), ((1201, 1214), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1207, 1214), True, 'import numpy as np\n'), ((1613, 1626), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1619, 1626), True, 'import numpy as np\n'), ((1669, 1682), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1675, 1682), True, 'import numpy as np\n'), ((1684, 1697), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1690, 1697), True, 'import numpy as np\n'), ((1629, 1642), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1635, 1642), True, 'import numpy as np\n'), ((1835, 1848), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1841, 1848), True, 'import numpy as np\n'), ((1853, 1866), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1859, 1866), True, 'import numpy as np\n'), ((1940, 1953), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1946, 1953), True, 'import numpy as np\n'), ((1922, 1935), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1928, 1935), True, 'import numpy as np\n'), ((2091, 2104), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2097, 2104), True, 'import numpy as np\n'), ((2147, 2160), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2153, 2160), True, 'import numpy as np\n'), ((2162, 2175), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2168, 2175), True, 'import numpy as np\n'), ((2107, 2120), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2113, 2120), True, 'import numpy as np\n')]
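A worked example of the ray-plane intersection in Plane (values chosen for illustration): a ray from (2, 3, 5) along -z hits the x-y plane at (2, 3, 0), so the barycentric coordinates are (2, 3).

plane = Plane(np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.]))
uv = plane.getBarycentricCoordinates(np.array([2., 3., 5.]), np.array([0., 0., -1.]))
print(uv)                                         # [2. 3.]
print(plane.convertBarycentricCoordinates(*uv))   # [2. 3. 0.]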
import cv2
import numpy as np
import sys, getopt
import time
import dlib
import math

class Controller():
    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        self.frame = None
        self.ret = None
        self.faces = None
        self.cap = cv2.VideoCapture(0)  # open phone camera API
        self.address = None  # e.g. "https://192.168.43.1:8080/video"
        self.threshold = 35
        self.grayFrame = None
        self.cutEyes = None
        self.img = cv2.imread("anime.jpg")
        self.cutEyesGray = None
        self.contours = None
        self.capThreshold = None
        self.left_eye = None
        self.maskEyes = None
        self.landmarks = None
        self.min_x = None
        self.max_x = None
        self.min_y = None
        self.max_y = None
        self.otps = None
        self.args = None
        self.cameraIs = False
        self.thresholdIs = False
        self.rgbIs = False
        self.eyeLinesIs = False
        self.fx = 1
        self.fy = 1
        self.check = True
        self.calibrationIsOkey = False
        self.testImage = None
        self.background = None
        self.eyeCenter = None
        self.maxArray = np.array([[0, 0]])
        self.maxMean = None
        self.minArray = np.array([[0, 0]])
        self.minMean = None
        self.key = None
        self.time = 0
        self.optIfBlock = 2
        self.startTimeIfBlock = True
        self.CalibFinishBlock = False
        self.finalScreen = False
        self.screenSizeX = 1920
        self.screenSizeY = 1080

    def getOtps(self):
        try:
            self.otps, self.args = getopt.getopt(sys.argv[1:], "h:c:t:r:a:e:", ["help", "cameradress", "threshold", "rgb", "eyeline", "halfcut", "quartercut", "calib"])
        except getopt.GetoptError as err:
            print(err)
            sys.exit()
        #self.otps = []

    def nothing(self, x):
        pass

    def main(self):
        self.getOtps()
        for otp, arg in self.otps:
            if otp == '-a':
                self.address = str(arg)
                self.cap.open(self.address)
            elif otp == '--threshold':
                self.thresholdIs = True
                for ot, ar in self.otps:
                    if ot == '-t':
                        self.threshold = int(ar)
            elif (otp == '-r' and arg == 'True'):
                self.rgbIs = True
            elif otp == '-e' and arg == 'True':
                self.eyeLinesIs = True
            elif otp == '-c' and arg == 'True':
                self.cameraIs = True
            elif otp == '--halfcut':
                self.fx = 0.5
                self.fy = 0.5
            elif otp == '--quartercut':
                self.fx = 0.25
                self.fy = 0.25
            elif otp == '--calib':
                self.calibrationIsOkey = True
        #TODO
        #print(self.otps, self.args)
        #self.optimizationEyesLooking()
        array1 = [[0, 0]]
        openImage = False
        o = False
        while True:
            #first: open the camera and grab a frame
            try:
                self.readSelf()
                self.frame = cv2.resize(self.frame, None, fx=self.fx, fy=self.fy)
                #TODO
                #self.frame = cv2.rotate(self.frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
                self.grayFrame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            except:
                print("error")
            self.faces = self.detector(self.grayFrame)
            self.lanmarkingLeftEyes()
            if self.eyeLinesIs == True:
                self.eyeLines()
            if self.cameraIs == True and self.ret:
                cv2.imshow("Frame", self.frame)
            #second: optional windows (threshold etc.) for tuning the eye segmentation
            if self.thresholdIs == True:
                cv2.imshow("2", self.capThreshold)
                if self.cutEyesGray is not None:
                    cv2.imshow("3", self.cutEyesGray)
            self.key = cv2.waitKey(1)
            #third: calibration
            #key 'o'
            if self.key == 79 or self.key == 111:
                o = True
                self.cameraIs = False
                self.thresholdIs = False
                cv2.destroyAllWindows()
            #key 'space'
            if self.key == 32:
                cv2.destroyAllWindows()
                o = False
                openImage = True
            if self.calibrationIsOkey == True and o == True:
                self.optimizationEyesLooking()
            if openImage == True:
                self.lookingPointDrawCircle()
            #fourth: final screen
            if self.finalScreen:
                self.final_screen()
            if self.key == 27:
                break
        self.cap.release()
        cv2.destroyAllWindows()

    def showImage(self):
        self.testImage = cv2.imread('anime.jpg')
        imageH, imageW, imageChannels = self.testImage.shape
        # The original call was left unfinished (no radius/color arguments) and
        # referenced undefined attributes rightMean/bottomMean; mapping through
        # the calibrated maxMean here is an assumption.
        cv2.circle(self.testImage,
                   (int((self.eyeCenter[0] * imageW) / self.maxMean[0]),
                    int((self.eyeCenter[1] * imageH) / self.maxMean[1])),
                   5, (0, 0, 255), -1)
int(self.eyeCenter[0] * imageW / self.maxMean[0]), int(self.eyeCenter[1] * imageH / self.maxMean[1])), 5, (0, 0, 255), -1)
        # NOTE: the original passed only a centre built from self.rightMean/self.bottomMean,
        # attributes that are never assigned anywhere in the class; self.maxMean holds the
        # calibrated maxima, and cv2.circle also needs the radius, colour and thickness
        # arguments that were missing here.

    def lookingPointDrawCircle(self):
        self.thresholdIs = False
        cv2.imshow("",self.img)

    def readSelf(self):
        self.ret, self.frame=self.cap.read()

    def lanmarkingLeftEyes(self):
        for face in self.faces:
            #x = face.left()
            #y = face.top()
            #x1 = face.right()
            #y1 = face.bottom()

            self.landmarks = self.predictor(self.grayFrame, face)
            self.left_eye = np.array([(self.landmarks.part(36).x, self.landmarks.part(36).y),
                                      (self.landmarks.part(37).x, self.landmarks.part(37).y),
                                      (self.landmarks.part(38).x, self.landmarks.part(38).y),
                                      (self.landmarks.part(39).x, self.landmarks.part(39).y),
                                      (self.landmarks.part(40).x, self.landmarks.part(40).y),
                                      (self.landmarks.part(41).x, self.landmarks.part(41).y)], np.int32)
            h, w, _ = self.frame.shape
            mask = np.zeros((h, w), np.uint8)
            cv2.polylines(mask, [self.left_eye], True, 255, 2)
            cv2.fillPoly(mask, [self.left_eye], 255)
            self.maskEyes = cv2.bitwise_and(self.grayFrame, self.grayFrame, mask=mask)
            self.maskEyes = np.where(self.maskEyes==0, 255,self.maskEyes)

            self.min_x = np.min(self.left_eye[:,0])
            self.max_x = np.max(self.left_eye[:,0])
            self.min_y = np.min(self.left_eye[:,1])
            self.max_y = np.max(self.left_eye[:,1])

            self.cutEyes = self.maskEyes[self.min_y : self.max_y, self.min_x : self.max_x]
            self.cutEyes = cv2.resize(self.cutEyes, None, fx=5, fy=5)
            self.capThreshold = cv2.GaussianBlur(self.cutEyes, (5,5), 0)
            _, self.capThreshold = cv2.threshold(self.capThreshold, self.threshold, 255, cv2.THRESH_BINARY_INV)
            self.contours, _ = cv2.findContours(self.capThreshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in self.contours:
                (x, y, w, h) = cv2.boundingRect(cnt)
                cv2.rectangle(self.capThreshold, (x, y), (x + w, y + h), (255, 0, 0), 1)
                # middle point of the bounding box; the original added w/2 and h/2 a
                # second time when drawing and storing the centre, shifting it by a
                # full box width/height
                x = x + int(w/2)
                y = y + int(h/2)
                cv2.circle(self.cutEyes, (x, y), 5, (255,0,0), -1)
                self.eyeCenter = [x, y]
                break
        if self.rgbIs == True:
            cv2.imshow("c", self.cutEyes)

    def final_screen(self):
        x,y=self.pC()
        cv2.namedWindow("dd", cv2.WND_PROP_FULLSCREEN)
        cv2.moveWindow("dd", screen.x - 1, screen.y - 1)
        cv2.setWindowProperty("dd", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
        # round-half-up to the nearest pixel
        if (x-int(x))<0.5:
            x=math.floor(x)
        else:
            x = x + 1
            x = math.floor(x)
        if (y-int(y))<0.5:
            y=math.floor(y)
        else:
            y = y + 1
            y = math.floor(y)
        cv2.circle(self.img, (int(x),int(y)), 5, (0,0,0), -1)
        #print("x:",x," y:",y, " eyecenter:",self.eyeCenter)
        cv2.imshow("dd",self.img)

    def pC(self):
        # linearly map the pupil centre from the calibrated [minMean, maxMean] box
        # onto screen pixel coordinates
        return (self.screenSizeX*(self.eyeCenter[0]-self.minMean[0]))/(self.maxMean[0]-self.minMean[0]),(self.screenSizeY*(self.eyeCenter[1]-self.minMean[1]))/(self.maxMean[1]-self.minMean[1])

    def eyeLines(self):
        horizontalLineLeft = (self.landmarks.part(36).x, self.landmarks.part(36).y)
        horizontalLineRight = (self.landmarks.part(39).x, self.landmarks.part(39).y)
        verticalLineTop = (self.landmarks.part(38).x, self.landmarks.part(38).y)
        verticalLineBottom = (self.landmarks.part(40).x, self.landmarks.part(40).y)
        cv2.line(self.frame, horizontalLineLeft, horizontalLineRight,(0,255,0),1)
        cv2.line(self.frame, verticalLineTop, verticalLineBottom,(0,255,0),1)

    def getCutEyeShape(self,x,y,x1,y1):
        return self.frame[y:y1, x:x1]

    def optimizationEyesLooking(self):
        background = np.zeros((screen.height,screen.width),np.uint8)
        cv2.namedWindow("aa", cv2.WND_PROP_FULLSCREEN)
        cv2.moveWindow("aa", screen.x - 1, screen.y - 1)
        cv2.setWindowProperty("aa", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
        if self.optIfBlock==1:
            self.startTime(time.perf_counter())
            if time.perf_counter()-self.time < 3:
                if self.eyeCenter is not None:
                    self.minArray = np.append(self.minArray, [self.eyeCenter],axis=0)
                cv2.circle(background, (10,10), 5, (255,255,255), -1)
                (text_width, text_height) = cv2.getTextSize("Follow point", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
                cv2.putText(background, "Follow point", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
            elif time.perf_counter()-self.time < 6 and time.perf_counter()-self.time > 3:
                if self.eyeCenter is not None:
                    self.maxArray = np.append(self.maxArray, [self.eyeCenter],axis=0)
                cv2.circle(background, (screen.width-10,screen.height-10), 5, (255,255,255), -1)
                (text_width, text_height) = cv2.getTextSize("Follow point", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
                cv2.putText(background, "Follow point", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
            else:
                # the original compared the elapsed time with == 6, which a float
                # timer essentially never satisfies; treat anything past 6 s as done
                cv2.destroyAllWindows()
                self.CalibFinishBlock = True
                self.calibrationIsOkey = True
                self.check = True
                self.optIfBlock=3
        elif self.optIfBlock==2:
            (text_width, text_height) = cv2.getTextSize("Kalibrasyonu ayarlamak için 's' tuşuna basın", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
            # UI text (Turkish): "Press 's' to set up the calibration"
            cv2.putText(background, "Kalibrasyonu ayarlamak için 's' tuşuna basın", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
        elif self.optIfBlock==3:
            self.optFinish(background)

        #key 's'
        if self.key == 83 or self.key == 115:
            self.optIfBlock = 1
        #key 'i'
        if self.key == 73 or self.key == 105:
            self.minArray = self.minArray[1:]
            self.maxArray = self.maxArray[1:]
            #self.minMean = self.minArray.mean(0)
            self.minMean = self.minArray.min(0)
            #self.maxMean = self.maxArray.mean(0)
            self.maxMean = self.maxArray.max(0)
            self.calibrationIsOkey=False
            self.finalScreen=True
            cv2.destroyWindow("aa")
        else:
            cv2.imshow("aa",background)

    def optFinish(self, stage):
        (text_width, text_height) = cv2.getTextSize("Go to do image 'i'", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
        cv2.putText(stage, "Go to do image 'i'", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
        cv2.imshow("aa",stage)

    def startTime(self, time):
        if self.startTimeIfBlock:
            self.time = time
            self.startTimeIfBlock = False

    def getCameraShape(self):
        for i in range(3):
            print(self.frame.shape[i])
        # width, height; the original returned self.frame[1], self.frame[0],
        # i.e. two rows of pixels instead of the shape
        return self.frame.shape[1], self.frame.shape[0]


if __name__ == "__main__":
    import screeninfo
    screen = screeninfo.get_monitors()[0]
    ct = Controller()
    ct.main()
[ "cv2.GaussianBlur", "getopt.getopt", "cv2.bitwise_and", "cv2.fillPoly", "cv2.rectangle", "screeninfo.get_monitors", "cv2.imshow", "dlib.shape_predictor", "cv2.line", "cv2.cvtColor", "numpy.append", "numpy.max", "cv2.destroyAllWindows", "cv2.boundingRect", "cv2.resize", "cv2.circle", "cv2.waitKey", "time.perf_counter", "numpy.min", "dlib.get_frontal_face_detector", "sys.exit", "cv2.findContours", "cv2.putText", "cv2.polylines", "cv2.threshold", "cv2.getTextSize", "cv2.setWindowProperty", "numpy.zeros", "math.floor", "cv2.VideoCapture", "cv2.imread", "numpy.where", "numpy.array", "cv2.destroyWindow", "cv2.moveWindow", "cv2.namedWindow" ]
[((161, 193), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (191, 193), False, 'import dlib\n'), ((219, 280), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (239, 280), False, 'import dlib\n'), ((376, 395), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (392, 395), False, 'import cv2\n'), ((594, 617), 'cv2.imread', 'cv2.imread', (['"""anime.jpg"""'], {}), "('anime.jpg')\n", (604, 617), False, 'import cv2\n'), ((1296, 1314), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (1304, 1314), True, 'import numpy as np\n'), ((1366, 1384), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (1374, 1384), True, 'import numpy as np\n'), ((4946, 4969), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4967, 4969), False, 'import cv2\n'), ((5020, 5043), 'cv2.imread', 'cv2.imread', (['"""anime.jpg"""'], {}), "('anime.jpg')\n", (5030, 5043), False, 'import cv2\n'), ((5112, 5241), 'cv2.circle', 'cv2.circle', (['self.testImage', '(self.eyeCenter[0] * imageW / self.rightMean[0], self.eyeCenter[1] * imageH /\n self.bottomMean[1])'], {}), '(self.testImage, (self.eyeCenter[0] * imageW / self.rightMean[0],\n self.eyeCenter[1] * imageH / self.bottomMean[1]))\n', (5122, 5241), False, 'import cv2\n'), ((5322, 5346), 'cv2.imshow', 'cv2.imshow', (['""""""', 'self.img'], {}), "('', self.img)\n", (5332, 5346), False, 'import cv2\n'), ((7799, 7845), 'cv2.namedWindow', 'cv2.namedWindow', (['"""dd"""', 'cv2.WND_PROP_FULLSCREEN'], {}), "('dd', cv2.WND_PROP_FULLSCREEN)\n", (7814, 7845), False, 'import cv2\n'), ((7854, 7902), 'cv2.moveWindow', 'cv2.moveWindow', (['"""dd"""', '(screen.x - 1)', '(screen.y - 1)'], {}), "('dd', screen.x - 1, screen.y - 1)\n", (7868, 7902), False, 'import cv2\n'), ((7911, 7986), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['"""dd"""', 'cv2.WND_PROP_FULLSCREEN', 'cv2.WINDOW_FULLSCREEN'], {}), "('dd', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n", (7932, 7986), False, 'import cv2\n'), ((8359, 8385), 'cv2.imshow', 'cv2.imshow', (['"""dd"""', 'self.img'], {}), "('dd', self.img)\n", (8369, 8385), False, 'import cv2\n'), ((8997, 9074), 'cv2.line', 'cv2.line', (['self.frame', 'horizontalLineLeft', 'horizontalLineRight', '(0, 255, 0)', '(1)'], {}), '(self.frame, horizontalLineLeft, horizontalLineRight, (0, 255, 0), 1)\n', (9005, 9074), False, 'import cv2\n'), ((9079, 9152), 'cv2.line', 'cv2.line', (['self.frame', 'verticalLineTop', 'verticalLineBottom', '(0, 255, 0)', '(1)'], {}), '(self.frame, verticalLineTop, verticalLineBottom, (0, 255, 0), 1)\n', (9087, 9152), False, 'import cv2\n'), ((9304, 9353), 'numpy.zeros', 'np.zeros', (['(screen.height, screen.width)', 'np.uint8'], {}), '((screen.height, screen.width), np.uint8)\n', (9312, 9353), True, 'import numpy as np\n'), ((9360, 9406), 'cv2.namedWindow', 'cv2.namedWindow', (['"""aa"""', 'cv2.WND_PROP_FULLSCREEN'], {}), "('aa', cv2.WND_PROP_FULLSCREEN)\n", (9375, 9406), False, 'import cv2\n'), ((9415, 9463), 'cv2.moveWindow', 'cv2.moveWindow', (['"""aa"""', '(screen.x - 1)', '(screen.y - 1)'], {}), "('aa', screen.x - 1, screen.y - 1)\n", (9429, 9463), False, 'import cv2\n'), ((9472, 9547), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['"""aa"""', 'cv2.WND_PROP_FULLSCREEN', 'cv2.WINDOW_FULLSCREEN'], {}), "('aa', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n", (9493, 9547), False, 'import cv2\n'), ((12299, 12476), 'cv2.putText', 
'cv2.putText', (['stage', '"""Go to do image \'i\'"""', '(screen.width // 2 - text_width // 2, screen.height // 2 - 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(stage, "Go to do image \'i\'", (screen.width // 2 - text_width //\n 2, screen.height // 2 - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, \n 255), 1, cv2.LINE_AA)\n', (12310, 12476), False, 'import cv2\n'), ((12470, 12493), 'cv2.imshow', 'cv2.imshow', (['"""aa"""', 'stage'], {}), "('aa', stage)\n", (12480, 12493), False, 'import cv2\n'), ((12837, 12862), 'screeninfo.get_monitors', 'screeninfo.get_monitors', ([], {}), '()\n', (12860, 12862), False, 'import screeninfo\n'), ((1725, 1862), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""h:c:t:r:a:e:"""', "['help', 'cameradress', 'threshold', 'rgb', 'eyeline', 'halfcut',\n 'quartercut', 'calib']"], {}), "(sys.argv[1:], 'h:c:t:r:a:e:', ['help', 'cameradress',\n 'threshold', 'rgb', 'eyeline', 'halfcut', 'quartercut', 'calib'])\n", (1738, 1862), False, 'import sys, getopt\n'), ((4116, 4130), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4127, 4130), False, 'import cv2\n'), ((6283, 6309), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.uint8'], {}), '((h, w), np.uint8)\n', (6291, 6309), True, 'import numpy as np\n'), ((6322, 6372), 'cv2.polylines', 'cv2.polylines', (['mask', '[self.left_eye]', '(True)', '(255)', '(2)'], {}), '(mask, [self.left_eye], True, 255, 2)\n', (6335, 6372), False, 'import cv2\n'), ((6385, 6425), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[self.left_eye]', '(255)'], {}), '(mask, [self.left_eye], 255)\n', (6397, 6425), False, 'import cv2\n'), ((6454, 6512), 'cv2.bitwise_and', 'cv2.bitwise_and', (['self.grayFrame', 'self.grayFrame'], {'mask': 'mask'}), '(self.grayFrame, self.grayFrame, mask=mask)\n', (6469, 6512), False, 'import cv2\n'), ((6541, 6589), 'numpy.where', 'np.where', (['(self.maskEyes == 0)', '(255)', 'self.maskEyes'], {}), '(self.maskEyes == 0, 255, self.maskEyes)\n', (6549, 6589), True, 'import numpy as np\n'), ((6613, 6640), 'numpy.min', 'np.min', (['self.left_eye[:, 0]'], {}), '(self.left_eye[:, 0])\n', (6619, 6640), True, 'import numpy as np\n'), ((6665, 6692), 'numpy.max', 'np.max', (['self.left_eye[:, 0]'], {}), '(self.left_eye[:, 0])\n', (6671, 6692), True, 'import numpy as np\n'), ((6717, 6744), 'numpy.min', 'np.min', (['self.left_eye[:, 1]'], {}), '(self.left_eye[:, 1])\n', (6723, 6744), True, 'import numpy as np\n'), ((6769, 6796), 'numpy.max', 'np.max', (['self.left_eye[:, 1]'], {}), '(self.left_eye[:, 1])\n', (6775, 6796), True, 'import numpy as np\n'), ((6915, 6957), 'cv2.resize', 'cv2.resize', (['self.cutEyes', 'None'], {'fx': '(5)', 'fy': '(5)'}), '(self.cutEyes, None, fx=5, fy=5)\n', (6925, 6957), False, 'import cv2\n'), ((6991, 7032), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.cutEyes', '(5, 5)', '(0)'], {}), '(self.cutEyes, (5, 5), 0)\n', (7007, 7032), False, 'import cv2\n'), ((7067, 7143), 'cv2.threshold', 'cv2.threshold', (['self.capThreshold', 'self.threshold', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(self.capThreshold, self.threshold, 255, cv2.THRESH_BINARY_INV)\n', (7080, 7143), False, 'import cv2\n'), ((7175, 7250), 'cv2.findContours', 'cv2.findContours', (['self.capThreshold', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(self.capThreshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7191, 7250), False, 'import cv2\n'), ((8027, 8040), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (8037, 8040), False, 'import math\n'), ((8093, 8106), 'math.floor', 
'math.floor', (['x'], {}), '(x)\n', (8103, 8106), False, 'import math\n'), ((8148, 8161), 'math.floor', 'math.floor', (['y'], {}), '(y)\n', (8158, 8161), False, 'import math\n'), ((8214, 8227), 'math.floor', 'math.floor', (['y'], {}), '(y)\n', (8224, 8227), False, 'import math\n'), ((12068, 12091), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""aa"""'], {}), "('aa')\n", (12085, 12091), False, 'import cv2\n'), ((12118, 12146), 'cv2.imshow', 'cv2.imshow', (['"""aa"""', 'background'], {}), "('aa', background)\n", (12128, 12146), False, 'import cv2\n'), ((12216, 12287), 'cv2.getTextSize', 'cv2.getTextSize', (['"""Go to do image \'i\'"""', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(1)'], {}), '("Go to do image \'i\'", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)\n', (12231, 12287), False, 'import cv2\n'), ((1927, 1937), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1935, 1937), False, 'import sys, getopt\n'), ((3238, 3290), 'cv2.resize', 'cv2.resize', (['self.frame', 'None'], {'fx': 'self.fx', 'fy': 'self.fy'}), '(self.frame, None, fx=self.fx, fy=self.fy)\n', (3248, 3290), False, 'import cv2\n'), ((3427, 3471), 'cv2.cvtColor', 'cv2.cvtColor', (['self.frame', 'cv2.COLOR_BGR2GRAY'], {}), '(self.frame, cv2.COLOR_BGR2GRAY)\n', (3439, 3471), False, 'import cv2\n'), ((3768, 3799), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'self.frame'], {}), "('Frame', self.frame)\n", (3778, 3799), False, 'import cv2\n'), ((3962, 3996), 'cv2.imshow', 'cv2.imshow', (['"""2"""', 'self.capThreshold'], {}), "('2', self.capThreshold)\n", (3972, 3996), False, 'import cv2\n'), ((4057, 4090), 'cv2.imshow', 'cv2.imshow', (['"""3"""', 'self.cutEyesGray'], {}), "('3', self.cutEyesGray)\n", (4067, 4090), False, 'import cv2\n'), ((4355, 4378), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4376, 4378), False, 'import cv2\n'), ((4451, 4474), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4472, 4474), False, 'import cv2\n'), ((7321, 7342), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (7337, 7342), False, 'import cv2\n'), ((7359, 7431), 'cv2.rectangle', 'cv2.rectangle', (['self.capThreshold', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(1)'], {}), '(self.capThreshold, (x, y), (x + w, y + h), (255, 0, 0), 1)\n', (7372, 7431), False, 'import cv2\n'), ((7711, 7740), 'cv2.imshow', 'cv2.imshow', (['"""c"""', 'self.cutEyes'], {}), "('c', self.cutEyes)\n", (7721, 7740), False, 'import cv2\n'), ((9615, 9634), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9632, 9634), False, 'import time\n'), ((9831, 9887), 'cv2.circle', 'cv2.circle', (['background', '(10, 10)', '(5)', '(255, 255, 255)', '(-1)'], {}), '(background, (10, 10), 5, (255, 255, 255), -1)\n', (9841, 9887), False, 'import cv2\n'), ((10014, 10191), 'cv2.putText', 'cv2.putText', (['background', '"""Follow point"""', '(screen.width // 2 - text_width // 2, screen.height // 2 - 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(background, 'Follow point', (screen.width // 2 - text_width // \n 2, screen.height // 2 - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, \n 255), 1, cv2.LINE_AA)\n", (10025, 10191), False, 'import cv2\n'), ((11253, 11462), 'cv2.putText', 'cv2.putText', (['background', '"""Kalibrasyonu ayarlamak için \'s\' tuşuna basın"""', '(screen.width // 2 - text_width // 2, screen.height // 2 - 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(background, "Kalibrasyonu ayarlamak için \'s\' tuşuna basın", (\n screen.width // 2 - 
text_width // 2, screen.height // 2 - 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, cv2.LINE_AA)\n', (11264, 11462), False, 'import cv2\n'), ((9651, 9670), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9668, 9670), False, 'import time\n'), ((9765, 9815), 'numpy.append', 'np.append', (['self.minArray', '[self.eyeCenter]'], {'axis': '(0)'}), '(self.minArray, [self.eyeCenter], axis=0)\n', (9774, 9815), True, 'import numpy as np\n'), ((9929, 9994), 'cv2.getTextSize', 'cv2.getTextSize', (['"""Follow point"""', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(1)'], {}), "('Follow point', cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)\n", (9944, 9994), False, 'import cv2\n'), ((10412, 10504), 'cv2.circle', 'cv2.circle', (['background', '(screen.width - 10, screen.height - 10)', '(5)', '(255, 255, 255)', '(-1)'], {}), '(background, (screen.width - 10, screen.height - 10), 5, (255, \n 255, 255), -1)\n', (10422, 10504), False, 'import cv2\n'), ((10622, 10799), 'cv2.putText', 'cv2.putText', (['background', '"""Follow point"""', '(screen.width // 2 - text_width // 2, screen.height // 2 - 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(background, 'Follow point', (screen.width // 2 - text_width // \n 2, screen.height // 2 - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, \n 255), 1, cv2.LINE_AA)\n", (10633, 10799), False, 'import cv2\n'), ((11140, 11242), 'cv2.getTextSize', 'cv2.getTextSize', (['"""Kalibrasyonu ayarlamak için \'s\' tuşuna basın"""', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(1)'], {}), '("Kalibrasyonu ayarlamak için \'s\' tuşuna basın", cv2.\n FONT_HERSHEY_SIMPLEX, 0.8, 1)\n', (11155, 11242), False, 'import cv2\n'), ((10346, 10396), 'numpy.append', 'np.append', (['self.maxArray', '[self.eyeCenter]'], {'axis': '(0)'}), '(self.maxArray, [self.eyeCenter], axis=0)\n', (10355, 10396), True, 'import numpy as np\n'), ((10537, 10602), 'cv2.getTextSize', 'cv2.getTextSize', (['"""Follow point"""', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(1)'], {}), "('Follow point', cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)\n", (10552, 10602), False, 'import cv2\n'), ((10853, 10876), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10874, 10876), False, 'import cv2\n'), ((10194, 10213), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10211, 10213), False, 'import time\n'), ((10232, 10251), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10249, 10251), False, 'import time\n'), ((10801, 10820), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (10818, 10820), False, 'import time\n')]
# ============================================================================
# ~/cerebstats/cerebstats/stat_scores/chi2GOFScore.py
#
# This py-file contains custom score functions initiated by
#
# from cerebstats import scoreScores
# from cerebstats.scoreScores import ABCScore
# ============================================================================

import numpy as np
from scipy.stats import chisquare

import sciunit


# ==============================Chi2GOFScore==================================
class Chi2GOFScore(sciunit.Score):
    """
    Compute the chi2-statistic for a chi-squared goodness-of-fit test of proportions.

    One may think of this as a **one-way contingency table.**

    +--------------+-------------------------------------------------------------+
    | sample size  | :math:`k` categories of a categorical variable of interest  |
    +              +--------------+--------------+----------------+--------------+
    | :math:`n`    | :math:`x_1`  | :math:`x_2`  | :math:`\\ldots` | :math:`x_k`  |
    +==============+==============+==============+================+==============+
    | observations | :math:`O_1`  | :math:`O_2`  | :math:`\\ldots` | :math:`O_k`  |
    +--------------+--------------+--------------+----------------+--------------+
    | probabilities| :math:`p_1`  | :math:`p_2`  | :math:`\\ldots` | :math:`p_k`  |
    +--------------+--------------+--------------+----------------+--------------+
    | expected     | :math:`np_1` | :math:`np_2` | :math:`\\ldots` | :math:`np_k` |
    +--------------+--------------+--------------+----------------+--------------+

    Notice that for the probabilities of the *k* categories :math:`\\sum_{\\forall i} p_i = 1`. The expected counts for each category can be derived from them (or given directly) such that :math:`\\sum_{\\forall i} np_i = n`.

    .. table:: Title here

    ==================== ==============================================================================
      Definitions          Interpretation
    ==================== ==============================================================================
     :math:`n`            sample size; total number of experiments done
     :math:`k`            number of categorical variables
     :math:`O_i`          observed count (frequency) for :math:`i^{th}` variable
     :math:`p_i`          probability for :math:`i^{th}` category such that :math:`\\sum_{\\forall i} p_i = 1`
     :math:`E_i`          expected count for :math:`i^{th}` category such that :math:`E_i = n p_i`
     test-statistic       :math:`\\chi^2 = \\sum_{\\forall i} \\frac{(O_i - E_i)^2}{E_i}`
     :math:`df`           degrees of freedom, :math:`df = k-1`
    ==================== ==============================================================================

    *Note* the modifications made when compared with a two-way :math:`\\chi^2` test are

    - the calculation of expected counts, :math:`E_i = n p_i`
    - the degrees of freedom, :math:`df = k-1`

    This class uses `scipy.stats.chisquare <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html>`_.

    **Use Case:**

    ::

      x = Chi2GOFScore.compute( observation, prediction )
      score = Chi2GOFScore(x)

    *Note*: As part of the `SciUnit <http://scidash.github.io/sciunit.html>`_ framework this custom :py:class:`.Chi2GOFScore` should have the following methods,

    * :py:meth:`.compute` (class method)
    * :py:meth:`.sort_key` (property)
    * :py:meth:`.__str__`

    """
    #_allowed_types = (float,)
    _description = ( "Chi2GOFScore gives the chi-squared statistic for a goodness-of-fit test of proportions. "
                   + "The experimental data (observation) supplies the sample size and the observed frequencies. "
                   + "The expected counts are derived from the probabilities (or expected counts) predicted by running the model." )

    @classmethod
    def compute(cls, observation, prediction):
        """
        +---------------------+-----------------------------------------------------------------------+
        | Argument            | Value type                                                            |
        +=====================+=======================================================================+
        | first argument      |dictionary; observation/experimental data must have keys "sample_size" |
        |                     |with a number as its value and "observed_freq" whose value is an array |
        +---------------------+-----------------------------------------------------------------------+
        | second argument     |dictionary; model prediction must have either "probabilities" or       |
        |                     |"expected" whose value is an array (same length as "observed_freq")    |
        +---------------------+-----------------------------------------------------------------------+

        *Note:*

        * chi squared tests (for goodness-of-fit or contingency tables) are by nature two-sided, so there is no option for one-sidedness.

        """
        name = "chi2_goodness_of_fit_test_for_proportions"
        if "probabilities" in prediction:
            probabilities = np.array( prediction["probabilities"] )
            expected_counts = observation["sample_size"] * probabilities
        elif "expected" in prediction:
            expected_counts = np.array( prediction["expected"] )
            probabilities = expected_counts / observation["sample_size"]
        else:
            # guard added: without it the references below raise a NameError
            raise ValueError( "prediction must have a 'probabilities' or an 'expected' key" )
        #
        k_categories = expected_counts.size
        score, pvalue = chisquare( observation["observed_freq"], f_exp = expected_counts )
        return {"name": name, "sample_statistic": probabilities,
                "expected_values": expected_counts, "test_statistic": score,
                "df": k_categories-1, "p_value": pvalue}

    @property
    def sort_key(self):
        return self.score

    def __str__(self):
        return "Chi2GOFScore is " + str(self.score)
# ============================================================================
[ "numpy.array", "scipy.stats.chisquare" ]
[((5745, 5807), 'scipy.stats.chisquare', 'chisquare', (["observation['observed_freq']"], {'f_exp': 'expected_counts'}), "(observation['observed_freq'], f_exp=expected_counts)\n", (5754, 5807), False, 'from scipy.stats import chisquare\n'), ((5377, 5414), 'numpy.array', 'np.array', (["prediction['probabilities']"], {}), "(prediction['probabilities'])\n", (5385, 5414), True, 'import numpy as np\n'), ((5559, 5591), 'numpy.array', 'np.array', (["prediction['expected']"], {}), "(prediction['expected'])\n", (5567, 5591), True, 'import numpy as np\n')]
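To make the docstring's construction concrete (expected counts E_i = n * p_i and df = k - 1), here is a small standalone check against scipy.stats.chisquare; the observation numbers are invented for illustration.

import numpy as np
from scipy.stats import chisquare

n = 100                                # sample size
p = np.array([0.2, 0.3, 0.5])          # hypothesised proportions, sum to 1
observed = np.array([18, 35, 47])     # observed frequencies, sum to n
expected = n * p                       # E_i = n * p_i

chi2_stat, pvalue = chisquare(observed, f_exp=expected)
df = len(p) - 1                        # degrees of freedom for a one-way table
print(chi2_stat, df, pvalue)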
""" A simple script for generating sample data for learning to give personalised offers. """ import json import pandas as pd import numpy as np import gzip import random import logging GENERATE_INBALANCED_DATA = False NUM_INTERACTIONS_PER_USER = 3 FIRST_TIMESTAMP = 1591803782 # 2020-06-10, 18:43:02 LAST_TIMESTAMP = 1599579782 # 2020-09-08, 18:43:02 RANDOM_SEED = 1 IN_PRODUCTS_FILENAME = "src/products/src/products-service/data/products.yaml" IN_USERS_FILENAME = "src/users/src/users-service/data/users.json.gz" IN_OFFERS_FILENAME = "src/offers/src/offers-service/data/offers.json" # Where to put the generated data so that it is picked up by stage.sh GENERATED_DATA_ROOT = "src/aws-lambda/personalize-pre-create-resources/data" def generate_data(interactions_filename, users_df, offers_df): """Script for writing to a file simulated user-offer interactions""" random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) num_users = users_df.shape[0] num_interactions = NUM_INTERACTIONS_PER_USER * num_users if GENERATE_INBALANCED_DATA: # We may wish to assume probability is proportional to ID to show off how we can add # business logic around Personalize offer_probs = offers_df.id.values.astype(float) else: # Or we can work around inbalance at the data munging stage offer_probs = np.ones(len(offers_df.id.values), dtype=float) # Normalise so that we have probabilities offer_probs = offer_probs / offer_probs.sum() # generate timestamps time_between_events = (LAST_TIMESTAMP - FIRST_TIMESTAMP) / num_interactions timestamps = np.arange(FIRST_TIMESTAMP, LAST_TIMESTAMP, time_between_events).astype(int) # pre-shuffle them as we will be using them as a randomising key when we sort by timestamp np.random.shuffle(timestamps) # generate all users Ids sample_user_ids = np.tile(users_df['id'].values.astype(int), NUM_INTERACTIONS_PER_USER) # only one event type event_type = ['OfferConverted'] * num_interactions # we sort it to ensure there is a correlation between user ID and offer ID. # This correlation is what the personalisation will learn. sampled_offers = sorted(np.random.choice(offers_df.id.values, num_interactions, p=offer_probs)) interactions_df = pd.DataFrame({'ITEM_ID': sampled_offers, 'USER_ID': sample_user_ids, 'EVENT_TYPE': event_type, 'TIMESTAMP': timestamps}) # by sorting by timestamp, other elements get shuffled interactions_df = interactions_df.sort_values('TIMESTAMP') with open(interactions_filename, 'w') as outfile: interactions_df.to_csv(outfile, index=False) globals().update(locals()) # This can be used for inspecting in console after script ran or if run with ipython. print('Generation script finished - created offers dataset') if __name__ == '__main__': # User info is stored in the repository - it was automatically generated with gzip.open(IN_USERS_FILENAME, 'r') as f: users = json.load(f) users_df = pd.DataFrame(users) # Offers info is stored in repository with open(IN_OFFERS_FILENAME, 'r') as f: offers = json.load(f) offers_df = pd.DataFrame(offers) logging.basicConfig(level=logging.INFO) generate_data(GENERATED_DATA_ROOT + '/offer_interactions.csv', users_df, offers_df)
[ "pandas.DataFrame", "json.load", "numpy.random.seed", "gzip.open", "logging.basicConfig", "random.seed", "numpy.arange", "numpy.random.choice", "numpy.random.shuffle" ]
[((880, 904), 'random.seed', 'random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (891, 904), False, 'import random\n'), ((909, 936), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (923, 936), True, 'import numpy as np\n'), ((1803, 1832), 'numpy.random.shuffle', 'np.random.shuffle', (['timestamps'], {}), '(timestamps)\n', (1820, 1832), True, 'import numpy as np\n'), ((2303, 2427), 'pandas.DataFrame', 'pd.DataFrame', (["{'ITEM_ID': sampled_offers, 'USER_ID': sample_user_ids, 'EVENT_TYPE':\n event_type, 'TIMESTAMP': timestamps}"], {}), "({'ITEM_ID': sampled_offers, 'USER_ID': sample_user_ids,\n 'EVENT_TYPE': event_type, 'TIMESTAMP': timestamps})\n", (2315, 2427), True, 'import pandas as pd\n'), ((3148, 3167), 'pandas.DataFrame', 'pd.DataFrame', (['users'], {}), '(users)\n', (3160, 3167), True, 'import pandas as pd\n'), ((3303, 3323), 'pandas.DataFrame', 'pd.DataFrame', (['offers'], {}), '(offers)\n', (3315, 3323), True, 'import pandas as pd\n'), ((3329, 3368), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3348, 3368), False, 'import logging\n'), ((2208, 2278), 'numpy.random.choice', 'np.random.choice', (['offers_df.id.values', 'num_interactions'], {'p': 'offer_probs'}), '(offers_df.id.values, num_interactions, p=offer_probs)\n', (2224, 2278), True, 'import numpy as np\n'), ((3063, 3096), 'gzip.open', 'gzip.open', (['IN_USERS_FILENAME', '"""r"""'], {}), "(IN_USERS_FILENAME, 'r')\n", (3072, 3096), False, 'import gzip\n'), ((3119, 3131), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3128, 3131), False, 'import json\n'), ((3273, 3285), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3282, 3285), False, 'import json\n'), ((1628, 1691), 'numpy.arange', 'np.arange', (['FIRST_TIMESTAMP', 'LAST_TIMESTAMP', 'time_between_events'], {}), '(FIRST_TIMESTAMP, LAST_TIMESTAMP, time_between_events)\n', (1637, 1691), True, 'import numpy as np\n')]
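The correlation trick in generate_data -- sorting the sampled offer IDs while the tiled user IDs stay in order, then shuffling rows via the pre-shuffled timestamps -- is easy to sanity-check on the generated file. A small illustration; the file name matches what the script writes under GENERATED_DATA_ROOT, so adjust the path as needed.

import pandas as pd

# Confirm that user ID and offer ID are positively correlated, which is the
# signal the personalisation model is meant to learn.
df = pd.read_csv("offer_interactions.csv")
corr = df["USER_ID"].corr(df["ITEM_ID"])
print(f"user/offer Pearson correlation: {corr:.3f}")
assert corr > 0, "expected the sorted-offer trick to induce a positive correlation"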
from kmodes.util.dissim import num_TZ_dissim, cat_TZ_dissim  # imported for dissimilarity experiments; unused below
from sklearn.decomposition import PCA
import numpy

# Scratch centroids and numeric data for the dissimilarity functions (currently unused).
centroid = [
    [1, 2, 3],
    [5, 6, 6]
]
Xnum = [
    [54, 2, 44],
    [89, 6, 4],
    [1.5, 0, -5],
    [5346, 874, 212]
]
centroid = numpy.array(centroid)
Xnum = numpy.array(Xnum)

# Project four 3-D points onto their first principal component.
x = numpy.array([[1, 2, 3], [2, 3, 3], [12938, 9999, 666], [54, 11, 21354]])
pca = PCA(n_components=1)
newx = pca.fit_transform(x)
print(newx)
[ "numpy.array", "sklearn.decomposition.PCA" ]
[((236, 257), 'numpy.array', 'numpy.array', (['centroid'], {}), '(centroid)\n', (247, 257), False, 'import numpy\n'), ((265, 282), 'numpy.array', 'numpy.array', (['Xnum'], {}), '(Xnum)\n', (276, 282), False, 'import numpy\n'), ((288, 360), 'numpy.array', 'numpy.array', (['[[1, 2, 3], [2, 3, 3], [12938, 9999, 666], [54, 11, 21354]]'], {}), '([[1, 2, 3], [2, 3, 3], [12938, 9999, 666], [54, 11, 21354]])\n', (299, 360), False, 'import numpy\n'), ((356, 375), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (359, 375), False, 'from sklearn.decomposition import PCA\n')]
"""Tests for log_linear_exp function.""" import chex import jax import jax.numpy as jnp import numpy as np from vmcnet.utils.log_linear_exp import log_linear_exp import vmcnet.utils.slog_helpers as slog_helpers def test_log_linear_exp_shape(): """Test output shape of log linear exp.""" signs = jnp.ones((5, 2, 4, 3)) vals = jnp.zeros((5, 2, 4, 3)) weights = jnp.ones((2, 7)) out = log_linear_exp(signs, vals, weights, axis=-3) out_no_weights = log_linear_exp(signs, vals, axis=-3) desired_shape = (5, 7, 4, 3) desired_shape_no_weights = (5, 1, 4, 3) chex.assert_shape(out, desired_shape) chex.assert_shape(out_no_weights, desired_shape_no_weights) def test_log_linear_exp_no_overflow(): """Test that the log-linear-exp trick avoids overflow when any vals are big.""" signs = jnp.array([-1.0, -1.0, 1.0, 1.0]) vals = jnp.array([300.0, 100.0, 3000.0, 1.5]) weights = jnp.reshape(jnp.array([-1.0, 2.0, 0.5, 0.6]), (4, 1)) sign_out, log_out = log_linear_exp(signs, vals, weights, axis=0) # the output should be sign_out=1.0, log_out=log|0.5 * exp(3000) + tinier stuff| assert jnp.isfinite(log_out) np.testing.assert_allclose(sign_out, 1.0) np.testing.assert_allclose(log_out, jnp.log(0.5) + 3000.0) def test_log_linear_exp_no_underflow(): """Test that the log-linear-exp trick avoids underflow when all vals are small.""" signs = jnp.array([-1.0, -1.0, 1.0, 1.0]) vals = jnp.array([-4000.0, -5500.0, -3000.0, -1234.5]) sign_out, log_out = log_linear_exp(signs, vals, axis=0) # the output should be sign_out=1.0, log_out=log|exp(-1234.5) + tinier stuff| np.testing.assert_allclose(sign_out, 1.0) np.testing.assert_allclose(log_out, -1234.5) def test_log_linear_equals_log_linear_exp_log(): """Test that log-linear-exp of sign(x), log|x| is just log-linear.""" key = jax.random.PRNGKey(0) key, subkey = jax.random.split(key) x = jax.random.normal(subkey, (9, 5)) sign_x, log_x = slog_helpers.array_to_slog(x) key, subkey = jax.random.split(key) kernel = jax.random.normal(subkey, (5, 7)) sign_linear_out, log_linear_out = slog_helpers.array_to_slog(jnp.dot(x, kernel)) sign_linear_exp_log_out, log_linear_exp_log_out = log_linear_exp( sign_x, log_x, kernel, axis=-1 ) np.testing.assert_allclose(sign_linear_exp_log_out, sign_linear_out) np.testing.assert_allclose(log_linear_exp_log_out, log_linear_out, rtol=1e-5)
[ "jax.numpy.array", "vmcnet.utils.slog_helpers.array_to_slog", "jax.numpy.isfinite", "jax.numpy.log", "chex.assert_shape", "jax.random.normal", "jax.numpy.dot", "numpy.testing.assert_allclose", "jax.random.PRNGKey", "jax.numpy.ones", "jax.numpy.zeros", "vmcnet.utils.log_linear_exp.log_linear_exp", "jax.random.split" ]
[((307, 329), 'jax.numpy.ones', 'jnp.ones', (['(5, 2, 4, 3)'], {}), '((5, 2, 4, 3))\n', (315, 329), True, 'import jax.numpy as jnp\n'), ((341, 364), 'jax.numpy.zeros', 'jnp.zeros', (['(5, 2, 4, 3)'], {}), '((5, 2, 4, 3))\n', (350, 364), True, 'import jax.numpy as jnp\n'), ((379, 395), 'jax.numpy.ones', 'jnp.ones', (['(2, 7)'], {}), '((2, 7))\n', (387, 395), True, 'import jax.numpy as jnp\n'), ((407, 452), 'vmcnet.utils.log_linear_exp.log_linear_exp', 'log_linear_exp', (['signs', 'vals', 'weights'], {'axis': '(-3)'}), '(signs, vals, weights, axis=-3)\n', (421, 452), False, 'from vmcnet.utils.log_linear_exp import log_linear_exp\n'), ((474, 510), 'vmcnet.utils.log_linear_exp.log_linear_exp', 'log_linear_exp', (['signs', 'vals'], {'axis': '(-3)'}), '(signs, vals, axis=-3)\n', (488, 510), False, 'from vmcnet.utils.log_linear_exp import log_linear_exp\n'), ((593, 630), 'chex.assert_shape', 'chex.assert_shape', (['out', 'desired_shape'], {}), '(out, desired_shape)\n', (610, 630), False, 'import chex\n'), ((635, 694), 'chex.assert_shape', 'chex.assert_shape', (['out_no_weights', 'desired_shape_no_weights'], {}), '(out_no_weights, desired_shape_no_weights)\n', (652, 694), False, 'import chex\n'), ((832, 865), 'jax.numpy.array', 'jnp.array', (['[-1.0, -1.0, 1.0, 1.0]'], {}), '([-1.0, -1.0, 1.0, 1.0])\n', (841, 865), True, 'import jax.numpy as jnp\n'), ((877, 915), 'jax.numpy.array', 'jnp.array', (['[300.0, 100.0, 3000.0, 1.5]'], {}), '([300.0, 100.0, 3000.0, 1.5])\n', (886, 915), True, 'import jax.numpy as jnp\n'), ((1009, 1053), 'vmcnet.utils.log_linear_exp.log_linear_exp', 'log_linear_exp', (['signs', 'vals', 'weights'], {'axis': '(0)'}), '(signs, vals, weights, axis=0)\n', (1023, 1053), False, 'from vmcnet.utils.log_linear_exp import log_linear_exp\n'), ((1151, 1172), 'jax.numpy.isfinite', 'jnp.isfinite', (['log_out'], {}), '(log_out)\n', (1163, 1172), True, 'import jax.numpy as jnp\n'), ((1177, 1218), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sign_out', '(1.0)'], {}), '(sign_out, 1.0)\n', (1203, 1218), True, 'import numpy as np\n'), ((1423, 1456), 'jax.numpy.array', 'jnp.array', (['[-1.0, -1.0, 1.0, 1.0]'], {}), '([-1.0, -1.0, 1.0, 1.0])\n', (1432, 1456), True, 'import jax.numpy as jnp\n'), ((1468, 1515), 'jax.numpy.array', 'jnp.array', (['[-4000.0, -5500.0, -3000.0, -1234.5]'], {}), '([-4000.0, -5500.0, -3000.0, -1234.5])\n', (1477, 1515), True, 'import jax.numpy as jnp\n'), ((1541, 1576), 'vmcnet.utils.log_linear_exp.log_linear_exp', 'log_linear_exp', (['signs', 'vals'], {'axis': '(0)'}), '(signs, vals, axis=0)\n', (1555, 1576), False, 'from vmcnet.utils.log_linear_exp import log_linear_exp\n'), ((1664, 1705), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sign_out', '(1.0)'], {}), '(sign_out, 1.0)\n', (1690, 1705), True, 'import numpy as np\n'), ((1710, 1754), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['log_out', '(-1234.5)'], {}), '(log_out, -1234.5)\n', (1736, 1754), True, 'import numpy as np\n'), ((1890, 1911), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1908, 1911), False, 'import jax\n'), ((1930, 1951), 'jax.random.split', 'jax.random.split', (['key'], {}), '(key)\n', (1946, 1951), False, 'import jax\n'), ((1960, 1993), 'jax.random.normal', 'jax.random.normal', (['subkey', '(9, 5)'], {}), '(subkey, (9, 5))\n', (1977, 1993), False, 'import jax\n'), ((2014, 2043), 'vmcnet.utils.slog_helpers.array_to_slog', 'slog_helpers.array_to_slog', (['x'], {}), '(x)\n', (2040, 2043), True, 'import 
vmcnet.utils.slog_helpers as slog_helpers\n'), ((2063, 2084), 'jax.random.split', 'jax.random.split', (['key'], {}), '(key)\n', (2079, 2084), False, 'import jax\n'), ((2098, 2131), 'jax.random.normal', 'jax.random.normal', (['subkey', '(5, 7)'], {}), '(subkey, (5, 7))\n', (2115, 2131), False, 'import jax\n'), ((2272, 2318), 'vmcnet.utils.log_linear_exp.log_linear_exp', 'log_linear_exp', (['sign_x', 'log_x', 'kernel'], {'axis': '(-1)'}), '(sign_x, log_x, kernel, axis=-1)\n', (2286, 2318), False, 'from vmcnet.utils.log_linear_exp import log_linear_exp\n'), ((2338, 2406), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sign_linear_exp_log_out', 'sign_linear_out'], {}), '(sign_linear_exp_log_out, sign_linear_out)\n', (2364, 2406), True, 'import numpy as np\n'), ((2411, 2489), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['log_linear_exp_log_out', 'log_linear_out'], {'rtol': '(1e-05)'}), '(log_linear_exp_log_out, log_linear_out, rtol=1e-05)\n', (2437, 2489), True, 'import numpy as np\n'), ((942, 974), 'jax.numpy.array', 'jnp.array', (['[-1.0, 2.0, 0.5, 0.6]'], {}), '([-1.0, 2.0, 0.5, 0.6])\n', (951, 974), True, 'import jax.numpy as jnp\n'), ((2198, 2216), 'jax.numpy.dot', 'jnp.dot', (['x', 'kernel'], {}), '(x, kernel)\n', (2205, 2216), True, 'import jax.numpy as jnp\n'), ((1259, 1271), 'jax.numpy.log', 'jnp.log', (['(0.5)'], {}), '(0.5)\n', (1266, 1271), True, 'import jax.numpy as jnp\n')]
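The implementation under test is not shown in this excerpt. The following is a plausible minimal sketch of what the tests exercise -- a numerically stable sign/log of a linear combination of signed exponentials -- written from the test expectations alone; it is not the vmcnet source.

import jax.numpy as jnp

def log_linear_exp_sketch(signs, vals, weights=None, axis=0):
    # Factor out the per-slice maximum so every exponent is <= 0 (no overflow),
    # and so small values survive in log space (no underflow).
    max_val = jnp.max(vals, axis=axis, keepdims=True)
    terms = signs * jnp.exp(vals - max_val)
    if weights is None:
        summed = jnp.sum(terms, axis=axis, keepdims=True)
    else:
        # Contract the reduced axis against the weight matrix, keeping its position.
        moved = jnp.moveaxis(terms, axis, -1)
        summed = jnp.moveaxis(jnp.tensordot(moved, weights, axes=1), -1, axis)
    return jnp.sign(summed), jnp.log(jnp.abs(summed)) + max_val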
import pyopencl as cl import pyopencl.array as cl_array import numpy import numpy.linalg as la a = numpy.random.rand(50000).astype(numpy.float32) b = numpy.random.rand(50000).astype(numpy.float32) ctx = cl.create_some_context() queue = cl.CommandQueue(ctx) a_dev = cl_array.to_device(queue, a) b_dev = cl_array.to_device(queue, b) dest_dev = cl_array.empty_like(a_dev) prg = cl.Program(ctx, """ __kernel void sum(__global const float *a, __global const float *b, __global float *c) { int gid = get_global_id(0); c[gid] = a[gid] + b[gid]; } """).build() prg.sum(queue, a.shape, None, a_dev.data, b_dev.data, dest_dev.data) print(la.norm((dest_dev - (a_dev+b_dev)).get()))
[ "pyopencl.array.empty_like", "pyopencl.create_some_context", "pyopencl.array.to_device", "pyopencl.CommandQueue", "pyopencl.Program", "numpy.random.rand" ]
[((205, 229), 'pyopencl.create_some_context', 'cl.create_some_context', ([], {}), '()\n', (227, 229), True, 'import pyopencl as cl\n'), ((238, 258), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['ctx'], {}), '(ctx)\n', (253, 258), True, 'import pyopencl as cl\n'), ((268, 296), 'pyopencl.array.to_device', 'cl_array.to_device', (['queue', 'a'], {}), '(queue, a)\n', (286, 296), True, 'import pyopencl.array as cl_array\n'), ((305, 333), 'pyopencl.array.to_device', 'cl_array.to_device', (['queue', 'b'], {}), '(queue, b)\n', (323, 333), True, 'import pyopencl.array as cl_array\n'), ((345, 371), 'pyopencl.array.empty_like', 'cl_array.empty_like', (['a_dev'], {}), '(a_dev)\n', (364, 371), True, 'import pyopencl.array as cl_array\n'), ((100, 124), 'numpy.random.rand', 'numpy.random.rand', (['(50000)'], {}), '(50000)\n', (117, 124), False, 'import numpy\n'), ((151, 175), 'numpy.random.rand', 'numpy.random.rand', (['(50000)'], {}), '(50000)\n', (168, 175), False, 'import numpy\n'), ((379, 589), 'pyopencl.Program', 'cl.Program', (['ctx', '"""\n __kernel void sum(__global const float *a,\n __global const float *b, __global float *c)\n {\n int gid = get_global_id(0);\n c[gid] = a[gid] + b[gid];\n }\n """'], {}), '(ctx,\n """\n __kernel void sum(__global const float *a,\n __global const float *b, __global float *c)\n {\n int gid = get_global_id(0);\n c[gid] = a[gid] + b[gid];\n }\n """\n )\n', (389, 589), True, 'import pyopencl as cl\n')]
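As a usage note, the same one-line kernel can be generated with pyopencl's ElementwiseKernel helper, which removes the hand-written OpenCL boilerplate; this reuses ctx, a_dev, b_dev and dest_dev from the snippet above.

from pyopencl.elementwise import ElementwiseKernel

# Same addition as the hand-written program, generated from a single expression.
add_vec = ElementwiseKernel(ctx,
    "float *a, float *b, float *c",
    "c[i] = a[i] + b[i]",
    "add_vec")
add_vec(a_dev, b_dev, dest_dev)
print(la.norm((dest_dev - (a_dev + b_dev)).get()))  # should print 0.0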
import sys sys.path.append("PerceptualSimilarity\\") import os import utils import torch import numpy as np from torch import nn import torchgeometry from kornia import color import torch.nn.functional as F from torchvision import transforms class Dense(nn.Module): def __init__(self, in_features, out_features, activation='relu', kernel_initializer='he_normal'): super(Dense, self).__init__() self.in_features = in_features self.out_features = out_features self.activation = activation self.kernel_initializer = kernel_initializer self.linear = nn.Linear(in_features, out_features) # initialization if kernel_initializer == 'he_normal': nn.init.kaiming_normal_(self.linear.weight) else: raise NotImplementedError def forward(self, inputs): outputs = self.linear(inputs) if self.activation is not None: if self.activation == 'relu': outputs = nn.ReLU(inplace=True)(outputs) return outputs class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, activation='relu', strides=1): super(Conv2D, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.activation = activation self.strides = strides self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, strides, int((kernel_size - 1) / 2)) # default: using he_normal as the kernel initializer nn.init.kaiming_normal_(self.conv.weight) def forward(self, inputs): outputs = self.conv(inputs) if self.activation is not None: if self.activation == 'relu': outputs = nn.ReLU(inplace=True)(outputs) else: raise NotImplementedError return outputs class Flatten(nn.Module): def __init__(self): super(Flatten, self).__init__() def forward(self, input): return input.view(input.size(0), -1) class StegaStampEncoder(nn.Module): def __init__(self): super(StegaStampEncoder, self).__init__() self.secret_dense = Dense(100, 7500, activation='relu', kernel_initializer='he_normal') self.conv1 = Conv2D(6, 32, 3, activation='relu') self.conv2 = Conv2D(32, 32, 3, activation='relu', strides=2) self.conv3 = Conv2D(32, 64, 3, activation='relu', strides=2) self.conv4 = Conv2D(64, 128, 3, activation='relu', strides=2) self.conv5 = Conv2D(128, 256, 3, activation='relu', strides=2) self.up6 = Conv2D(256, 128, 3, activation='relu') self.conv6 = Conv2D(256, 128, 3, activation='relu') self.up7 = Conv2D(128, 64, 3, activation='relu') self.conv7 = Conv2D(128, 64, 3, activation='relu') self.up8 = Conv2D(64, 32, 3, activation='relu') self.conv8 = Conv2D(64, 32, 3, activation='relu') self.up9 = Conv2D(32, 32, 3, activation='relu') self.conv9 = Conv2D(70, 32, 3, activation='relu') self.residual = Conv2D(32, 3, 1, activation=None) def forward(self, inputs): secrect, image = inputs secrect = secrect - .5 image = image - .5 secrect = self.secret_dense(secrect) secrect = secrect.reshape(-1, 3, 50, 50) secrect_enlarged = nn.Upsample(scale_factor=(8, 8))(secrect) inputs = torch.cat([secrect_enlarged, image], dim=1) conv1 = self.conv1(inputs) conv2 = self.conv2(conv1) conv3 = self.conv3(conv2) conv4 = self.conv4(conv3) conv5 = self.conv5(conv4) up6 = self.up6(nn.Upsample(scale_factor=(2, 2))(conv5)) merge6 = torch.cat([conv4, up6], dim=1) conv6 = self.conv6(merge6) up7 = self.up7(nn.Upsample(scale_factor=(2, 2))(conv6)) merge7 = torch.cat([conv3, up7], dim=1) conv7 = self.conv7(merge7) up8 = self.up8(nn.Upsample(scale_factor=(2, 2))(conv7)) merge8 = torch.cat([conv2, up8], dim=1) conv8 = self.conv8(merge8) up9 = self.up9(nn.Upsample(scale_factor=(2, 2))(conv8)) merge9 = torch.cat([conv1, up9, inputs], dim=1) conv9 = 
self.conv9(merge9) residual = self.residual(conv9) return residual class SpatialTransformerNetwork(nn.Module): def __init__(self): super(SpatialTransformerNetwork, self).__init__() self.localization = nn.Sequential( Conv2D(3, 32, 3, strides=2, activation='relu'), Conv2D(32, 64, 3, strides=2, activation='relu'), Conv2D(64, 128, 3, strides=2, activation='relu'), Flatten(), Dense(320000, 128, activation='relu'), nn.Linear(128, 6) ) self.localization[-1].weight.data.fill_(0) self.localization[-1].bias.data = torch.FloatTensor([1, 0, 0, 0, 1, 0]) def forward(self, image): theta = self.localization(image) theta = theta.view(-1, 2, 3) grid = F.affine_grid(theta, image.size(), align_corners=False) transformed_image = F.grid_sample(image, grid, align_corners=False) return transformed_image class StegaStampDecoder(nn.Module): def __init__(self, secret_size=100): super(StegaStampDecoder, self).__init__() self.secret_size = secret_size self.stn = SpatialTransformerNetwork() self.decoder = nn.Sequential( Conv2D(3, 32, 3, strides=2, activation='relu'), Conv2D(32, 32, 3, activation='relu'), Conv2D(32, 64, 3, strides=2, activation='relu'), Conv2D(64, 64, 3, activation='relu'), Conv2D(64, 64, 3, strides=2, activation='relu'), Conv2D(64, 128, 3, strides=2, activation='relu'), Conv2D(128, 128, 3, strides=2, activation='relu'), Flatten(), Dense(21632, 512, activation='relu'), Dense(512, secret_size, activation=None)) def forward(self, image): image = image - .5 transformed_image = self.stn(image) return torch.sigmoid(self.decoder(transformed_image)) class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() self.model = nn.Sequential( Conv2D(3, 8, 3, strides=2, activation='relu'), Conv2D(8, 16, 3, strides=2, activation='relu'), Conv2D(16, 32, 3, strides=2, activation='relu'), Conv2D(32, 64, 3, strides=2, activation='relu'), Conv2D(64, 1, 3, activation=None)) def forward(self, image): x = image - .5 x = self.model(x) output = torch.mean(x) return output, x def transform_net(encoded_image, args, global_step): sh = encoded_image.size() ramp_fn = lambda ramp: np.min([global_step / ramp, 1.]) rnd_bri = ramp_fn(args.rnd_bri_ramp) * args.rnd_bri rnd_hue = ramp_fn(args.rnd_hue_ramp) * args.rnd_hue rnd_brightness = utils.get_rnd_brightness_torch(rnd_bri, rnd_hue, args.batch_size) # [batch_size, 3, 1, 1] jpeg_quality = 100. - torch.rand(1)[0] * ramp_fn(args.jpeg_quality_ramp) * (100. - args.jpeg_quality) rnd_noise = torch.rand(1)[0] * ramp_fn(args.rnd_noise_ramp) * args.rnd_noise contrast_low = 1. - (1. - args.contrast_low) * ramp_fn(args.contrast_ramp) contrast_high = 1. + (args.contrast_high - 1.) 
* ramp_fn(args.contrast_ramp)
    contrast_params = [contrast_low, contrast_high]

    rnd_sat = torch.rand(1)[0] * ramp_fn(args.rnd_sat_ramp) * args.rnd_sat

    # blur
    N_blur = 7
    f = utils.random_blur_kernel(probs=[.25, .25], N_blur=N_blur, sigrange_gauss=[1., 3.], sigrange_line=[.25, 1.], wmin_line=3)
    if args.cuda:
        f = f.cuda()
    encoded_image = F.conv2d(encoded_image, f, bias=None, padding=int((N_blur - 1) / 2))

    # noise
    noise = torch.normal(mean=0, std=rnd_noise, size=encoded_image.size(), dtype=torch.float32)
    if args.cuda:
        noise = noise.cuda()
    encoded_image = encoded_image + noise
    encoded_image = torch.clamp(encoded_image, 0, 1)

    # contrast & brightness
    contrast_scale = torch.Tensor(encoded_image.size()[0]).uniform_(contrast_params[0], contrast_params[1])
    contrast_scale = contrast_scale.reshape(encoded_image.size()[0], 1, 1, 1)
    if args.cuda:
        contrast_scale = contrast_scale.cuda()
        rnd_brightness = rnd_brightness.cuda()
    encoded_image = encoded_image * contrast_scale
    encoded_image = encoded_image + rnd_brightness
    encoded_image = torch.clamp(encoded_image, 0, 1)

    # saturation
    sat_weight = torch.FloatTensor([.3, .6, .1]).reshape(1, 3, 1, 1)
    if args.cuda:
        sat_weight = sat_weight.cuda()
    encoded_image_lum = torch.mean(encoded_image * sat_weight, dim=1).unsqueeze_(1)
    encoded_image = (1 - rnd_sat) * encoded_image + rnd_sat * encoded_image_lum

    # jpeg
    encoded_image = encoded_image.reshape([-1, 3, 400, 400])
    if not args.no_jpeg:
        encoded_image = utils.jpeg_compress_decompress(encoded_image, rounding=utils.round_only_at_0, quality=jpeg_quality)

    return encoded_image


def get_secret_acc(secret_true, secret_pred):
    if 'cuda' in str(secret_pred.device):
        secret_pred = secret_pred.cpu()
        secret_true = secret_true.cpu()
    secret_pred = torch.round(secret_pred)
    correct_pred = torch.sum((secret_pred - secret_true) == 0, dim=1)
    str_acc = 1.0 - torch.sum((correct_pred - secret_pred.size()[1]) != 0).numpy() / correct_pred.size()[0]
    bit_acc = torch.sum(correct_pred).numpy() / secret_pred.numel()
    return bit_acc, str_acc


def build_model(encoder, decoder, discriminator, lpips_fn, secret_input, image_input, l2_edge_gain, borders, secret_size, M, loss_scales, yuv_scales, args, global_step, writer):
    test_transform = transform_net(image_input, args, global_step)
    input_warped = torchgeometry.warp_perspective(image_input, M[:, 1, :, :], dsize=(400, 400), flags='bilinear')
    mask_warped = torchgeometry.warp_perspective(torch.ones_like(input_warped), M[:, 1, :, :], dsize=(400, 400), flags='bilinear')
    input_warped += (1 - mask_warped) * image_input
    residual_warped = encoder((secret_input, input_warped))
    encoded_warped = residual_warped + input_warped
    residual = torchgeometry.warp_perspective(residual_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
    if borders == 'no_edge':
        encoded_image = image_input + residual
    elif borders == 'black':
        encoded_image = residual_warped + input_warped
        encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
        input_unwarped = torchgeometry.warp_perspective(image_input, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
    elif borders.startswith('random'):
        mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
        # BUGFIX: the original added input_unwarped here, which is only assigned
        # two lines below (NameError); use input_warped as the other branches do
        encoded_image = residual_warped + input_warped
        encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
        input_unwarped = torchgeometry.warp_perspective(input_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear')
        ch = 3 if 
borders.endswith('rgb') else 1 encoded_image += (1 - mask) * torch.ones_like(residual) * torch.rand([ch]) elif borders == 'white': mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400), flags='bilinear') encoded_image = residual_warped + input_warped encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear') input_unwarped = torchgeometry.warp_perspective(input_warped, M[:, 0, :, :], dsize=(400, 400), flags='bilinear') encoded_image += (1 - mask) * torch.ones_like(residual) elif borders == 'image': mask = torchgeometry.warp_perspective(torch.ones_like(residual), M[:, 0, :, :], dsize=(400, 400), flags='bilinear') encoded_image = residual_warped + input_warped encoded_image = torchgeometry.warp_perspective(encoded_image, M[:, 0, :, :], dsize=(400, 400), flags='bilinear') encoded_image += (1 - mask) * torch.roll(image_input, 1, 0) if borders == 'no_edge': D_output_real, _ = discriminator(image_input) D_output_fake, D_heatmap = discriminator(encoded_image) else: D_output_real, _ = discriminator(input_warped) D_output_fake, D_heatmap = discriminator(encoded_warped) transformed_image = transform_net(encoded_image, args, global_step) decoded_secret = decoder(transformed_image) bit_acc, str_acc = get_secret_acc(secret_input, decoded_secret) normalized_input = image_input * 2 - 1 normalized_encoded = encoded_image * 2 - 1 lpips_loss = torch.mean(lpips_fn(normalized_input, normalized_encoded)) cross_entropy = nn.BCELoss() if args.cuda: cross_entropy = cross_entropy.cuda() secret_loss = cross_entropy(decoded_secret, secret_input) size = (int(image_input.shape[2]), int(image_input.shape[3])) gain = 10 falloff_speed = 4 falloff_im = np.ones(size) for i in range(int(falloff_im.shape[0] / falloff_speed)): # for i in range 100 falloff_im[-i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2 # [cos[(4*pi*i/400)+pi] + 1]/2 falloff_im[i, :] *= (np.cos(4 * np.pi * i / size[0] + np.pi) + 1) / 2 # [cos[(4*pi*i/400)+pi] + 1]/2 for j in range(int(falloff_im.shape[1] / falloff_speed)): falloff_im[:, -j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2 falloff_im[:, j] *= (np.cos(4 * np.pi * j / size[0] + np.pi) + 1) / 2 falloff_im = 1 - falloff_im falloff_im = torch.from_numpy(falloff_im).float() if args.cuda: falloff_im = falloff_im.cuda() falloff_im *= l2_edge_gain encoded_image_yuv = color.rgb_to_yuv(encoded_image) image_input_yuv = color.rgb_to_yuv(image_input) im_diff = encoded_image_yuv - image_input_yuv im_diff += im_diff * falloff_im.unsqueeze_(0) yuv_loss = torch.mean((im_diff) ** 2, axis=[0, 2, 3]) yuv_scales = torch.Tensor(yuv_scales) if args.cuda: yuv_scales = yuv_scales.cuda() image_loss = torch.dot(yuv_loss, yuv_scales) D_loss = D_output_real - D_output_fake G_loss = D_output_fake loss = loss_scales[0] * image_loss + loss_scales[1] * lpips_loss + loss_scales[2] * secret_loss if not args.no_gan: loss += loss_scales[3] * G_loss writer.add_scalar('loss/image_loss', image_loss, global_step) writer.add_scalar('loss/lpips_loss', lpips_loss, global_step) writer.add_scalar('loss/secret_loss', secret_loss, global_step) writer.add_scalar('loss/G_loss', G_loss, global_step) writer.add_scalar('loss/loss', loss, global_step) writer.add_scalar('metric/bit_acc', bit_acc, global_step) writer.add_scalar('metric/str_acc', str_acc, global_step) if global_step % 20 == 0: writer.add_image('input/image_input', image_input[0], global_step) writer.add_image('input/image_warped', input_warped[0], global_step) 
writer.add_image('encoded/encoded_warped', encoded_warped[0], global_step) writer.add_image('encoded/residual_warped', residual_warped[0] + 0.5, global_step) writer.add_image('encoded/encoded_image', encoded_image[0], global_step) writer.add_image('transformed/transformed_image', transformed_image[0], global_step) writer.add_image('transformed/test', test_transform[0], global_step) return loss, secret_loss, D_loss, bit_acc, str_acc
[ "utils.get_rnd_brightness_torch", "torch.dot", "numpy.ones", "torch.cat", "torch.roll", "utils.random_blur_kernel", "sys.path.append", "torch.nn.BCELoss", "torch.nn.init.kaiming_normal_", "torch.nn.functional.grid_sample", "torch.FloatTensor", "torch.nn.Upsample", "kornia.color.rgb_to_yuv", "torch.Tensor", "utils.jpeg_compress_decompress", "torch.nn.Linear", "torch.mean", "numpy.min", "torch.clamp", "numpy.cos", "torch.rand", "torch.sum", "torch.from_numpy", "torch.ones_like", "torch.nn.ReLU", "torchgeometry.warp_perspective", "torch.round" ]
[((12, 53), 'sys.path.append', 'sys.path.append', (['"""PerceptualSimilarity\\\\"""'], {}), "('PerceptualSimilarity\\\\')\n", (27, 53), False, 'import sys\n'),
 ((7002, 7067), 'utils.get_rnd_brightness_torch', 'utils.get_rnd_brightness_torch', (['rnd_bri', 'rnd_hue', 'args.batch_size'], {}), '(rnd_bri, rnd_hue, args.batch_size)\n', (7032, 7067), False, 'import utils\n'),
 ((7604, 7735), 'utils.random_blur_kernel', 'utils.random_blur_kernel', ([], {'probs': '[0.25, 0.25]', 'N_blur': 'N_blur', 'sigrange_gauss': '[1.0, 3.0]', 'sigrange_line': '[0.25, 1.0]', 'wmin_line': '(3)'}), '(probs=[0.25, 0.25], N_blur=N_blur, sigrange_gauss=\n [1.0, 3.0], sigrange_line=[0.25, 1.0], wmin_line=3)\n', (7628, 7735), False, 'import utils\n'),
 ((8104, 8136), 'torch.clamp', 'torch.clamp', (['encoded_image', '(0)', '(1)'], {}), '(encoded_image, 0, 1)\n', (8115, 8136), False, 'import torch\n'),
 ((8586, 8618), 'torch.clamp', 'torch.clamp', (['encoded_image', '(0)', '(1)'], {}), '(encoded_image, 0, 1)\n', (8597, 8618), False, 'import torch\n'),
 ((9418, 9442), 'torch.round', 'torch.round', (['secret_pred'], {}), '(secret_pred)\n', (9429, 9442), False, 'import torch\n'),
 ((9462, 9510), 'torch.sum', 'torch.sum', (['(secret_pred - secret_true == 0)'], {'dim': '(1)'}), '(secret_pred - secret_true == 0, dim=1)\n', (9471, 9510), False, 'import torch\n'),
 ((10000, 10098), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['image_input', 'M[:, 1, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(image_input, M[:, 1, :, :], dsize=(400, 400),\n flags='bilinear')\n", (10030, 10098), False, 'import torchgeometry\n'),
 ((10456, 10559), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['residual_warped', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(residual_warped, M[:, 0, :, :], dsize=(400, \n 400), flags='bilinear')\n", (10486, 10559), False, 'import torchgeometry\n'),
 ((13255, 13267), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (13265, 13267), False, 'from torch import nn\n'),
 ((13513, 13526), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (13520, 13526), True, 'import numpy as np\n'),
 ((14250, 14281), 'kornia.color.rgb_to_yuv', 'color.rgb_to_yuv', (['encoded_image'], {}), '(encoded_image)\n', (14266, 14281), False, 'from kornia import color\n'),
 ((14304, 14333), 'kornia.color.rgb_to_yuv', 'color.rgb_to_yuv', (['image_input'], {}), '(image_input)\n', (14320, 14333), False, 'from kornia import color\n'),
 ((14449, 14489), 'torch.mean', 'torch.mean', (['(im_diff ** 2)'], {'axis': '[0, 2, 3]'}), '(im_diff ** 2, axis=[0, 2, 3])\n', (14459, 14489), False, 'import torch\n'),
 ((14509, 14533), 'torch.Tensor', 'torch.Tensor', (['yuv_scales'], {}), '(yuv_scales)\n', (14521, 14533), False, 'import torch\n'),
 ((14608, 14639), 'torch.dot', 'torch.dot', (['yuv_loss', 'yuv_scales'], {}), '(yuv_loss, yuv_scales)\n', (14617, 14639), False, 'import torch\n'),
 ((602, 638), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (611, 638), False, 'from torch import nn\n'),
 ((1576, 1617), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.conv.weight'], {}), '(self.conv.weight)\n', (1599, 1617), False, 'from torch import nn\n'),
 ((3445, 3488), 'torch.cat', 'torch.cat', (['[secrect_enlarged, image]'], {'dim': '(1)'}), '([secrect_enlarged, image], dim=1)\n', (3454, 3488), False, 'import torch\n'),
 ((3741, 3771), 'torch.cat', 'torch.cat', (['[conv4, up6]'], {'dim': '(1)'}), '([conv4, up6], dim=1)\n', (3750, 3771), False, 'import torch\n'),
 ((3888, 3918), 'torch.cat', 'torch.cat', (['[conv3, up7]'], {'dim': '(1)'}), '([conv3, up7], dim=1)\n', (3897, 3918), False, 'import torch\n'),
 ((4035, 4065), 'torch.cat', 'torch.cat', (['[conv2, up8]'], {'dim': '(1)'}), '([conv2, up8], dim=1)\n', (4044, 4065), False, 'import torch\n'),
 ((4182, 4220), 'torch.cat', 'torch.cat', (['[conv1, up9, inputs]'], {'dim': '(1)'}), '([conv1, up9, inputs], dim=1)\n', (4191, 4220), False, 'import torch\n'),
 ((4881, 4918), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 0, 0, 0, 1, 0]'], {}), '([1, 0, 0, 0, 1, 0])\n', (4898, 4918), False, 'import torch\n'),
 ((5127, 5174), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['image', 'grid'], {'align_corners': '(False)'}), '(image, grid, align_corners=False)\n', (5140, 5174), True, 'import torch.nn.functional as F\n'),
 ((6684, 6697), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (6694, 6697), False, 'import torch\n'),
 ((6835, 6868), 'numpy.min', 'np.min', (['[global_step / ramp, 1.0]'], {}), '([global_step / ramp, 1.0])\n', (6841, 6868), True, 'import numpy as np\n'),
 ((9049, 9153), 'utils.jpeg_compress_decompress', 'utils.jpeg_compress_decompress', (['encoded_image'], {'rounding': 'utils.round_only_at_0', 'quality': 'jpeg_quality'}), '(encoded_image, rounding=utils.\n round_only_at_0, quality=jpeg_quality)\n', (9079, 9153), False, 'import utils\n'),
 ((10144, 10173), 'torch.ones_like', 'torch.ones_like', (['input_warped'], {}), '(input_warped)\n', (10159, 10173), False, 'import torch\n'),
 ((722, 765), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (745, 765), False, 'from torch import nn\n'),
 ((3385, 3417), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(8, 8)'}), '(scale_factor=(8, 8))\n', (3396, 3417), False, 'from torch import nn\n'),
 ((4760, 4777), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(6)'], {}), '(128, 6)\n', (4769, 4777), False, 'from torch import nn\n'),
 ((8654, 8688), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.3, 0.6, 0.1]'], {}), '([0.3, 0.6, 0.1])\n', (8671, 8688), False, 'import torch\n'),
 ((8787, 8832), 'torch.mean', 'torch.mean', (['(encoded_image * sat_weight)'], {'dim': '(1)'}), '(encoded_image * sat_weight, dim=1)\n', (8797, 8832), False, 'import torch\n'),
 ((10740, 10841), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['encoded_image', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(encoded_image, M[:, 0, :, :], dsize=(400, \n 400), flags='bilinear')\n", (10770, 10841), False, 'import torchgeometry\n'),
 ((10862, 10960), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['image_input', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(image_input, M[:, 0, :, :], dsize=(400, 400),\n flags='bilinear')\n", (10892, 10960), False, 'import torchgeometry\n'),
 ((14100, 14128), 'torch.from_numpy', 'torch.from_numpy', (['falloff_im'], {}), '(falloff_im)\n', (14116, 14128), False, 'import torch\n'),
 ((3683, 3715), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2, 2)'}), '(scale_factor=(2, 2))\n', (3694, 3715), False, 'from torch import nn\n'),
 ((3830, 3862), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2, 2)'}), '(scale_factor=(2, 2))\n', (3841, 3862), False, 'from torch import nn\n'),
 ((3977, 4009), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2, 2)'}), '(scale_factor=(2, 2))\n', (3988, 4009), False, 'from torch import nn\n'),
 ((4124, 4156), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2, 2)'}), '(scale_factor=(2, 2))\n', (4135, 4156), False, 'from torch import nn\n'),
 ((7215, 7228), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7225, 7228), False, 'import torch\n'),
 ((7508, 7521), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7518, 7521), False, 'import torch\n'),
 ((9635, 9658), 'torch.sum', 'torch.sum', (['correct_pred'], {}), '(correct_pred)\n', (9644, 9658), False, 'import torch\n'),
 ((11247, 11348), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['encoded_image', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(encoded_image, M[:, 0, :, :], dsize=(400, \n 400), flags='bilinear')\n", (11277, 11348), False, 'import torchgeometry\n'),
 ((11369, 11469), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['input_warped', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(input_warped, M[:, 0, :, :], dsize=(400, 400\n ), flags='bilinear')\n", (11399, 11469), False, 'import torchgeometry\n'),
 ((13641, 13680), 'numpy.cos', 'np.cos', (['(4 * np.pi * i / size[0] + np.pi)'], {}), '(4 * np.pi * i / size[0] + np.pi)\n', (13647, 13680), True, 'import numpy as np\n'),
 ((13751, 13790), 'numpy.cos', 'np.cos', (['(4 * np.pi * i / size[0] + np.pi)'], {}), '(4 * np.pi * i / size[0] + np.pi)\n', (13757, 13790), True, 'import numpy as np\n'),
 ((13924, 13963), 'numpy.cos', 'np.cos', (['(4 * np.pi * j / size[0] + np.pi)'], {}), '(4 * np.pi * j / size[0] + np.pi)\n', (13930, 13963), True, 'import numpy as np\n'),
 ((14002, 14041), 'numpy.cos', 'np.cos', (['(4 * np.pi * j / size[0] + np.pi)'], {}), '(4 * np.pi * j / size[0] + np.pi)\n', (14008, 14041), True, 'import numpy as np\n'),
 ((996, 1017), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1003, 1017), False, 'from torch import nn\n'),
 ((1794, 1815), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1801, 1815), False, 'from torch import nn\n'),
 ((7119, 7132), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7129, 7132), False, 'import torch\n'),
 ((11042, 11067), 'torch.ones_like', 'torch.ones_like', (['residual'], {}), '(residual)\n', (11057, 11067), False, 'import torch\n'),
 ((11580, 11596), 'torch.rand', 'torch.rand', (['[ch]'], {}), '([ch])\n', (11590, 11596), False, 'import torch\n'),
 ((11875, 11976), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['encoded_image', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(encoded_image, M[:, 0, :, :], dsize=(400, \n 400), flags='bilinear')\n", (11905, 11976), False, 'import torchgeometry\n'),
 ((11997, 12097), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['input_warped', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(input_warped, M[:, 0, :, :], dsize=(400, 400\n ), flags='bilinear')\n", (12027, 12097), False, 'import torchgeometry\n'),
 ((11552, 11577), 'torch.ones_like', 'torch.ones_like', (['residual'], {}), '(residual)\n', (11567, 11577), False, 'import torch\n'),
 ((11672, 11697), 'torch.ones_like', 'torch.ones_like', (['residual'], {}), '(residual)\n', (11687, 11697), False, 'import torch\n'),
 ((12131, 12156), 'torch.ones_like', 'torch.ones_like', (['residual'], {}), '(residual)\n', (12146, 12156), False, 'import torch\n'),
 ((12435, 12536), 'torchgeometry.warp_perspective', 'torchgeometry.warp_perspective', (['encoded_image', 'M[:, 0, :, :]'], {'dsize': '(400, 400)', 'flags': '"""bilinear"""'}), "(encoded_image, M[:, 0, :, :], dsize=(400, \n 400), flags='bilinear')\n", (12465, 12536), False, 'import torchgeometry\n'),
 ((12232, 12257), 'torch.ones_like', 'torch.ones_like', (['residual'], {}), '(residual)\n', (12247, 12257), False, 'import torch\n'),
 ((12570, 12599), 'torch.roll', 'torch.roll', (['image_input', '(1)', '(0)'], {}), '(image_input, 1, 0)\n', (12580, 12599), False, 'import torch\n')]
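A minimal sketch of the torchgeometry.warp_perspective call pattern recorded in the entry above; the batch of images and the identity homographies are illustrative stand-ins for the original pipeline's tensors, not its actual data:

# Illustrative sketch only; assumes torchgeometry is installed.
import torch
import torchgeometry

image_input = torch.rand(2, 3, 400, 400)    # batch of RGB images in [0, 1]
M = torch.eye(3).repeat(2, 1, 1)         # one 3x3 homography per image, shape (B, 3, 3)
warped = torchgeometry.warp_perspective(image_input, M, dsize=(400, 400), flags='bilinear')
print(warped.shape)                        # torch.Size([2, 3, 400, 400])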
import json
import random
import numpy as np

from Source.Utility.Pathfinding.Graph import Graph


def get_distance_to_players(game_state):
    own_player = game_state["players"][str(game_state["you"])]
    distances = [0, 0, 0, 0, 0, 0]
    current_position = (own_player["x"], own_player["y"])
    if game_state["players"][str(game_state["you"])]["active"]:
        for i in range(6):
            if i + 1 == game_state["you"]:
                distances[i] = 0
            else:
                try:
                    if game_state["players"][str(i + 1)]["active"]:
                        enemy_position = (game_state["players"][str(i + 1)]["x"],
                                          game_state["players"][str(i + 1)]["y"])
                        distance = np.sqrt(np.power(current_position[0] - enemy_position[0], 2)
                                           + np.power(current_position[1] - enemy_position[1], 2))
                        distances[i] = distance
                    else:
                        distances[i] = 0
                except KeyError:
                    distances[i] = 0
    # normalise by the board diagonal
    max_distance = np.sqrt(np.power(game_state["width"], 2) + np.power(game_state["height"], 2))
    for i in range(len(distances)):
        distances[i] = distances[i] / max_distance
    return distances


def get_average_distance(distances):
    total = counter = 0.0  # "total" avoids shadowing the builtin sum()
    for i in range(len(distances)):
        if distances[i] == 0:
            pass
        else:
            total += distances[i]
            counter += 1
    if counter == 0:
        return 0
    else:
        return total / counter


def get_free_spaces(new_position, game_state):
    own_player = game_state["players"][str(game_state["you"])]
    speed = own_player["speed"]
    number_of_free_spaces = 0
    for i in range(-2, 3):
        for j in range(-2, 3):
            try:
                if game_state["cells"][new_position[1] + i][new_position[0] + j] == 0:
                    number_of_free_spaces += 1
            except IndexError:
                pass
    normalised_num = (number_of_free_spaces - speed) / 25.0
    return normalised_num


def get_avg_speed(game_state):
    total = 0.0
    counter = 0.0
    avg = 0.0
    if game_state["players"][str(game_state["you"])]["active"]:
        for i in range(6):
            if i + 1 == game_state["you"]:
                pass
            else:
                try:
                    if game_state["players"][str(i + 1)]["active"]:
                        total += game_state["players"][str(i + 1)]["speed"]
                        counter += 1
                except KeyError:
                    pass
    if counter > 0:
        avg = total / counter
    norm_avg = avg / 10
    return norm_avg


def get_num_living_players(game_state):
    num = 0
    for i in range(6):
        if game_state["players"][str(i + 1)]["active"]:
            num += 1
    return num


def get_player_data(game_state, id):
    x = game_state["players"][str(id + 1)]["x"]
    y = game_state["players"][str(id + 1)]["y"]
    speed = game_state["players"][str(id + 1)]["speed"]
    return x, y, speed


def get_distances_to_borders(game_state, id):
    board_height = game_state["height"]
    board_width = game_state["width"]
    position = game_state["players"][str(id + 1)]["x"], game_state["players"][str(id + 1)]["y"]
    top_distance = position[1] - 1
    bottom_distance = (board_height - 1) - (position[1] - 1)
    right_distance = (board_width - 1) - (position[0] - 1)
    left_distance = position[0] - 1
    return top_distance, bottom_distance, right_distance, left_distance


def get_own_speed(game_state):
    own_player = game_state["players"][str(game_state["you"])]
    speed = own_player["speed"]
    return speed


def get_connected_fields_for_new_position(x, y, new_direction, game_state, field_size):
    game_state = json.loads(game_state)
    graph = Graph(game_state["cells"], x, y, game_state["width"], game_state["height"],
                  new_direction, field_size)
    return len(graph.get_connected_components())
[ "numpy.power", "Source.Utility.Pathfinding.Graph.Graph", "json.loads" ]
[((3807, 3829), 'json.loads', 'json.loads', (['game_state'], {}), '(game_state)\n', (3817, 3829), False, 'import json\n'), ((3842, 3948), 'Source.Utility.Pathfinding.Graph.Graph', 'Graph', (["game_state['cells']", 'x', 'y', "game_state['width']", "game_state['height']", 'new_direction', 'field_size'], {}), "(game_state['cells'], x, y, game_state['width'], game_state['height'],\n new_direction, field_size)\n", (3847, 3948), False, 'from Source.Utility.Pathfinding.Graph import Graph\n'), ((1083, 1115), 'numpy.power', 'np.power', (["game_state['width']", '(2)'], {}), "(game_state['width'], 2)\n", (1091, 1115), True, 'import numpy as np\n'), ((1118, 1151), 'numpy.power', 'np.power', (["game_state['height']", '(2)'], {}), "(game_state['height'], 2)\n", (1126, 1151), True, 'import numpy as np\n'), ((733, 785), 'numpy.power', 'np.power', (['(current_position[0] - enemy_position[0])', '(2)'], {}), '(current_position[0] - enemy_position[0], 2)\n', (741, 785), True, 'import numpy as np\n'), ((788, 840), 'numpy.power', 'np.power', (['(current_position[1] - enemy_position[1])', '(2)'], {}), '(current_position[1] - enemy_position[1], 2)\n', (796, 840), True, 'import numpy as np\n')]
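A hypothetical usage sketch for the helpers above; every field value in this game_state is invented for illustration and is not from a real game session:

# Illustrative sketch only; assumes the helper functions above are importable.
game_state = {
    "you": 1,
    "width": 40,
    "height": 40,
    "cells": [[0] * 40 for _ in range(40)],   # empty board
    "players": {
        "1": {"x": 5, "y": 5, "speed": 1, "active": True},
        "2": {"x": 20, "y": 30, "speed": 2, "active": True},
    },
}
distances = get_distance_to_players(game_state)
print(get_average_distance(distances))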
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions for estimating the precision matrix based on the covariance of
either the residuals (temporal based precision matrix) or of the
measurements (instance based precision matrix)
"""

from collections.abc import Iterable
import numpy as np
from rsatoolbox.data import average_dataset_by
from rsatoolbox.util.data_utils import get_unique_inverse


def _check_demean(matrix):
    """ checks that an input has 2 or 3 dimensions and subtracts the mean.
    returns a 2D matrix for covariance/precision computation and the
    degrees of freedom

    Args:
        matrix (np.ndarray): n_conditions x n_channels

    Returns:
        numpy.ndarray: demeaned matrix
    """
    assert isinstance(matrix, np.ndarray), "input must be ndarray"
    if matrix.ndim in [1, 2]:
        matrix = matrix - np.mean(matrix, axis=0, keepdims=True)
        dof = matrix.shape[0] - 1
    elif matrix.ndim == 3:
        # demean without modifying the caller's array in place
        matrix = matrix - np.mean(matrix, axis=2, keepdims=True)
        dof = (matrix.shape[0] - 1) * matrix.shape[2]
        matrix = matrix.transpose(0, 2, 1).reshape(
            matrix.shape[0] * matrix.shape[2], matrix.shape[1])
    else:
        raise ValueError('Matrix for covariance estimation has wrong # of dimensions!')
    return matrix, dof


def _estimate_covariance(matrix, dof, method):
    """ calls the right covariance estimation function based on the
    "method" argument

    Args:
        matrix (np.ndarray): n_conditions x n_channels
        dof (int): degrees of freedom
        method (string): which estimator to use

    Returns:
        numpy.ndarray, numpy.ndarray:
        cov_mat: n_channels x n_channels sample covariance matrix
    """
    matrix, dof_nat = _check_demean(matrix)
    if dof is None:
        dof = dof_nat
    # calculate sample covariance matrix s
    if method == 'shrinkage_eye':
        cov_mat = _covariance_eye(matrix, dof)
    elif method == 'shrinkage_diag':
        cov_mat = _covariance_diag(matrix, dof)
    elif method == 'diag':
        cov_mat = _variance(matrix, dof)
    elif method == 'full':
        cov_mat = _covariance_full(matrix, dof)
    else:
        raise ValueError('Unknown covariance estimation method: %s' % method)
    return cov_mat


def _variance(matrix, dof):
    """ returns the vector of variances per measurement channel.
    The formula used here implies that the mean was already removed.

    Args:
        matrix (np.ndarray): n_conditions x n_channels

    Returns:
        numpy.ndarray: variance vector
    """
    return np.diag(np.einsum('ij, ij-> j', matrix, matrix) / dof)


def _covariance_full(matrix, dof):
    """ computes the sample covariance matrix from a 2d-array.
    matrix should be demeaned before!

    Args:
        matrix (np.ndarray): n_conditions x n_channels

    Returns:
        numpy.ndarray, numpy.ndarray:
        s_mean: n_channels x n_channels sample covariance matrix
    """
    return np.einsum('ij, ik-> jk', matrix, matrix) / dof


def _covariance_eye(matrix, dof):
    """ computes the sample covariance matrix from a 2d-array.
    matrix should be demeaned before!

    Computes an optimal shrinkage estimate of a sample covariance matrix
    as described by the following publication:

    Ledoit and Wolf (2004): "A well-conditioned estimator for
    large-dimensional covariance matrices"

    Args:
        matrix (np.ndarray): n_conditions x n_channels

    Returns:
        numpy.ndarray:
        s_shrink: n_channels x n_channels shrinkage covariance estimate
    """
    s_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
    s2_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
    for m_line in matrix:
        xt_x = np.outer(m_line, m_line)
        s_sum += xt_x
        s2_sum += xt_x ** 2
    s = s_sum / matrix.shape[0]
    b2 = np.sum(s2_sum / matrix.shape[0] - s * s) / matrix.shape[0]
    # calculate the scalar estimators to find the optimal shrinkage:
    # m, d^2, b^2 as in the Ledoit & Wolf paper
    m = np.sum(np.diag(s)) / s.shape[0]
    d2 = np.sum((s - m * np.eye(s.shape[0])) ** 2)
    b2 = min(d2, b2)
    # shrink covariance matrix
    s_shrink = b2 / d2 * m * np.eye(s.shape[0]) \
        + (d2-b2) / d2 * s
    # correction for degrees of freedom
    s_shrink = s_shrink * matrix.shape[0] / dof
    return s_shrink


def _covariance_diag(matrix, dof, mem_threshold=(10**9)/8):
    """ computes the sample covariance matrix from a 2d-array.
    matrix should be demeaned before!

    Computes an optimal shrinkage estimate of a sample covariance matrix
    as described by the following publication:

    Schäfer, J., & Strimmer, K. (2005). "A Shrinkage Approach to
    Large-Scale Covariance Matrix Estimation and Implications for
    Functional Genomics."

    Args:
        matrix (np.ndarray): n_conditions x n_channels

    Returns:
        numpy.ndarray:
        s_shrink: n_channels x n_channels shrinkage covariance estimate
    """
    s_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
    s2_sum = np.zeros((matrix.shape[1], matrix.shape[1]))
    for m_line in matrix:
        xt_x = np.outer(m_line, m_line)
        s_sum += xt_x
        s2_sum += xt_x ** 2
    s = s_sum / dof
    var = np.diag(s)
    std = np.sqrt(var)
    s_mean = s_sum / np.expand_dims(std, 0) / np.expand_dims(std, 1) / (matrix.shape[0] - 1)
    s2_mean = s2_sum / np.expand_dims(var, 0) / np.expand_dims(var, 1) / (matrix.shape[0] - 1)
    var_hat = matrix.shape[0] / dof ** 2 \
        * (s2_mean - s_mean ** 2)
    mask = ~np.eye(s.shape[0], dtype=bool)  # off-diagonal mask
    lamb = np.sum(var_hat[mask]) / np.sum(s_mean[mask] ** 2)
    lamb = max(min(lamb, 1), 0)
    scaling = np.eye(s.shape[0]) + (1-lamb) * mask
    s_shrink = s * scaling
    return s_shrink


def sample_covariance_3d(tensor):
    """ computes the sample covariance matrix from a tensor by estimating
    the sample covariance for each slice along the third dimension and
    averaging the estimated covariance matrices.

    Args:
        tensor (numpy.ndarray): n_conditions x n_channels x n_measurements

    Returns:
        numpy.ndarray:
        s_mean: n_channels x n_channels expected sample covariance matrix
    """
    xt_x = np.einsum('ij, ik-> ijk', tensor, tensor)
    s = np.mean(xt_x, axis=0)
    return s, xt_x


def cov_from_residuals(residuals, dof=None, method='shrinkage_diag'):
    """ Estimates a covariance matrix from residuals. Allows for shrinkage
    estimates. Use 'method' to choose which estimation method is used.

    Args:
        residuals(numpy.ndarray or list of these): n_residuals x n_channels
            matrix of residuals
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: covariance matrix over channels
    """
    if not isinstance(residuals, np.ndarray) or len(residuals.shape) > 2:
        cov_mat = []
        for i, residual in enumerate(residuals):
            # estimate one covariance per residual matrix, each with its own dof
            if dof is None:
                cov_mat.append(cov_from_residuals(
                    residual, method=method))
            elif isinstance(dof, Iterable):
                cov_mat.append(cov_from_residuals(
                    residual, method=method, dof=dof[i]))
            else:
                cov_mat.append(cov_from_residuals(
                    residual, method=method, dof=dof))
    else:
        cov_mat = _estimate_covariance(residuals, dof, method)
    return cov_mat


def prec_from_residuals(residuals, dof=None, method='shrinkage_diag'):
    """ Estimates the covariance matrix from residuals and finds its
    multiplicative inverse (= the precision matrix).
    Use 'method' to choose which estimation method is used.

    Args:
        residuals(numpy.ndarray or list of these): n_residuals x n_channels
            matrix of residuals
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: precision matrix over channels
    """
    cov = cov_from_residuals(residuals=residuals, dof=dof, method=method)
    if not isinstance(cov, np.ndarray):
        prec = [None] * len(cov)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    elif len(cov.shape) > 2:
        prec = np.zeros(cov.shape)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    else:
        prec = np.linalg.inv(cov)
    return prec


def cov_from_measurements(dataset, obs_desc, dof=None, method='shrinkage_diag'):
    """ Estimates a covariance matrix from measurements. Allows for shrinkage
    estimates. Use 'method' to choose which estimation method is used.

    Args:
        dataset(data.Dataset): rsatoolbox Dataset object
        obs_desc(String): key of the observation descriptor that defines the conditions
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: covariance matrix over channels
    """
    if isinstance(dataset, Iterable):
        cov_mat = []
        for i, dat in enumerate(dataset):
            if dof is None:
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method))
            elif isinstance(dof, Iterable):
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method, dof=dof[i]))
            else:
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method, dof=dof))
    else:
        assert "Dataset" in str(type(dataset)), "Provided object is not a dataset"
        assert obs_desc in dataset.obs_descriptors.keys(), \
            "obs_desc not contained in the dataset's obs_descriptors"
        tensor, _ = dataset.get_measurements_tensor(obs_desc)
        # calculate sample covariance matrix s
        cov_mat = _estimate_covariance(tensor, dof, method)
    return cov_mat


def prec_from_measurements(dataset, obs_desc, dof=None, method='shrinkage_diag'):
    """ Estimates the covariance matrix from measurements and finds its
    multiplicative inverse (= the precision matrix).
    Use 'method' to choose which estimation method is used.

    Args:
        dataset(data.Dataset): rsatoolbox Dataset object
        obs_desc(String): key of the observation descriptor that defines the conditions
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: precision matrix over channels
    """
    cov = cov_from_measurements(dataset, obs_desc, dof=dof, method=method)
    if not isinstance(cov, np.ndarray):
        prec = [None] * len(cov)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    elif len(cov.shape) > 2:
        prec = np.zeros(cov.shape)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    else:
        prec = np.linalg.inv(cov)
    return prec


def cov_from_unbalanced(dataset, obs_desc, dof=None, method='shrinkage_diag'):
    """ Estimates a covariance matrix from an unbalanced dataset, i.e. from a
    dataset that contains different numbers of samples for different stimuli.

    Args:
        dataset(data.Dataset): rsatoolbox Dataset object
        obs_desc(String): key of the observation descriptor that defines the conditions
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_measurements - n_stimuli, should be corrected
            if this is not the case
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: covariance matrix over channels
    """
    if isinstance(dataset, Iterable):
        cov_mat = []
        for i, dat in enumerate(dataset):
            if dof is None:
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method))
            elif isinstance(dof, Iterable):
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method, dof=dof[i]))
            else:
                cov_mat.append(cov_from_unbalanced(
                    dat, obs_desc=obs_desc, method=method, dof=dof))
    else:
        assert "Dataset" in str(type(dataset)), "Provided object is not a dataset"
        assert obs_desc in dataset.obs_descriptors.keys(), \
            "obs_desc not contained in the dataset's obs_descriptors"
        means, _, _ = average_dataset_by(dataset, obs_desc)
        values, inverse = get_unique_inverse(dataset.obs_descriptors[obs_desc])
        # subtract the condition means without modifying the dataset in place
        matrix = dataset.measurements - means[inverse]
        # calculate sample covariance matrix s
        if dof is None:
            dof = matrix.shape[0] - len(values)
        cov_mat = _estimate_covariance(matrix, dof, method)
    return cov_mat


def prec_from_unbalanced(dataset, obs_desc, dof=None, method='shrinkage_diag'):
    """ Estimates the covariance matrix from measurements and finds its
    multiplicative inverse (= the precision matrix).
    Use 'method' to choose which estimation method is used.

    Args:
        dataset(data.Dataset): rsatoolbox Dataset object
        obs_desc(String): key of the observation descriptor that defines the conditions
        dof(int or list of int): degrees of freedom for covariance estimation
            defaults to n_res - 1, should be corrected for the number
            of regressors in a GLM if applicable.
        method(str): which estimate to use:
            'diag': provides a diagonal matrix, i.e. univariate noise normalizer
            'full': computes the sample covariance without shrinkage
            'shrinkage_eye': shrinks the data covariance towards a multiple of the identity.
            'shrinkage_diag': shrinks the covariance matrix towards the diagonal covariance matrix.

    Returns:
        numpy.ndarray (or list): sigma_p: precision matrix over channels
    """
    cov = cov_from_unbalanced(dataset, obs_desc, dof=dof, method=method)
    if not isinstance(cov, np.ndarray):
        prec = [None] * len(cov)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    elif len(cov.shape) > 2:
        prec = np.zeros(cov.shape)
        for i, cov_i in enumerate(cov):
            prec[i] = np.linalg.inv(cov_i)
    else:
        prec = np.linalg.inv(cov)
    return prec
[ "numpy.outer", "numpy.sum", "numpy.eye", "rsatoolbox.util.data_utils.get_unique_inverse", "rsatoolbox.data.average_dataset_by", "numpy.zeros", "numpy.einsum", "numpy.expand_dims", "numpy.mean", "numpy.linalg.inv", "numpy.diag", "numpy.sqrt" ]
[((3735, 3779), 'numpy.zeros', 'np.zeros', (['(matrix.shape[1], matrix.shape[1])'], {}), '((matrix.shape[1], matrix.shape[1]))\n', (3743, 3779), True, 'import numpy as np\n'),
 ((3793, 3837), 'numpy.zeros', 'np.zeros', (['(matrix.shape[1], matrix.shape[1])'], {}), '((matrix.shape[1], matrix.shape[1]))\n', (3801, 3837), True, 'import numpy as np\n'),
 ((5267, 5311), 'numpy.zeros', 'np.zeros', (['(matrix.shape[1], matrix.shape[1])'], {}), '((matrix.shape[1], matrix.shape[1]))\n', (5275, 5311), True, 'import numpy as np\n'),
 ((5325, 5369), 'numpy.zeros', 'np.zeros', (['(matrix.shape[1], matrix.shape[1])'], {}), '((matrix.shape[1], matrix.shape[1]))\n', (5333, 5369), True, 'import numpy as np\n'),
 ((5516, 5526), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (5523, 5526), True, 'import numpy as np\n'),
 ((5537, 5549), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (5544, 5549), True, 'import numpy as np\n'),
 ((6520, 6561), 'numpy.einsum', 'np.einsum', (['"""ij, ik-> ijk"""', 'tensor', 'tensor'], {}), "('ij, ik-> ijk', tensor, tensor)\n", (6529, 6561), True, 'import numpy as np\n'),
 ((6570, 6591), 'numpy.mean', 'np.mean', (['xt_x'], {'axis': '(0)'}), '(xt_x, axis=0)\n', (6577, 6591), True, 'import numpy as np\n'),
 ((2986, 3026), 'numpy.einsum', 'np.einsum', (['"""ij, ik-> jk"""', 'matrix', 'matrix'], {}), "('ij, ik-> jk', matrix, matrix)\n", (2995, 3026), True, 'import numpy as np\n'),
 ((3879, 3903), 'numpy.outer', 'np.outer', (['m_line', 'm_line'], {}), '(m_line, m_line)\n', (3887, 3903), True, 'import numpy as np\n'),
 ((3995, 4035), 'numpy.sum', 'np.sum', (['(s2_sum / matrix.shape[0] - s * s)'], {}), '(s2_sum / matrix.shape[0] - s * s)\n', (4001, 4035), True, 'import numpy as np\n'),
 ((5411, 5435), 'numpy.outer', 'np.outer', (['m_line', 'm_line'], {}), '(m_line, m_line)\n', (5419, 5435), True, 'import numpy as np\n'),
 ((5827, 5860), 'numpy.eye', 'np.eye', (['s.shape[0]'], {'dtype': 'np.bool'}), '(s.shape[0], dtype=np.bool)\n', (5833, 5860), True, 'import numpy as np\n'),
 ((5872, 5893), 'numpy.sum', 'np.sum', (['var_hat[mask]'], {}), '(var_hat[mask])\n', (5878, 5893), True, 'import numpy as np\n'),
 ((5896, 5921), 'numpy.sum', 'np.sum', (['(s_mean[mask] ** 2)'], {}), '(s_mean[mask] ** 2)\n', (5902, 5921), True, 'import numpy as np\n'),
 ((5968, 5986), 'numpy.eye', 'np.eye', (['s.shape[0]'], {}), '(s.shape[0])\n', (5974, 5986), True, 'import numpy as np\n'),
 ((15071, 15108), 'rsatoolbox.data.average_dataset_by', 'average_dataset_by', (['dataset', 'obs_desc'], {}), '(dataset, obs_desc)\n', (15089, 15108), False, 'from rsatoolbox.data import average_dataset_by\n'),
 ((15135, 15188), 'rsatoolbox.util.data_utils.get_unique_inverse', 'get_unique_inverse', (['dataset.obs_descriptors[obs_desc]'], {}), '(dataset.obs_descriptors[obs_desc])\n', (15153, 15188), False, 'from rsatoolbox.util.data_utils import get_unique_inverse\n'),
 ((877, 915), 'numpy.mean', 'np.mean', (['matrix'], {'axis': '(0)', 'keepdims': '(True)'}), '(matrix, axis=0, keepdims=True)\n', (884, 915), True, 'import numpy as np\n'),
 ((995, 1033), 'numpy.mean', 'np.mean', (['matrix'], {'axis': '(2)', 'keepdims': '(True)'}), '(matrix, axis=2, keepdims=True)\n', (1002, 1033), True, 'import numpy as np\n'),
 ((2578, 2617), 'numpy.einsum', 'np.einsum', (['"""ij, ij-> j"""', 'matrix', 'matrix'], {}), "('ij, ij-> j', matrix, matrix)\n", (2587, 2617), True, 'import numpy as np\n'),
 ((4183, 4193), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (4190, 4193), True, 'import numpy as np\n'),
 ((4340, 4358), 'numpy.eye', 'np.eye', (['s.shape[0]'], {}), '(s.shape[0])\n', (4346, 4358), True, 'import numpy as np\n'),
 ((5596, 5618), 'numpy.expand_dims', 'np.expand_dims', (['std', '(1)'], {}), '(std, 1)\n', (5610, 5618), True, 'import numpy as np\n'),
 ((5691, 5713), 'numpy.expand_dims', 'np.expand_dims', (['var', '(1)'], {}), '(var, 1)\n', (5705, 5713), True, 'import numpy as np\n'),
 ((9543, 9563), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (9556, 9563), True, 'import numpy as np\n'),
 ((9608, 9627), 'numpy.zeros', 'np.zeros', (['cov.shape'], {}), '(cov.shape)\n', (9616, 9627), True, 'import numpy as np\n'),
 ((9736, 9754), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (9749, 9754), True, 'import numpy as np\n'),
 ((13011, 13031), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (13024, 13031), True, 'import numpy as np\n'),
 ((13076, 13095), 'numpy.zeros', 'np.zeros', (['cov.shape'], {}), '(cov.shape)\n', (13084, 13095), True, 'import numpy as np\n'),
 ((13204, 13222), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (13217, 13222), True, 'import numpy as np\n'),
 ((16698, 16718), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (16711, 16718), True, 'import numpy as np\n'),
 ((16763, 16782), 'numpy.zeros', 'np.zeros', (['cov.shape'], {}), '(cov.shape)\n', (16771, 16782), True, 'import numpy as np\n'),
 ((16891, 16909), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (16904, 16909), True, 'import numpy as np\n'),
 ((5571, 5593), 'numpy.expand_dims', 'np.expand_dims', (['std', '(0)'], {}), '(std, 0)\n', (5585, 5593), True, 'import numpy as np\n'),
 ((5666, 5688), 'numpy.expand_dims', 'np.expand_dims', (['var', '(0)'], {}), '(var, 0)\n', (5680, 5688), True, 'import numpy as np\n'),
 ((9690, 9710), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (9703, 9710), True, 'import numpy as np\n'),
 ((13158, 13178), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (13171, 13178), True, 'import numpy as np\n'),
 ((16845, 16865), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_i'], {}), '(cov_i)\n', (16858, 16865), True, 'import numpy as np\n'),
 ((4233, 4251), 'numpy.eye', 'np.eye', (['s.shape[0]'], {}), '(s.shape[0])\n', (4239, 4251), True, 'import numpy as np\n')]
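A short usage sketch for the shrinkage precision estimate defined above; the residual data here is simulated purely for illustration:

# Illustrative sketch only; assumes the functions above are importable.
import numpy as np

np.random.seed(0)
residuals = np.random.randn(120, 30)   # 120 residual vectors over 30 channels
prec = prec_from_residuals(residuals, method='shrinkage_diag')
print(prec.shape)                       # (30, 30)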
import sys

import h5py
import numpy as np
import torch
from torch.autograd import Variable


def print_args(args):
    print("===== Experiment Configuration =====")
    options = vars(args)
    for key, value in options.items():
        print(f'{key}: {value}')
    print("====================================")


def rand_float(lo, hi):
    return np.random.rand() * (hi - lo) + lo


def rand_int(lo, hi):
    return np.random.randint(lo, hi)


def calc_dis(a, b):
    return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)


def norm(x, p=2):
    return np.power(np.sum(x ** p), 1. / p)


def store_data(data_names, data, path):
    hf = h5py.File(path, 'w')
    for i in range(len(data_names)):
        hf.create_dataset(data_names[i], data=data[i])
    hf.close()


def load_data(data_names, path):
    hf = h5py.File(path, 'r')
    data = []
    for i in range(len(data_names)):
        d = np.array(hf.get(data_names[i]))
        data.append(d)
    hf.close()
    return data


def combine_stat(stat_0, stat_1):
    mean_0, std_0, n_0 = stat_0[:, 0], stat_0[:, 1], stat_0[:, 2]
    mean_1, std_1, n_1 = stat_1[:, 0], stat_1[:, 1], stat_1[:, 2]
    mean = (mean_0 * n_0 + mean_1 * n_1) / (n_0 + n_1)
    std = np.sqrt(
        (std_0 ** 2 * n_0 + std_1 ** 2 * n_1 +
         (mean_0 - mean) ** 2 * n_0 + (mean_1 - mean) ** 2 * n_1) / (n_0 + n_1))
    n = n_0 + n_1
    return np.stack([mean, std, n], axis=-1)


def init_stat(dim):
    # mean, std, count
    return np.zeros((dim, 3))


def var_norm(x):
    return torch.sqrt((x ** 2).sum()).item()


def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def get_flat(x, keep_dim=False):
    if keep_dim:
        return x.reshape(torch.Size([1, x.size(0) * x.size(1)]) + x.size()[2:])
    return x.reshape(torch.Size([x.size(0) * x.size(1)]) + x.size()[2:])


def to_var(tensor, use_gpu, requires_grad=False):
    if use_gpu:
        return Variable(torch.FloatTensor(tensor).cuda(), requires_grad=requires_grad)
    else:
        return Variable(torch.FloatTensor(tensor), requires_grad=requires_grad)


def to_np(x):
    return x.detach().cpu().numpy()


def mix_iters(iters):
    table = []
    for i, it in enumerate(iters):
        table += [i] * len(it)
    np.random.shuffle(table)
    for i in table:
        # advance the chosen iterator (Python 3 has no .next() method)
        yield next(iters[i])


class Tee(object):
    def __init__(self, name, mode):
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self

    def __del__(self):
        sys.stdout = self.stdout
        self.file.close()

    def write(self, data):
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        self.file.flush()

    def close(self):
        self.__del__()


class AverageMeter(object):
    def __init__(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
[ "numpy.stack", "h5py.File", "numpy.sum", "numpy.zeros", "torch.FloatTensor", "numpy.random.randint", "numpy.random.rand", "numpy.random.shuffle", "numpy.sqrt" ]
[((417, 442), 'numpy.random.randint', 'np.random.randint', (['lo', 'hi'], {}), '(lo, hi)\n', (434, 442), True, 'import numpy as np\n'), ((476, 524), 'numpy.sqrt', 'np.sqrt', (['((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)'], {}), '((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n', (483, 524), True, 'import numpy as np\n'), ((640, 660), 'h5py.File', 'h5py.File', (['path', '"""w"""'], {}), "(path, 'w')\n", (649, 660), False, 'import h5py\n'), ((812, 832), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (821, 832), False, 'import h5py\n'), ((1216, 1338), 'numpy.sqrt', 'np.sqrt', (['((std_0 ** 2 * n_0 + std_1 ** 2 * n_1 + (mean_0 - mean) ** 2 * n_0 + (\n mean_1 - mean) ** 2 * n_1) / (n_0 + n_1))'], {}), '((std_0 ** 2 * n_0 + std_1 ** 2 * n_1 + (mean_0 - mean) ** 2 * n_0 +\n (mean_1 - mean) ** 2 * n_1) / (n_0 + n_1))\n', (1223, 1338), True, 'import numpy as np\n'), ((1374, 1407), 'numpy.stack', 'np.stack', (['[mean, std, n]'], {'axis': '(-1)'}), '([mean, std, n], axis=-1)\n', (1382, 1407), True, 'import numpy as np\n'), ((1464, 1482), 'numpy.zeros', 'np.zeros', (['(dim, 3)'], {}), '((dim, 3))\n', (1472, 1482), True, 'import numpy as np\n'), ((2266, 2290), 'numpy.random.shuffle', 'np.random.shuffle', (['table'], {}), '(table)\n', (2283, 2290), True, 'import numpy as np\n'), ((565, 579), 'numpy.sum', 'np.sum', (['(x ** p)'], {}), '(x ** p)\n', (571, 579), True, 'import numpy as np\n'), ((348, 364), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (362, 364), True, 'import numpy as np\n'), ((2045, 2070), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (2062, 2070), False, 'import torch\n'), ((1948, 1973), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (1965, 1973), False, 'import torch\n')]
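A sketch of the HDF5 round trip implemented by store_data/load_data above; the file name and array contents are illustrative:

# Illustrative sketch only; assumes the utilities above are importable.
import numpy as np

data_names = ['positions', 'velocities']
data = [np.zeros((10, 3)), np.ones((10, 3))]
store_data(data_names, data, 'example.h5')
positions, velocities = load_data(data_names, 'example.h5')
print(positions.shape, velocities.shape)   # (10, 3) (10, 3)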
import numpy as np


class LinearConstraints():

    def __init__(self, A, b, mode='Intersection'):
        """
        Defines linear functions f(x) = Ax + b. The integration domain is the
        intersection of the regions where all of these functions are positive
        when mode='Intersection', or the union of the regions where any of the
        functions is positive when mode='Union'.
        :param A: matrix A with shape (M, D) where M is the number of constraints and D the dimension
        :param b: offset, shape (M, 1)
        """
        self.A = A
        self.b = b
        self.N_constraints = b.shape[0]
        self.N_dim = A.shape[1]
        self.mode = mode

    def evaluate(self, x):
        """
        Evaluate linear functions at N locations x
        :param x: location, shape (D, N)
        :return: Ax + b
        """
        return np.dot(self.A, x) + self.b

    def integration_domain(self, x):
        """
        is 1 if x is in the integration domain, else 0
        :param x: location, shape (D, N)
        :return: either self.indicator_union or self.indicator_intersection, depending on setting of self.mode
        """
        if self.mode == 'Union':
            return self.indicator_union(x)
        elif self.mode == 'Intersection':
            return self.indicator_intersection(x)
        else:
            raise NotImplementedError

    def indicator_intersection(self, x):
        """
        Intersection of indicator functions taken to be 1 when the linear function is >= 0
        :param x: location, shape (D, N)
        :return: 1 if all linear functions are >= 0, else 0.
        """
        return np.where(self.evaluate(x) >= 0, 1, 0).prod(axis=0)

    def indicator_union(self, x):
        """
        Union of indicator functions taken to be 1 when the linear function is >= 0
        :param x: location, shape (D, N)
        :return: 1 if any of the linear functions is >= 0, else 0.
        """
        return 1 - (np.where(self.evaluate(x) >= 0, 0, 1)).prod(axis=0)


class ShiftedLinearConstraints(LinearConstraints):

    def __init__(self, A, b, shift):
        """
        Class for shifted linear constraints that appear in multilevel
        splitting method
        :param A: matrix A with shape (M, D) where M is the number of constraints and D the dimension
        :param b: offset, shape (M, 1)
        :param shift: (positive) scalar value denoting the shift
        """
        self.shift = shift
        super(ShiftedLinearConstraints, self).__init__(A, b + shift)
[ "numpy.dot" ]
[((832, 849), 'numpy.dot', 'np.dot', (['self.A', 'x'], {}), '(self.A, x)\n', (838, 849), True, 'import numpy as np\n')]
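A usage sketch for the class above: two half-planes in 2D (x >= 1 and y >= 0), with the intersection indicator evaluated at two sample points; the numbers are illustrative:

# Illustrative sketch only; assumes LinearConstraints above is importable.
import numpy as np

A = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[-1.0], [0.0]])          # encodes x - 1 >= 0 and y >= 0
lc = LinearConstraints(A, b, mode='Intersection')
x = np.array([[2.0, 0.0], [1.0, -1.0]])  # two points as columns, shape (D, N)
print(lc.integration_domain(x))          # [1 0]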
# Imports
import math
import numpy as np
import matplotlib.pyplot as plt


class ParkingTrajectoryGenerator:

    # Class Variables

    # Vehicle Parameters
    __l = 0.356    # length between front and rear axle in m
    __b = 0.37     # width of car in m
    __l_1 = 0.12   # length between front axle and bumper in m
    __l_2 = 0.108  # length between rear axle and bumper in m
    __alpha_max = math.radians(45)  # maximum steering angle in rad
    # alpha_c = alpha_max  # constant steering angle in rad
    __rho_min = 1/math.tan(__alpha_max)  # radius of the turning cycle of the car in m

    # Driving lane and parking spot parameters
    __h_cd = 0.974 - 2*0.03     # width of driving lane in m
    __h_pd = (0.96 - 3*0.02)/2  # width of parking space in m
    __h_pw = 0.85               # depth of parking space in m
    __h_ps = (__h_pd - __b)/2   # = h_pr = h_pl = h_ps -> for symmetrical parking -> space between car and parking space boundaries in m

    # Parameters for calculation of the Trajectory Points
    __num_points_per_segment = 100
    __pull_out_left_straight_offset = 0.2
    __r_B2 = math.sqrt((__l + __l_1)**2 + (__rho_min + __b/2)**2)
    __s_m = -math.sqrt((__rho_min - __b/2)**2 - (__rho_min - __h_pd/2)**2)
    __s_max = __h_cd - __r_B2
    __s = max(abs(__s_m), abs(__s_max))

    # Points of Parking Trajectory
    __parkingTrajectoryPoints_x_rear_axle = np.zeros(2*__num_points_per_segment)
    __parkingTrajectoryPoints_y_rear_axle = np.zeros(2*__num_points_per_segment)
    #__parkingTrajectoryPoints_x_front_axle = np.zeros(2*__num_points_per_segment)
    #__parkingTrajectoryPoints_y_front_axle = np.zeros(2*__num_points_per_segment)
    __pullOutLeftTrajectoryPoints_x_rear_axle = np.zeros(2*__num_points_per_segment)
    __pullOutLeftTrajectoryPoints_y_rear_axle = np.zeros(2*__num_points_per_segment)
    #__pullOutLeftTrajectoryPoints_x_front_axle = np.zeros(2*__num_points_per_segment)
    #__pullOutLeftTrajectoryPoints_y_front_axle = np.zeros(2*__num_points_per_segment)

    # Heading of Parking Trajectory
    __parkingTrajectoryHeading_rear_axle = np.zeros(2*__num_points_per_segment)

    # Parameter for Representing Circle Arc as Polynomial (Bezier)
    __c = 0.55191502449

    # Parameters of Steering Angle Controller (Saturated Control) from Paper
    __K_t = 8
    __K = 5.85
    __a_0 = 0.17
    __u = np.tan(__alpha_max)/__l

    # Vehicle Heading for test purposes (idealised)
    __theta = np.zeros(2*__num_points_per_segment)

    # Constructor
    def __init__(self, targetParkingSpot_x=0, targetParkingSpot_y=0):
        self.__targetPoint_x_rear_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps
        self.__targetPoint_y_rear_axle = targetParkingSpot_y
        self.__targetPoint_x_front_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps - self.__l
        self.__targetPoint_y_front_axle = targetParkingSpot_y
        self.calcParkingTrajectory()
        self.calcPullOutLeftTrajectory()

    # Setter
    def setTargetParkingSpot(self, targetParkingSpot_x=0, targetParkingSpot_y=0):
        self.__targetPoint_x_rear_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps
        self.__targetPoint_y_rear_axle = targetParkingSpot_y
        self.__targetPoint_x_front_axle = targetParkingSpot_x + self.__h_pw - self.__l_2 - self.__h_ps - self.__l
        self.__targetPoint_y_front_axle = targetParkingSpot_y
        self.calcParkingTrajectory()
        self.calcPullOutLeftTrajectory()

    # Getter
    def getParkingStartPoint(self):
        return self.__parkingTrajectoryPoints_x_rear_axle[-1], self.__parkingTrajectoryPoints_y_rear_axle[-1]

    def getParkingEndPoint(self):
        return self.__targetPoint_x_rear_axle, self.__targetPoint_y_rear_axle

    def getParkingTrajectoryPolynomials(self):
        return (self.__parkingTrajectory_polynomial_coefficients_circle_arc_x,
                self.__parkingTrajectory_polynomial_coefficients_circle_arc_y,
                self.__parkingTrajectory_polynomial_coefficients_straight_x,
                self.__parkingTrajectory_polynomial_coefficients_straight_y)

    def getPullOutLeftTrajectoryPolynomials(self):
        return (self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x,
                self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y,
                self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x,
                self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y)

    # Functions
    def calcParkingTrajectory(self):  # = Pull Out Right Trajectory
        # Target Point Rear End of the Parking Spot (Rear end of the axle)
        S_x_rear_axle = self.__targetPoint_x_rear_axle - self.__h_pw + self.__l_2 + self.__h_ps + self.__s
        S_y_rear_axle = self.__targetPoint_y_rear_axle
        #S_x_front_axle = self.targetPoint_x_front_axle - self.h_pw + self.l_2 + self.h_ps + self.s + self.l
        #S_y_front_axle = self.targetPoint_y_front_axle

        O_x_rear_axle = S_x_rear_axle
        O_y_rear_axle = S_y_rear_axle + self.__rho_min
        #O_x_front_axle = S_x_front_axle
        #O_y_front_axle = S_y_front_axle + self.rho_min

        # Points on Unit circle with Origin O
        P_0_circle_arc_x = O_x_rear_axle
        P_0_circle_arc_y = O_y_rear_axle - 1
        P_1_circle_arc_x = O_x_rear_axle - self.__c
        P_1_circle_arc_y = O_y_rear_axle - 1
        P_2_circle_arc_x = O_x_rear_axle - 1
        P_2_circle_arc_y = O_y_rear_axle - self.__c
        P_3_circle_arc_x = O_x_rear_axle - 1
        P_3_circle_arc_y = O_y_rear_axle

        # Polynomial of the circle arc
        self.__parkingTrajectory_polynomial_coefficients_circle_arc_x = np.poly1d(
            [self.__rho_min*(P_3_circle_arc_x + 3.*P_1_circle_arc_x - 3.*P_2_circle_arc_x - P_0_circle_arc_x),
             self.__rho_min*3*(P_2_circle_arc_x - 2*P_1_circle_arc_x + P_0_circle_arc_x),
             self.__rho_min*3*(P_1_circle_arc_x - P_0_circle_arc_x),
             self.__rho_min*P_0_circle_arc_x])
        self.__parkingTrajectory_polynomial_coefficients_circle_arc_y = np.poly1d(
            [self.__rho_min*(P_3_circle_arc_y + 3.*P_1_circle_arc_y - 3.*P_2_circle_arc_y - P_0_circle_arc_y),
             self.__rho_min*3*(P_2_circle_arc_y - 2*P_1_circle_arc_y + P_0_circle_arc_y),
             self.__rho_min*3*(P_1_circle_arc_y - P_0_circle_arc_y),
             self.__rho_min*P_0_circle_arc_y])

        # Polynomial of the straight
        self.__parkingTrajectory_polynomial_coefficients_straight_x = np.poly1d([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.__targetPoint_x_rear_axle])
        self.__parkingTrajectory_polynomial_coefficients_straight_y = np.poly1d([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.__targetPoint_y_rear_axle])

        self.__parkingTrajectoryPoints_x_rear_axle[ : self.__num_points_per_segment] = np.linspace(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.__num_points_per_segment)
        self.__parkingTrajectoryPoints_y_rear_axle[ : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*self.__targetPoint_y_rear_axle
        #self.__parkingTrajectoryHeading_rear_axle[ : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*math.pi
        #self.parkingTrajectoryPoints_x_front_axle[0 : self.num_points_per_segment] = np.linspace(self.targetPoint_x_front_axle, S_x_front_axle, self.num_points_per_segment)
        #self.parkingTrajectoryPoints_y_front_axle[0 : self.num_points_per_segment] = np.ones(self.num_points_per_segment)*self.targetPoint_y_front_axle

        circle_arc_angle = np.linspace(math.pi, math.pi*(3/2), self.__num_points_per_segment)
        #heading_angle = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)

        # Vehicle Heading for test
        self.__theta[ : self.__num_points_per_segment] = math.pi
        self.__theta[self.__num_points_per_segment : ] = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)

        #i = self.__num_points_per_segment
        #for angle in circle_arc_angle :
        self.__parkingTrajectoryPoints_x_rear_axle[self.__num_points_per_segment : ] = self.__rho_min*np.cos(circle_arc_angle) + O_x_rear_axle
        self.__parkingTrajectoryPoints_y_rear_axle[self.__num_points_per_segment : ] = self.__rho_min*np.sin(circle_arc_angle) + O_y_rear_axle
        #self.__parkingTrajectoryPoints_x_front_axle[ : self.__num_points_per_segment] = self.__parkingTrajectoryPoints_x_rear_axle[ : self.__num_points_per_segment] - self.__l
        #self.__parkingTrajectoryPoints_y_front_axle[ : self.__num_points_per_segment] = self.__parkingTrajectoryPoints_y_rear_axle[ : self.__num_points_per_segment]
        #self.__parkingTrajectoryPoints_x_front_axle[self.__num_points_per_segment : ] = self.__parkingTrajectoryPoints_x_rear_axle[self.__num_points_per_segment : ] + np.cos(self.__theta[self.__num_points_per_segment : ])*self.__l
        #self.__parkingTrajectoryPoints_y_front_axle[self.__num_points_per_segment : ] = self.__parkingTrajectoryPoints_y_rear_axle[self.__num_points_per_segment : ] + np.sin(self.__theta[self.__num_points_per_segment : ])*self.__l
        #self.__parkingTrajectoryHeading_rear_axle[self.__num_points_per_segment : ] = heading_angle
        #self.parkingTrajectoryPoints_x_front_axle[i] = self.rho_min*math.cos(angle) + O_x_front_axle
        #self.parkingTrajectoryPoints_y_front_axle[i] = self.rho_min*math.sin(angle) + O_y_front_axle
        # i += 1

        # Printing
        #t = np.linspace(0, 1, 100)
        #poly_circle_arc_x = self.__parkingTrajectory_polynomial_coefficients_circle_arc_x(t)
        #poly_circle_arc_y = self.__parkingTrajectory_polynomial_coefficients_circle_arc_y(t)
        #poly_straight_x = self.__parkingTrajectory_polynomial_coefficients_straight_x(t)
        #poly_straight_y = self.__parkingTrajectory_polynomial_coefficients_straight_y(t)
        #plt.plot(self.__parkingTrajectoryPoints_x_rear_axle, self.__parkingTrajectoryPoints_y_rear_axle, 'b.')
        #plt.plot(poly_circle_arc_x, poly_circle_arc_y, 'r.')
        #plt.plot(poly_straight_x, poly_straight_y, 'r.')
        #plt.show()
        #plt.stem(self.__parkingTrajectoryHeading_rear_axle)
        #plt.show()

        return (self.__parkingTrajectory_polynomial_coefficients_circle_arc_x,
                self.__parkingTrajectory_polynomial_coefficients_circle_arc_y,
                self.__parkingTrajectory_polynomial_coefficients_straight_x,
                self.__parkingTrajectory_polynomial_coefficients_straight_y)

    def calcPullOutLeftTrajectory(self):
        # Target Point Rear End of the Parking Spot (Rear end of the axle)
        S_x_rear_axle = self.__targetPoint_x_rear_axle - self.__h_pw + self.__l_2 + self.__h_ps + self.__s - self.__pull_out_left_straight_offset
        S_y_rear_axle = self.__targetPoint_y_rear_axle
        #S_x_front_axle = self.targetPoint_x_front_axle - self.h_pw + self.l_2 + self.h_ps + self.s + self.l
        #S_y_front_axle = self.targetPoint_y_front_axle

        O_x_rear_axle = S_x_rear_axle
        O_y_rear_axle = S_y_rear_axle - self.__rho_min
        #O_x_front_axle = S_x_front_axle
        #O_y_front_axle = S_y_front_axle + self.rho_min

        # Points on Unit circle with Origin O
        P_0_circle_arc_x = O_x_rear_axle - 1
        P_0_circle_arc_y = O_y_rear_axle
        P_1_circle_arc_x = O_x_rear_axle - 1
        P_1_circle_arc_y = O_y_rear_axle + self.__c
        P_2_circle_arc_x = O_x_rear_axle - self.__c
        P_2_circle_arc_y = O_y_rear_axle + 1
        P_3_circle_arc_x = O_x_rear_axle
        P_3_circle_arc_y = O_y_rear_axle + 1

        # Polynomial of the circle arc
        self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x = np.poly1d(
            [self.__rho_min*(P_3_circle_arc_x + 3.*P_1_circle_arc_x - 3.*P_2_circle_arc_x - P_0_circle_arc_x),
             self.__rho_min*3*(P_2_circle_arc_x - 2*P_1_circle_arc_x + P_0_circle_arc_x),
             self.__rho_min*3*(P_1_circle_arc_x - P_0_circle_arc_x),
             self.__rho_min*P_0_circle_arc_x])
        self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y = np.poly1d(
            [self.__rho_min*(P_3_circle_arc_y + 3.*P_1_circle_arc_y - 3.*P_2_circle_arc_y - P_0_circle_arc_y),
             self.__rho_min*3*(P_2_circle_arc_y - 2*P_1_circle_arc_y + P_0_circle_arc_y),
             self.__rho_min*3*(P_1_circle_arc_y - P_0_circle_arc_y),
             self.__rho_min*P_0_circle_arc_y])

        # Polynomial of the straight
        self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x = np.poly1d([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.__targetPoint_x_rear_axle])
        self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y = np.poly1d([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.__targetPoint_y_rear_axle])

        self.__pullOutLeftTrajectoryPoints_x_rear_axle[0 : self.__num_points_per_segment] = np.linspace(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.__num_points_per_segment)
        self.__pullOutLeftTrajectoryPoints_y_rear_axle[0 : self.__num_points_per_segment] = np.ones(self.__num_points_per_segment)*self.__targetPoint_y_rear_axle
        #self.parkingTrajectoryPoints_x_front_axle[0 : self.num_points_per_segment] = np.linspace(self.targetPoint_x_front_axle, S_x_front_axle, self.num_points_per_segment)
        #self.parkingTrajectoryPoints_y_front_axle[0 : self.num_points_per_segment] = np.ones(self.num_points_per_segment)*self.targetPoint_y_front_axle

        circle_arc_angle = np.linspace(math.pi, math.pi/2, self.__num_points_per_segment)

        i = self.__num_points_per_segment
        for angle in circle_arc_angle:
            self.__pullOutLeftTrajectoryPoints_x_rear_axle[i] = self.__rho_min*np.cos(angle) + O_x_rear_axle
            self.__pullOutLeftTrajectoryPoints_y_rear_axle[i] = self.__rho_min*np.sin(angle) + O_y_rear_axle
            #self.parkingTrajectoryPoints_x_front_axle[i] = self.rho_min*math.cos(angle) + O_x_front_axle
            #self.parkingTrajectoryPoints_y_front_axle[i] = self.rho_min*math.sin(angle) + O_y_front_axle
            i += 1

        # Printing
        #t = np.linspace(0, 1, 100)
        #poly_circle_arc_x = self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x(t)
        #poly_circle_arc_y = self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y(t)
        #poly_straight_x = self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x(t)
        #poly_straight_y = self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y(t)
        #plt.plot(self.__parkingTrajectoryPoints_x_rear_axle, self.__parkingTrajectoryPoints_y_rear_axle, 'b.')
        #plt.plot(self.__pullOutLeftTrajectoryPoints_x_rear_axle, self.__pullOutLeftTrajectoryPoints_y_rear_axle, 'b.')
        #plt.plot(poly_circle_arc_x, poly_circle_arc_y, 'r.')
        #plt.plot(poly_straight_x, poly_straight_y, 'r.')
        #plt.show()

        return (self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_x,
                self.__pullOutLeftTrajectory_polynomial_coefficients_circle_arc_y,
                self.__pullOutLeftTrajectory_polynomial_coefficients_straight_x,
                self.__pullOutLeftTrajectory_polynomial_coefficients_straight_y)

    def getSteeringAngle(self, actualPoint_y, vehicle_heading):
        theta = vehicle_heading - math.pi
        print(theta)
        v = self.__K*(theta - self.__a_0*actualPoint_y)
        alpha = np.arctan(self.__l*self.__u*np.tanh(self.__K_t*v))
        return alpha


ParkingTrajectoryGenerator1 = ParkingTrajectoryGenerator()
[a, b, c, d] = ParkingTrajectoryGenerator1.getParkingTrajectoryPolynomials()
print(a)
print(b)
print(c)
print(d)

#plt.plot(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_x_front_axle, ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_front_axle, 'b.', ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_x_rear_axle, ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle, 'r.')
#plt.show()

steering_angle = np.zeros(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle.size)
i = 0
for elem in ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle:
    steering_angle[i] = ParkingTrajectoryGenerator1.getSteeringAngle(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle[i], ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta[i])
    i += 1

plt.stem(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta)
plt.show()
plt.stem(np.degrees(steering_angle))
plt.show()

#ParkingTrajectoryGenerator1.calcPullOutLeftTrajectory()
[ "numpy.poly1d", "matplotlib.pyplot.show", "numpy.tanh", "math.sqrt", "numpy.degrees", "math.radians", "math.tan", "numpy.zeros", "matplotlib.pyplot.stem", "numpy.ones", "numpy.tan", "numpy.sin", "numpy.linspace", "numpy.cos" ]
[((15864, 15976), 'numpy.zeros', 'np.zeros', (['ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle.size'], {}), '(ParkingTrajectoryGenerator1.\n _ParkingTrajectoryGenerator__parkingTrajectoryPoints_y_rear_axle.size)\n', (15872, 15976), True, 'import numpy as np\n'), ((16330, 16402), 'matplotlib.pyplot.stem', 'plt.stem', (['ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta'], {}), '(ParkingTrajectoryGenerator1._ParkingTrajectoryGenerator__theta)\n', (16338, 16402), True, 'import matplotlib.pyplot as plt\n'), ((16403, 16413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16411, 16413), True, 'import matplotlib.pyplot as plt\n'), ((16452, 16462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16460, 16462), True, 'import matplotlib.pyplot as plt\n'), ((394, 410), 'math.radians', 'math.radians', (['(45)'], {}), '(45)\n', (406, 410), False, 'import math\n'), ((1082, 1140), 'math.sqrt', 'math.sqrt', (['((__l + __l_1) ** 2 + (__rho_min + __b / 2) ** 2)'], {}), '((__l + __l_1) ** 2 + (__rho_min + __b / 2) ** 2)\n', (1091, 1140), False, 'import math\n'), ((1360, 1398), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (1368, 1398), True, 'import numpy as np\n'), ((1441, 1479), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (1449, 1479), True, 'import numpy as np\n'), ((1692, 1730), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (1700, 1730), True, 'import numpy as np\n'), ((1777, 1815), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (1785, 1815), True, 'import numpy as np\n'), ((2068, 2106), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (2076, 2106), True, 'import numpy as np\n'), ((2422, 2460), 'numpy.zeros', 'np.zeros', (['(2 * __num_points_per_segment)'], {}), '(2 * __num_points_per_segment)\n', (2430, 2460), True, 'import numpy as np\n'), ((16424, 16450), 'numpy.degrees', 'np.degrees', (['steering_angle'], {}), '(steering_angle)\n', (16434, 16450), True, 'import numpy as np\n'), ((520, 541), 'math.tan', 'math.tan', (['__alpha_max'], {}), '(__alpha_max)\n', (528, 541), False, 'import math\n'), ((1148, 1217), 'math.sqrt', 'math.sqrt', (['((__rho_min - __b / 2) ** 2 - (__rho_min - __h_pd / 2) ** 2)'], {}), '((__rho_min - __b / 2) ** 2 - (__rho_min - __h_pd / 2) ** 2)\n', (1157, 1217), False, 'import math\n'), ((2331, 2350), 'numpy.tan', 'np.tan', (['__alpha_max'], {}), '(__alpha_max)\n', (2337, 2350), True, 'import numpy as np\n'), ((5624, 5938), 'numpy.poly1d', 'np.poly1d', (['[self.__rho_min * (P_3_circle_arc_x + 3.0 * P_1_circle_arc_x - 3.0 *\n P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min * 3 * (\n P_2_circle_arc_x - 2 * P_1_circle_arc_x + P_0_circle_arc_x), self.\n __rho_min * 3 * (P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min *\n P_0_circle_arc_x]'], {}), '([self.__rho_min * (P_3_circle_arc_x + 3.0 * P_1_circle_arc_x - \n 3.0 * P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min * 3 * (\n P_2_circle_arc_x - 2 * P_1_circle_arc_x + P_0_circle_arc_x), self.\n __rho_min * 3 * (P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min *\n P_0_circle_arc_x])\n', (5633, 5938), True, 'import numpy as np\n'), ((5972, 6286), 'numpy.poly1d', 'np.poly1d', (['[self.__rho_min * (P_3_circle_arc_y + 3.0 * P_1_circle_arc_y - 3.0 *\n 
P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min * 3 * (\n P_2_circle_arc_y - 2 * P_1_circle_arc_y + P_0_circle_arc_y), self.\n __rho_min * 3 * (P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min *\n P_0_circle_arc_y]'], {}), '([self.__rho_min * (P_3_circle_arc_y + 3.0 * P_1_circle_arc_y - \n 3.0 * P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min * 3 * (\n P_2_circle_arc_y - 2 * P_1_circle_arc_y + P_0_circle_arc_y), self.\n __rho_min * 3 * (P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min *\n P_0_circle_arc_y])\n', (5981, 6286), True, 'import numpy as np\n'), ((6364, 6466), 'numpy.poly1d', 'np.poly1d', (['[0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.\n __targetPoint_x_rear_axle]'], {}), '([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.\n __targetPoint_x_rear_axle])\n', (6373, 6466), True, 'import numpy as np\n'), ((6532, 6634), 'numpy.poly1d', 'np.poly1d', (['[0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.\n __targetPoint_y_rear_axle]'], {}), '([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.\n __targetPoint_y_rear_axle])\n', (6541, 6634), True, 'import numpy as np\n'), ((6718, 6812), 'numpy.linspace', 'np.linspace', (['self.__targetPoint_x_rear_axle', 'S_x_rear_axle', 'self.__num_points_per_segment'], {}), '(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.\n __num_points_per_segment)\n', (6729, 6812), True, 'import numpy as np\n'), ((7453, 7523), 'numpy.linspace', 'np.linspace', (['math.pi', '(math.pi * (3 / 2))', 'self.__num_points_per_segment'], {}), '(math.pi, math.pi * (3 / 2), self.__num_points_per_segment)\n', (7464, 7523), True, 'import numpy as np\n'), ((7767, 7831), 'numpy.linspace', 'np.linspace', (['math.pi', '(math.pi / 2)', 'self.__num_points_per_segment'], {}), '(math.pi, math.pi / 2, self.__num_points_per_segment)\n', (7778, 7831), True, 'import numpy as np\n'), ((11578, 11892), 'numpy.poly1d', 'np.poly1d', (['[self.__rho_min * (P_3_circle_arc_x + 3.0 * P_1_circle_arc_x - 3.0 *\n P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min * 3 * (\n P_2_circle_arc_x - 2 * P_1_circle_arc_x + P_0_circle_arc_x), self.\n __rho_min * 3 * (P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min *\n P_0_circle_arc_x]'], {}), '([self.__rho_min * (P_3_circle_arc_x + 3.0 * P_1_circle_arc_x - \n 3.0 * P_2_circle_arc_x - P_0_circle_arc_x), self.__rho_min * 3 * (\n P_2_circle_arc_x - 2 * P_1_circle_arc_x + P_0_circle_arc_x), self.\n __rho_min * 3 * (P_1_circle_arc_x - P_0_circle_arc_x), self.__rho_min *\n P_0_circle_arc_x])\n', (11587, 11892), True, 'import numpy as np\n'), ((11930, 12244), 'numpy.poly1d', 'np.poly1d', (['[self.__rho_min * (P_3_circle_arc_y + 3.0 * P_1_circle_arc_y - 3.0 *\n P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min * 3 * (\n P_2_circle_arc_y - 2 * P_1_circle_arc_y + P_0_circle_arc_y), self.\n __rho_min * 3 * (P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min *\n P_0_circle_arc_y]'], {}), '([self.__rho_min * (P_3_circle_arc_y + 3.0 * P_1_circle_arc_y - \n 3.0 * P_2_circle_arc_y - P_0_circle_arc_y), self.__rho_min * 3 * (\n P_2_circle_arc_y - 2 * P_1_circle_arc_y + P_0_circle_arc_y), self.\n __rho_min * 3 * (P_1_circle_arc_y - P_0_circle_arc_y), self.__rho_min *\n P_0_circle_arc_y])\n', (11939, 12244), True, 'import numpy as np\n'), ((12326, 12428), 'numpy.poly1d', 'np.poly1d', (['[0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.\n __targetPoint_x_rear_axle]'], {}), '([0, 0, S_x_rear_axle - self.__targetPoint_x_rear_axle, self.\n __targetPoint_x_rear_axle])\n', (12335, 12428), True, 
'import numpy as np\n'), ((12498, 12600), 'numpy.poly1d', 'np.poly1d', (['[0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.\n __targetPoint_y_rear_axle]'], {}), '([0, 0, S_y_rear_axle - self.__targetPoint_y_rear_axle, self.\n __targetPoint_y_rear_axle])\n', (12507, 12600), True, 'import numpy as np\n'), ((12689, 12783), 'numpy.linspace', 'np.linspace', (['self.__targetPoint_x_rear_axle', 'S_x_rear_axle', 'self.__num_points_per_segment'], {}), '(self.__targetPoint_x_rear_axle, S_x_rear_axle, self.\n __num_points_per_segment)\n', (12700, 12783), True, 'import numpy as np\n'), ((13295, 13359), 'numpy.linspace', 'np.linspace', (['math.pi', '(math.pi / 2)', 'self.__num_points_per_segment'], {}), '(math.pi, math.pi / 2, self.__num_points_per_segment)\n', (13306, 13359), True, 'import numpy as np\n'), ((6895, 6933), 'numpy.ones', 'np.ones', (['self.__num_points_per_segment'], {}), '(self.__num_points_per_segment)\n', (6902, 6933), True, 'import numpy as np\n'), ((12871, 12909), 'numpy.ones', 'np.ones', (['self.__num_points_per_segment'], {}), '(self.__num_points_per_segment)\n', (12878, 12909), True, 'import numpy as np\n'), ((8017, 8041), 'numpy.cos', 'np.cos', (['circle_arc_angle'], {}), '(circle_arc_angle)\n', (8023, 8041), True, 'import numpy as np\n'), ((8160, 8184), 'numpy.sin', 'np.sin', (['circle_arc_angle'], {}), '(circle_arc_angle)\n', (8166, 8184), True, 'import numpy as np\n'), ((15216, 15239), 'numpy.tanh', 'np.tanh', (['(self.__K_t * v)'], {}), '(self.__K_t * v)\n', (15223, 15239), True, 'import numpy as np\n'), ((13520, 13533), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (13526, 13533), True, 'import numpy as np\n'), ((13629, 13642), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (13635, 13642), True, 'import numpy as np\n')]
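The extracted np.poly1d calls above encode cubic Bezier components of the parking trajectory in the curve parameter t. A minimal sketch of that coefficient layout, with hypothetical control-point values (the record's source file is not shown here, only its extracted calls):

import numpy as np

# np.poly1d takes coefficients from highest to lowest degree, so the Bezier
# B(t) = (1-t)^3 P0 + 3(1-t)^2 t P1 + 3(1-t) t^2 P2 + t^3 P3 expands to the
# monomial coefficients seen in the extracted np.poly1d calls above.
P0, P1, P2, P3 = 0.0, 0.2, 0.8, 1.0  # hypothetical control-point components
bezier_x = np.poly1d([
    P3 + 3.0 * P1 - 3.0 * P2 - P0,   # t^3 term
    3 * (P2 - 2 * P1 + P0),          # t^2 term
    3 * (P1 - P0),                   # t^1 term
    P0,                              # constant term
])
t = np.linspace(0.0, 1.0, 5)
print(bezier_x(t))  # endpoints evaluate to P0 and P3, as a Bezier curve must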
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 13:07:35 2018

@author: Sunny
"""

import numpy as np
import cv2

print(cv2.__version__)

HEIGHT = 240
WIDTH = 320
RECORD_WIDTH = WIDTH * 3
RECORD_HEIGHT = HEIGHT
FPS = 90

cam = cv2.VideoCapture(0)
cam.set(3, WIDTH)
cam.set(4, HEIGHT)
cam.set(cv2.CAP_PROP_FPS, FPS)
print(cam.get(3))
print(cam.get(4))
print(cam.get(5))
print(cam.get(cv2.CAP_PROP_FPS))

fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('C:\\Users\\Sunny\\Desktop\\saveOutput.avi', fourcc, FPS,
                      (RECORD_WIDTH, RECORD_HEIGHT))

cv2.namedWindow("Live Feed")
flag_record = True

while True:
    ret_current, frame_current = cam.read()
    if not ret_current:
        break

    # Drop the alpha channel and make sure the frame matches the target size.
    rgb_current = cv2.cvtColor(frame_current, cv2.COLOR_RGBA2RGB)
    rgb_current = cv2.resize(rgb_current, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
    horizontal_img = cv2.flip(rgb_current, 1)

    # Side-by-side frame that is displayed and (optionally) recorded.
    numpy_horizontal = np.hstack((horizontal_img, rgb_current, horizontal_img))
    numpy_vertical = np.vstack((numpy_horizontal, numpy_horizontal))  # stacked variant, currently unused

    if flag_record:
        out.write(numpy_horizontal)
    cv2.imshow("Live Feed", numpy_horizontal)

    # Read the keyboard once per frame; a second cv2.waitKey() call would
    # consume a different keypress and make the 'r' toggle unreliable.
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    elif key == ord('r'):
        flag_record = not flag_record

cam.release()
out.release()  # always release the writer so the output file is finalized
cv2.destroyAllWindows()
[ "cv2.resize", "cv2.VideoWriter_fourcc", "cv2.cvtColor", "cv2.waitKey", "cv2.imshow", "numpy.hstack", "cv2.VideoCapture", "numpy.vstack", "cv2.VideoWriter", "cv2.flip", "cv2.destroyAllWindows", "cv2.namedWindow" ]
[((302, 321), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (318, 321), False, 'import cv2\n'), ((543, 574), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (565, 574), False, 'import cv2\n'), ((581, 690), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""C:\\\\Users\\\\Sunny\\\\Desktop\\\\saveOutput.avi"""', 'fourcc', 'FPS', '(RECORD_WIDTH, RECORD_HEIGHT)'], {}), "('C:\\\\Users\\\\Sunny\\\\Desktop\\\\saveOutput.avi', fourcc, FPS, (\n RECORD_WIDTH, RECORD_HEIGHT))\n", (596, 690), False, 'import cv2\n'), ((705, 733), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Live Feed"""'], {}), "('Live Feed')\n", (720, 733), False, 'import cv2\n'), ((1722, 1745), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1743, 1745), False, 'import cv2\n'), ((961, 1008), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_current', 'cv2.COLOR_RGBA2RGB'], {}), '(frame_current, cv2.COLOR_RGBA2RGB)\n', (973, 1008), False, 'import cv2\n'), ((1035, 1106), 'cv2.resize', 'cv2.resize', (['rgb_current', '(WIDTH, HEIGHT)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(rgb_current, (WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)\n', (1045, 1106), False, 'import cv2\n'), ((1126, 1150), 'cv2.flip', 'cv2.flip', (['rgb_current', '(1)'], {}), '(rgb_current, 1)\n', (1134, 1150), False, 'import cv2\n'), ((1210, 1266), 'numpy.hstack', 'np.hstack', (['(horizontal_img, rgb_current, horizontal_img)'], {}), '((horizontal_img, rgb_current, horizontal_img))\n', (1219, 1266), True, 'import numpy as np\n'), ((1288, 1335), 'numpy.vstack', 'np.vstack', (['(numpy_horizontal, numpy_horizontal)'], {}), '((numpy_horizontal, numpy_horizontal))\n', (1297, 1335), True, 'import numpy as np\n'), ((1415, 1456), 'cv2.imshow', 'cv2.imshow', (['"""Live Feed"""', 'numpy_horizontal'], {}), "('Live Feed', numpy_horizontal)\n", (1425, 1456), False, 'import cv2\n'), ((1464, 1478), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1475, 1478), False, 'import cv2\n'), ((1522, 1536), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1533, 1536), False, 'import cv2\n')]
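The key-handling pattern used above generalizes to any OpenCV preview loop. A minimal, self-contained sketch of the single-read idiom (window name and toggle key are arbitrary choices):

import cv2

# Read the keyboard exactly once per frame; calling cv2.waitKey() twice in
# the same iteration can consume two different keypresses and makes toggles
# such as record on/off unreliable.
cap = cv2.VideoCapture(0)
recording = False
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("preview", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    if key == ord('r'):
        recording = not recording  # toggle in one place
cap.release()
cv2.destroyAllWindows()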
import numpy as np from instResp.polezero import polezero from instResp.plotResp import plotResponse import os import logging logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) logger = logging.getLogger(__name__) ''' This module contains a collection of non-bulletproof codes for creating/manipulating instrument response stages, particularly the first stage = analog polezero stage. ''' def evalResp(pz, f): s = 0.000 + 1.000j numerator = 1.000 + 0.000j denominator = 1.000 + 0.000j if pz.type == 'A': s *= 2.*np.pi*f elif pz.type == 'B': s *= f else: logger.warn("Unknown pz response type=[%s]" % pz.type) for j in range(pz.nzeros): numerator *= (s - pz.zeros[j]) for j in range(pz.npoles): denominator *= (s - pz.poles[j]) Gf = numerator * pz.a0 # Make sure this is complex Gf /= denominator return Gf; def getResponse(pz, freqs, removeZero=False, useSensitivity=True): ''' We're expecting a standard IRIS polezero file for displacement, so if velocity=True try to shed one zero at origin ''' if removeZero: success = pz.remove_zero() #zeros = np.zeros((pz.zeros.size-1,), dtype=np.complex128) #success = remove_zero(pz.zeros, zeros) if success: logger.debug("Zero successfully removed from origin") #pz.zeros = zeros #pz.nzeros = zeros.size else: logger.warn("Problem removing zero from origin!") resp = np.zeros((len(freqs),), dtype=np.complex128) for i, f in enumerate(freqs): resp[i] = evalResp(pz, f) if useSensitivity: resp[i] *= pz.sensitivity return resp def read_sacpz_file(filename): """ * ********************************** * NETWORK (KNETWK): AU * STATION (KSTNM): WR1 * LOCATION (KHOLE): * CHANNEL (KCMPNM): BHZ * CREATED : 2017-02-02T01:23:27 * START : 2005-01-31T00:00:00 * END : 2599-12-31T23:59:59 * DESCRIPTION : Warramunga Array, Australia * LATITUDE : -19.942600 * LONGITUDE : 134.339500 * ELEVATION : 389.0 * DEPTH : 0.0 * DIP : 0.0 * AZIMUTH : 0.0 * SAMPLE RATE : 40.0 * INPUT UNIT : M * OUTPUT UNIT : COUNTS * INSTTYPE : Guralp CMG3ESP_30sec_ims/Guralp DM24-MK3 Datalogge * INSTGAIN : 4.000290e+03 (M/S) * COMMENT : V3180 A3242 * SENSITIVITY : 2.797400e+09 (M/S) * A0 : 8.883050e-02 * ********************************** ZEROS 5 +0.000000e+00 +0.000000e+00 +0.000000e+00 +0.000000e+00 +0.000000e+00 +0.000000e+00 +8.670000e+02 +9.050000e+02 +8.670000e+02 -9.050000e+02 POLES 4 -1.486000e-01 +1.486000e-01 -1.486000e-01 -1.486000e-01 -3.140000e+02 +2.023000e+02 -3.140000e+02 -2.023000e+02 CONSTANT 2.484944e+08 """ fname = 'read_sacpz_file' with open(filename, 'r') as f: lines = f.readlines() zeros = None poles = None sensitivity = None a0 = None unitsIn = None unitsOut = None knet = "" ksta = "" kloc = "" kchan = "" for i in range(len(lines)): line = lines[i] #print "i=[%d] line=[%s]" % (i, line) if line[0] == '*': if line[2] != '*': split_list = line.split(':') field = split_list[0][1:] val = split_list[1] # could have val = "" or val = 2.79E9 (M/S) val_list = val.split() nsplit=len(val_list) #print "field=", field, " val=", val if 'SENSITIVITY' in field: sensitivity = float(val_list[0]) elif 'A0' in field: a0 = float(val_list[0]) elif 'INPUT UNIT' in field: unitsIn = val.strip() elif 'OUTPUT UNIT' in field: unitsOut = val.strip() elif 'NETWORK' in field: knet = val.strip() elif 'STATION' in field: ksta = val.strip() elif 'LOCATION' in field: kloc = val.strip() elif 'CHANNEL' in field: kchan = val.strip() elif line[0:5] == 'ZEROS': try: nzeros = int(line[6:len(line)]) except: logger.error("%s.%s Error: can't read nzeros from line=[%s]" % (__name__, fname, line)) exit(1) #zeros = 
np.zeros((nzeros,), dtype=np.complex128) zeros = np.zeros(nzeros, dtype=np.complex128) for j in range(nzeros): i += 1 line = lines[i] (z_re, z_im) = line.split() zeros[j] = complex( float(z_re), float(z_im) ) elif line[0:5] == 'POLES': try: npoles = int(line[6:len(line)]) except: logger.error("%s.%s Error: can't read npoles from line=[%s]" % (__name__, fname, line)) exit(1) poles = np.zeros(npoles, dtype=np.complex128) for j in range(npoles): i += 1 line = lines[i] (p_re, p_im) = line.split() poles[j] = complex( float(p_re), float(p_im) ) #print "knet=%s ksta=%s kloc=%s kchan=%s" % (knet, ksta, kloc, kchan) name = "%s.%s %s.%s" % (knet, ksta, kloc, kchan) pz_ = polezero(name = name, type = 'A', #type = 'A[Laplace Transform (Rad/sec)]', unitsIn = unitsIn, unitsOut = unitsOut, a0 = a0, sensitivity = sensitivity, sensitivity_f = 1.0, poles = poles, zeros = zeros) return pz_ def get_corner_freq_from_pole(pole): ''' get distance [rad/s] from lowest order pole to origin and return Hz [/s] ''' return np.sqrt(pole.real**2 + pole.imag**2) / (2.*np.pi) def test_RC(): from instResp.libNom import RC R = 4. C = 1.25/(2.*np.pi) pzs = RC(tau=R*C) freqs = np.logspace(-5, 4., num=1000) resp = getResponse(pzs, freqs, removeZero=False) title = 'RC filter: R=4 ohms, C=1.25F/2pi' plotResponse(resp, freqs, title=title, xmin=.001, xmax=100., ymin=0.01, ymax=1.2) logger.info("Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[0])) return def test_WA(damp=.18, gain=1., f0=14, fnorm=100.): from instResp.libNom import WA, Accelerometer pzs = WA(per=1/f0, damp=damp, gain=gain, normalize=True, normalize_freq=fnorm) logger.info(pzs) freqs = np.logspace(-5, 4., num=500) resp = getResponse(pzs, freqs, removeZero=False) #print(np.max(np.abs(resp))) title='WA for f0=%.2f Hz damp=%.3f gain=%.0f' % (f0,damp, gain) logger.info("Corner freq:%.2f" % get_corner_freq_from_pole(pzs.poles[0])) plotResponse(resp, freqs, title=title, xmin=1, xmax=5000., ymin=.01, ymax=1.2) return def plot_pz_resp(pzfile=None): pzs = read_sacpz_file(pzfile) logger.info(pzs) freqs = np.logspace(-5, 3., num=500) resp = getResponse(pzs, freqs, removeZero=True, useSensitivity=False) title=pzfile plotResponse(resp, freqs, title=title, xmin=.001, xmax=100., ymin=.01, ymax=1e3) return def main(): #test_RC() test_WA(damp=0.6) exit() pz_dir = '/Users/mth/mth/Data/IRIS_Request/pz/' pz_fil = 'SACPZ.II.AAK.10.BHZ' plot_pz_resp(pzfile=os.path.join(pz_dir, pz_fil)) exit() if __name__=="__main__": main()
[ "instResp.libNom.RC", "logging.basicConfig", "numpy.logspace", "instResp.polezero.polezero", "numpy.zeros", "instResp.libNom.WA", "instResp.plotResp.plotResponse", "os.path.join", "logging.getLogger", "numpy.sqrt" ]
[((129, 204), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(message)s', level=logging.INFO)\n", (148, 204), False, 'import logging\n'), ((214, 241), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (231, 241), False, 'import logging\n'), ((5619, 5765), 'instResp.polezero.polezero', 'polezero', ([], {'name': 'name', 'type': '"""A"""', 'unitsIn': 'unitsIn', 'unitsOut': 'unitsOut', 'a0': 'a0', 'sensitivity': 'sensitivity', 'sensitivity_f': '(1.0)', 'poles': 'poles', 'zeros': 'zeros'}), "(name=name, type='A', unitsIn=unitsIn, unitsOut=unitsOut, a0=a0,\n sensitivity=sensitivity, sensitivity_f=1.0, poles=poles, zeros=zeros)\n", (5627, 5765), False, 'from instResp.polezero import polezero\n'), ((6345, 6358), 'instResp.libNom.RC', 'RC', ([], {'tau': '(R * C)'}), '(tau=R * C)\n', (6347, 6358), False, 'from instResp.libNom import RC\n'), ((6369, 6399), 'numpy.logspace', 'np.logspace', (['(-5)', '(4.0)'], {'num': '(1000)'}), '(-5, 4.0, num=1000)\n', (6380, 6399), True, 'import numpy as np\n'), ((6504, 6591), 'instResp.plotResp.plotResponse', 'plotResponse', (['resp', 'freqs'], {'title': 'title', 'xmin': '(0.001)', 'xmax': '(100.0)', 'ymin': '(0.01)', 'ymax': '(1.2)'}), '(resp, freqs, title=title, xmin=0.001, xmax=100.0, ymin=0.01,\n ymax=1.2)\n', (6516, 6591), False, 'from instResp.plotResp import plotResponse\n'), ((6787, 6861), 'instResp.libNom.WA', 'WA', ([], {'per': '(1 / f0)', 'damp': 'damp', 'gain': 'gain', 'normalize': '(True)', 'normalize_freq': 'fnorm'}), '(per=1 / f0, damp=damp, gain=gain, normalize=True, normalize_freq=fnorm)\n', (6789, 6861), False, 'from instResp.libNom import WA, Accelerometer\n'), ((6894, 6923), 'numpy.logspace', 'np.logspace', (['(-5)', '(4.0)'], {'num': '(500)'}), '(-5, 4.0, num=500)\n', (6905, 6923), True, 'import numpy as np\n'), ((7160, 7245), 'instResp.plotResp.plotResponse', 'plotResponse', (['resp', 'freqs'], {'title': 'title', 'xmin': '(1)', 'xmax': '(5000.0)', 'ymin': '(0.01)', 'ymax': '(1.2)'}), '(resp, freqs, title=title, xmin=1, xmax=5000.0, ymin=0.01, ymax=1.2\n )\n', (7172, 7245), False, 'from instResp.plotResp import plotResponse\n'), ((7351, 7380), 'numpy.logspace', 'np.logspace', (['(-5)', '(3.0)'], {'num': '(500)'}), '(-5, 3.0, num=500)\n', (7362, 7380), True, 'import numpy as np\n'), ((7476, 7566), 'instResp.plotResp.plotResponse', 'plotResponse', (['resp', 'freqs'], {'title': 'title', 'xmin': '(0.001)', 'xmax': '(100.0)', 'ymin': '(0.01)', 'ymax': '(1000.0)'}), '(resp, freqs, title=title, xmin=0.001, xmax=100.0, ymin=0.01,\n ymax=1000.0)\n', (7488, 7566), False, 'from instResp.plotResp import plotResponse\n'), ((6197, 6237), 'numpy.sqrt', 'np.sqrt', (['(pole.real ** 2 + pole.imag ** 2)'], {}), '(pole.real ** 2 + pole.imag ** 2)\n', (6204, 6237), True, 'import numpy as np\n'), ((7743, 7771), 'os.path.join', 'os.path.join', (['pz_dir', 'pz_fil'], {}), '(pz_dir, pz_fil)\n', (7755, 7771), False, 'import os\n'), ((4740, 4777), 'numpy.zeros', 'np.zeros', (['nzeros'], {'dtype': 'np.complex128'}), '(nzeros, dtype=np.complex128)\n', (4748, 4777), True, 'import numpy as np\n'), ((5244, 5281), 'numpy.zeros', 'np.zeros', (['npoles'], {'dtype': 'np.complex128'}), '(npoles, dtype=np.complex128)\n', (5252, 5281), True, 'import numpy as np\n')]
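The response evaluation above reduces to one formula: for an analog (Laplace, rad/s) pole-zero stage, H(s) = a0 * prod(s - z_k) / prod(s - p_k) with s = 2*pi*f*1j, and the corner frequency of a pole is its distance from the origin divided by 2*pi. A worked instance using the pole values quoted in the module's SACPZ example:

import numpy as np

# Lowest-order pole pair from the SACPZ sample in the docstring above.
poles = np.array([-0.1486 + 0.1486j, -0.1486 - 0.1486j])
# |p| / (2*pi), the same quantity get_corner_freq_from_pole computes.
f_corner = np.abs(poles[0]) / (2.0 * np.pi)
print(f"corner frequency ~ {f_corner:.4f} Hz")  # roughly 0.033 Hz, i.e. ~30 s period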
# Module file for conductance measurements with the # SR830. Implementing the good ideas of <NAME> from typing import Union, Optional from time import sleep import numpy as np import qcodes as qc from qcodes.instrument.parameter import Parameter from qdev_wrappers.sweep_functions import _do_measurement from qcodes.instrument_drivers.QDev.QDac_channels import QDac as QDacch from qdev_wrappers.T3.customised_instruments import SR830_T3 def do2Dconductance(outer_param: Parameter, outer_start: Union[float, int], outer_stop: Union[float, int], outer_npts: int, inner_param: Parameter, inner_start: Union[float, int], inner_stop: Union[float, int], inner_npts: int, lockin: SR830_T3, delay: Optional[float]=None): """ Function to perform a sped-up 2D conductance measurement Args: outer_param: The outer loop voltage parameter outer_start: The outer loop start voltage outer_stop: The outer loop stop voltage outer_npts: The number of points in the outer loop inner_param: The inner loop voltage parameter inner_start: The inner loop start voltage inner_stop: The inner loop stop voltage inner_npts: The number of points in the inner loop lockin: The lock-in amplifier to use delay: Delay to wait after setting inner parameter before triggering lockin. If None will use default delay, otherwise used the supplied. """ station = qc.Station.default sr = lockin # Validate the instruments if sr.name not in station.components: raise KeyError('Unknown lock-in! Refusing to proceed until the ' 'lock-in has been added to the station.') if (outer_param._instrument.name not in station.components and outer_param._instrument._parent.name not in station.components): raise KeyError('Unknown instrument for outer parameter. ' 'Please add that instrument to the station.') if (inner_param._instrument.name not in station.components and inner_param._instrument._parent.name not in station.components): raise KeyError('Unknown instrument for inner parameter. ' 'Please add that instrument to the station.') tau = sr.time_constant() min_delay = 0.002 # what's the physics behind this number? if delay is None: delay = tau + min_delay # Prepare for the first iteration # Some of these things have to be repeated during the loop sr.buffer_reset() sr.buffer_start() #sr.buffer_trig_mode('ON') sr.buffer_SR('Trigger') sr.conductance.shape = (inner_npts,) sr.conductance.setpoint_names = (inner_param.name,) sr.conductance.setpoint_labels = (inner_param.label,) sr.conductance.setpoint_units = ('V',) sr.conductance.setpoints = (tuple(np.linspace(inner_start, inner_stop, inner_npts)),) def trigger(): sleep(delay) sr.send_trigger() def prepare_buffer(): # here it should be okay to call ch1_databuffer... I think... 
sr.ch1_databuffer.prepare_buffer_readout() # For the dataset/plotting, put in the correct setpoints sr.conductance.setpoint_names = (inner_param.name,) sr.conductance.setpoint_labels = (inner_param.label,) sr.conductance.setpoint_units = ('V',) sr.conductance.setpoints = (tuple(np.linspace(inner_start, inner_stop, inner_npts)),) def start_buffer(): sr.buffer_start() sr.conductance.shape = (inner_npts,) # This is something def reset_buffer(): sr.buffer_reset() trig_task = qc.Task(trigger) reset_task = qc.Task(reset_buffer) start_task = qc.Task(start_buffer) inner_loop = qc.Loop(inner_param.sweep(inner_start, inner_stop, num=inner_npts)).each(trig_task) outer_loop = qc.Loop(outer_param.sweep(outer_start, outer_stop, num=outer_npts)).each(start_task, inner_loop, sr.conductance, reset_task) set_params = ((inner_param, inner_start, inner_stop), (outer_param, outer_start, outer_stop)) meas_params = (sr.conductance,) prepare_buffer() qdac = None # ensure that any waveform generator is unbound from the qdac channels that we step if # we are stepping the qdac if isinstance(inner_param._instrument, QDacch): qdacch = inner_param._instrument qdacch.slope('Inf') if isinstance(outer_param._instrument, QDacch): qdacch = outer_param._instrument qdacch.slope('Inf') if qdac: qdac.fast_voltage_set(True) # now that we have unbound the function generators # we don't need to do it in the loop qdac.voltage_set_dont_wait(False) # this is un safe and highly experimental plot, data = _do_measurement(outer_loop, set_params, meas_params, do_plots=True) return plot, data
[ "qcodes.Task", "qdev_wrappers.sweep_functions._do_measurement", "numpy.linspace", "time.sleep" ]
[((3995, 4011), 'qcodes.Task', 'qc.Task', (['trigger'], {}), '(trigger)\n', (4002, 4011), True, 'import qcodes as qc\n'), ((4029, 4050), 'qcodes.Task', 'qc.Task', (['reset_buffer'], {}), '(reset_buffer)\n', (4036, 4050), True, 'import qcodes as qc\n'), ((4068, 4089), 'qcodes.Task', 'qc.Task', (['start_buffer'], {}), '(start_buffer)\n', (4075, 4089), True, 'import qcodes as qc\n'), ((5531, 5598), 'qdev_wrappers.sweep_functions._do_measurement', '_do_measurement', (['outer_loop', 'set_params', 'meas_params'], {'do_plots': '(True)'}), '(outer_loop, set_params, meas_params, do_plots=True)\n', (5546, 5598), False, 'from qdev_wrappers.sweep_functions import _do_measurement\n'), ((3187, 3199), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (3192, 3199), False, 'from time import sleep\n'), ((3007, 3055), 'numpy.linspace', 'np.linspace', (['inner_start', 'inner_stop', 'inner_npts'], {}), '(inner_start, inner_stop, inner_npts)\n', (3018, 3055), True, 'import numpy as np\n'), ((3650, 3698), 'numpy.linspace', 'np.linspace', (['inner_start', 'inner_stop', 'inner_npts'], {}), '(inner_start, inner_stop, inner_npts)\n', (3661, 3698), True, 'import numpy as np\n')]
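The timing logic above (wait roughly one lock-in time constant plus a small fixed margin before each trigger) can be isolated without any instrument attached. A minimal stand-in sketch, with the callable and tau value invented for illustration:

from time import sleep

# Mirror of the trigger() closure in do2Dconductance: settle, then latch
# one point into the lock-in buffer.
def make_trigger(send_trigger, tau, min_delay=0.002):
    def trigger():
        sleep(tau + min_delay)   # let the demodulated signal settle
        send_trigger()           # then trigger one buffered reading
    return trigger

trig = make_trigger(lambda: print("TRIG"), tau=0.1)  # hypothetical sender
trig()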
import numpy as np
import tensorflow as tf


class PositionalEncodings(tf.keras.Model):
    """Sinusoidal positional encoding generator.
    """
    def __init__(self, channels: int, presize: int = 128):
        """Initializer.
        Args:
            channels: size of the channels.
            presize: initial pe cache size.
        """
        super().__init__()
        self.channels = channels
        self.size = presize
        self.buffer = self.generate(presize)

    def call(self, size: int) -> tf.Tensor:
        """Return cached positional encodings.
        Args:
            size: length of the pe.
        Returns:
            [tf.float32; [T, C]], sinusoidal positional encodings.
        """
        if size <= self.size:
            return self.buffer[:size]
        # generate a larger cache and remember its length,
        # so later shorter requests can reuse it
        self.size = size
        self.buffer = self.generate(size)
        return self.buffer

    def generate(self, size: int) -> tf.Tensor:
        """Generate positional encodings.
        Args:
            size: length of the pe.
        Returns:
            [tf.float32; [T, C]], sinusoidal positional encodings.
        """
        # [tf.int32; [T]]
        pos = tf.range(size)
        # [tf.int32; [C//2]]
        i = tf.range(0, self.channels, 2)
        # [C//2], cast to float32
        denom = tf.exp(-np.log(10000) * tf.cast(i / self.channels, tf.float32))
        # [T, C//2]
        context = tf.cast(pos, tf.float32)[:, None] * denom[None]
        # [T, C//2, 1]
        context = context[..., None]
        # [T, C//2, 2]
        pe = tf.concat([tf.sin(context), tf.cos(context)], axis=-1)
        # [T, C]
        pe = tf.reshape(pe, [size, self.channels])
        return pe
[ "tensorflow.range", "tensorflow.sin", "numpy.log", "tensorflow.reshape", "tensorflow.cast", "tensorflow.cos" ]
[((1155, 1169), 'tensorflow.range', 'tf.range', (['size'], {}), '(size)\n', (1163, 1169), True, 'import tensorflow as tf\n'), ((1211, 1240), 'tensorflow.range', 'tf.range', (['(0)', 'self.channels', '(2)'], {}), '(0, self.channels, 2)\n', (1219, 1240), True, 'import tensorflow as tf\n'), ((1626, 1663), 'tensorflow.reshape', 'tf.reshape', (['pe', '[size, self.channels]'], {}), '(pe, [size, self.channels])\n', (1636, 1663), True, 'import tensorflow as tf\n'), ((1319, 1357), 'tensorflow.cast', 'tf.cast', (['(i / self.channels)', 'tf.float32'], {}), '(i / self.channels, tf.float32)\n', (1326, 1357), True, 'import tensorflow as tf\n'), ((1397, 1421), 'tensorflow.cast', 'tf.cast', (['pos', 'tf.float32'], {}), '(pos, tf.float32)\n', (1404, 1421), True, 'import tensorflow as tf\n'), ((1552, 1567), 'tensorflow.sin', 'tf.sin', (['context'], {}), '(context)\n', (1558, 1567), True, 'import tensorflow as tf\n'), ((1569, 1584), 'tensorflow.cos', 'tf.cos', (['context'], {}), '(context)\n', (1575, 1584), True, 'import tensorflow as tf\n'), ((1303, 1316), 'numpy.log', 'np.log', (['(10000)'], {}), '(10000)\n', (1309, 1316), True, 'import numpy as np\n')]
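The same sinusoidal table can be rendered in plain NumPy, which makes the shapes easy to check by hand. A sketch assuming an even channel count, as the TensorFlow version above also requires:

import numpy as np

# PE[t, 2k] = sin(t / 10000^(2k/C)), PE[t, 2k+1] = cos(t / 10000^(2k/C))
def positional_encodings(size, channels):
    pos = np.arange(size)[:, None]                           # [T, 1]
    i = np.arange(0, channels, 2)[None, :]               # [1, C//2]
    angle = pos * np.exp(-np.log(10000.0) * i / channels)  # [T, C//2]
    pe = np.stack([np.sin(angle), np.cos(angle)], axis=-1)  # [T, C//2, 2]
    return pe.reshape(size, channels)                    # [T, C]

print(positional_encodings(4, 8).shape)  # (4, 8)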
#!/usr/bin/env python3 # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Plot cumulative time based on daily time reporting data.""" from typing import List from typing import Optional from typing import Union from datetime import date import matplotlib.dates as mdates import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd include_plot_title = True save_plots = True def load_csv(filename: str) -> np.array: with open(filename, 'rb') as file: return np.loadtxt( file, delimiter=',', skiprows=1, usecols=(0,1), dtype=str, ) def filter_data(data: np.array) -> np.array: return np.array([d for d in data if d[0] not in ('', 'total')]) def convert_data(data: np.array) -> np.array: return np.array([[date.fromisoformat(d[0]), float(d[1])] for d in data]) def add_zeroth_datapoint(data: np.array) -> np.array: return np.vstack([[[data[0,0], 0.0]], data]) def data_to_cumsum(data: np.array, col: int = 1) -> np.array: data[:,col] = np.cumsum(data[:,col]) return data def get_data(filename: str): data = load_csv(filename) data = filter_data(data) data = convert_data(data) data = add_zeroth_datapoint(data) data = data_to_cumsum(data) return data def format_filename(string: str) -> str: string = string.replace('(', '') string = string.replace(')', '') string = string.replace(' ', '_') string = string.replace('\\', '') return string.lower() def plot_data( data: np.array, title: str, major_formatter_str: str, major_locator: Optional[mdates.RRuleLocator] = None, yaxis_multiple_locator: Optional[int] = None, colour: str = 'blue', ) -> None: fig, ax = plt.subplots(1, 1) ax.plot(data[:,0], data[:,1], '-', color=colour) if include_plot_title: ax.set(title=title) ax.set(ylabel='cumulative time (h)') if major_locator: ax.xaxis.set_major_locator(major_locator) ax.xaxis.set_major_formatter(mdates.DateFormatter(major_formatter_str)) if yaxis_multiple_locator: ax.yaxis.set_major_locator(ticker.MultipleLocator(yaxis_multiple_locator)) ax.set_ylim(0) ax.grid() fig.autofmt_xdate() if save_plots: filename = format_filename(title) fig.savefig(f'{filename}.png', bbox_inches='tight') fig.savefig(f'{filename}.svg', bbox_inches='tight') def plot_data_compare( data: List[np.array], title: str, legends: List[str], major_formatter_str: str, major_locator: Optional[mdates.RRuleLocator] = None, yaxis_multiple_locator: Optional[int] = None, colours: Union[str, List[str]] = 'blue', ) -> None: fig, ax = plt.subplots(1, 1) for i in range(len(data)): colour = colours if isinstance(colours, str) else colours[i] d = data[i] ax.plot(d[:,0], d[:,1], '-', color=colour) total_time = d[-1,1] legends[i] = legends[i] + f' ({total_time:g} h)' if include_plot_title: ax.set(title=title) ax.set(ylabel='cumulative time (h)') if major_locator: ax.xaxis.set_major_locator(major_locator) ax.xaxis.set_major_formatter(mdates.DateFormatter(major_formatter_str)) if yaxis_multiple_locator: ax.yaxis.set_major_locator(ticker.MultipleLocator(yaxis_multiple_locator)) ax.set_ylim(0) ax.legend(legends)#, 
loc='center', bbox_to_anchor=(0.3, 0.8)) ax.grid() fig.autofmt_xdate() if save_plots: filename = format_filename(title) fig.savefig(f'{filename}.png', bbox_inches='tight') fig.savefig(f'{filename}.svg', bbox_inches='tight') def main(): plt.rc('text', usetex=True) plt.rc('font', family='serif', size=14) plt.rc('axes', titlesize=20) plt.rc('legend', fontsize=14) # Under File, Download -> Comma-separated values (.csv, current sheet), # download the 'Time' and 'Blog' sheets data_time = get_data('rmw_email time tracking - Code.csv') data_blog = get_data('rmw_email time tracking - Blog.csv') plot_data( data_time, 'rmw\_email code time investment', '%Y %B', colour='green', ) plot_data( data_blog, 'rmw\_email blog post time investment', '%Y-%b-%d', mdates.DayLocator((1,5,10,15,20,25)), yaxis_multiple_locator=5, colour='blue', ) plot_data_compare( [data_time, data_blog], 'Overall rmw\_email time investment', ['code', 'blog post'], '%Y %B', colours=['green', 'blue'], ) plt.show() if __name__ == '__main__': main()
[ "matplotlib.pyplot.show", "matplotlib.dates.DayLocator", "numpy.cumsum", "matplotlib.dates.DateFormatter", "datetime.date.fromisoformat", "numpy.array", "matplotlib.pyplot.rc", "numpy.loadtxt", "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.subplots", "numpy.vstack" ]
[((1244, 1300), 'numpy.array', 'np.array', (["[d for d in data if d[0] not in ('', 'total')]"], {}), "([d for d in data if d[0] not in ('', 'total')])\n", (1252, 1300), True, 'import numpy as np\n'), ((1493, 1531), 'numpy.vstack', 'np.vstack', (['[[[data[0, 0], 0.0]], data]'], {}), '([[[data[0, 0], 0.0]], data])\n', (1502, 1531), True, 'import numpy as np\n'), ((1613, 1636), 'numpy.cumsum', 'np.cumsum', (['data[:, col]'], {}), '(data[:, col])\n', (1622, 1636), True, 'import numpy as np\n'), ((2318, 2336), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2330, 2336), True, 'import matplotlib.pyplot as plt\n'), ((3287, 3305), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3299, 3305), True, 'import matplotlib.pyplot as plt\n'), ((4246, 4273), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4252, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4278, 4317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""', 'size': '(14)'}), "('font', family='serif', size=14)\n", (4284, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4322, 4350), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(20)'}), "('axes', titlesize=20)\n", (4328, 4350), True, 'import matplotlib.pyplot as plt\n'), ((4355, 4384), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(14)'}), "('legend', fontsize=14)\n", (4361, 4384), True, 'import matplotlib.pyplot as plt\n'), ((5164, 5174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5172, 5174), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1115), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""', 'skiprows': '(1)', 'usecols': '(0, 1)', 'dtype': 'str'}), "(file, delimiter=',', skiprows=1, usecols=(0, 1), dtype=str)\n", (1055, 1115), True, 'import numpy as np\n'), ((2593, 2634), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['major_formatter_str'], {}), '(major_formatter_str)\n', (2613, 2634), True, 'import matplotlib.dates as mdates\n'), ((3766, 3807), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['major_formatter_str'], {}), '(major_formatter_str)\n', (3786, 3807), True, 'import matplotlib.dates as mdates\n'), ((4867, 4908), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', (['(1, 5, 10, 15, 20, 25)'], {}), '((1, 5, 10, 15, 20, 25))\n', (4884, 4908), True, 'import matplotlib.dates as mdates\n'), ((2702, 2748), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['yaxis_multiple_locator'], {}), '(yaxis_multiple_locator)\n', (2724, 2748), True, 'import matplotlib.ticker as ticker\n'), ((3875, 3921), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['yaxis_multiple_locator'], {}), '(yaxis_multiple_locator)\n', (3897, 3921), True, 'import matplotlib.ticker as ticker\n'), ((1371, 1395), 'datetime.date.fromisoformat', 'date.fromisoformat', (['d[0]'], {}), '(d[0])\n', (1389, 1395), False, 'from datetime import date\n')]
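The cumulative-curve construction above comes down to two array operations: prepend a zero-hours point on the first reported day, then cumulatively sum the hours column. A tiny worked instance with invented dates and hours:

import numpy as np
from datetime import date

# Same idea as add_zeroth_datapoint followed by data_to_cumsum above.
data = np.array([[date(2021, 1, 4), 1.5],
                 [date(2021, 1, 5), 2.0]], dtype=object)
data = np.vstack([[[data[0, 0], 0.0]], data])  # curve starts at zero hours
data[:, 1] = np.cumsum(data[:, 1])
print(data[:, 1])  # [0.0 1.5 3.5]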
""" Created on Mon Sep 9 15:51:35 2013 QgasUtils: Basic Quantum Gas Utilities functions @author: ispielman Modified on Wed Dec 10 11:26: 2014 @author: aputra """ import numpy import scipy.ndimage def ImageSlice(xVals, yVals, Image, r0, Width, Scaled = False): """ Produces a pair of slices from image of a band with 'Width' centered at r0 = [x y] Scaled : 'False' use pixels directly, and 'True' compute scaling from (xvals and yvals) assuming they are linearly spaced Currently Width and x,y are in scaled units, not pixel units. the return will be ((xvals xslice) (yvals yslice)), where each entry is a numpy array. these are copies, not views. """ if (Scaled): (xMin, yMin) = numpy.floor(GetPixelCoordsFromImage(r0, - Width/2, xVals, yVals)); (xMax, yMax) = numpy.ceil(GetPixelCoordsFromImage(r0, Width/2, xVals, yVals)); else: (xMin, yMin) = r0 - numpy.round(Width/2); (xMax, yMax) = r0 + numpy.round(Width/2); # Extract bands of desired width # These are slices, so views of the initial data if xMin<0: xMin =0 if yMin<0: yMin =0 if xMax>xVals.shape[1]: xMax = xVals.shape[1] if yMax>yVals.shape[0]: yMax = yVals.shape[0] # Compute averages ySlice = Image[:,xMin:xMax].mean(1); # along y, so use x center xSlice = Image[yMin:yMax,:].mean(0); # along x, so use y center yValsSlice = yVals[:,0].copy(); xValsSlice = xVals[0,:].copy(); return ((xValsSlice, xSlice), (yValsSlice, ySlice)); def ImageCrop(xVals, yVals, Image, r0, Width, Scaled = False, Center = True): """ crops an image along with the associated matrix of x and y to a specified area and returns the cropped image this will be a copy not a view Image, xVals, yVals : (2D image, xvals, yvals) r0 : center of ROI in physical units (two element list or array) Width : length of box-sides in physical units (two element list or array) Scaled : If true, will attempt to use the x and y waves, to generate pixel values Center : Recenter on cropped region """ error = False; Cropped_Image={'OptDepth':0,'xVals':0,'yVals':0,'Error':error} if (Scaled): if(ScaleTest(xVals, yVals)): rMinPixel = numpy.floor(GetPixelCoordsFromImage(r0, -Width/2, xVals, yVals)); rMaxPixel = numpy.ceil(GetPixelCoordsFromImage(r0, Width/2, xVals, yVals)); else: rMinPixel = numpy.floor(r0)-numpy.floor(Width/2); rMaxPixel = numpy.ceil(r0)+numpy.ceil(Width/2); error = True; else: rMinPixel = numpy.floor(r0)-numpy.floor(Width/2); rMaxPixel = numpy.ceil(r0)+numpy.ceil(Width/2); if rMinPixel[0]<0: rMinPixel[0]=0 if rMinPixel[1]<0: rMinPixel[1]=0 if rMaxPixel[0]>xVals.shape[1]: rMaxPixel[0] = xVals.shape[1] if rMaxPixel[1]>yVals.shape[0]: rMaxPixel[1] = yVals.shape[0] Cropped_Image['OptDepth'] = Image[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy(); Cropped_Image['xVals'] = xVals[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy(); Cropped_Image['yVals'] = yVals[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy(); if (Center): Cropped_Image['xVals'] -= r0[0]; Cropped_Image['yVals'] -= r0[1]; return Cropped_Image; def ImageSliceFromMax(Image, width, pScale = True): """ Produces a pair of slices from image of a band with 'Width' centered at the maximum val of Image Scaled : 'False' use pixels directly, and 'True' compute scaling from (xvals and yvals) assuming they are linearly spaced Currently Width and x,y are in scaled units, not pixel units. the return will be ((xvals xslice) (yvals yslice)), where each entry is a numpy array. these are copies, not views. 
""" Z = scipy.ndimage.gaussian_filter(Image['OptDepth'], sigma=3); id = Z.argmax() r0max = (numpy.ravel(Image['xVals'])[id], numpy.ravel(Image['yVals'])[id]) imgSlice = ImageSlice(Image['xVals'], Image['yVals'], Image['OptDepth'], r0max, width, Scaled = pScale) imgSlicefromMax={'xVals':0,'yVals':0,'xSlice':0, 'ySlice':0, 'xMax':r0max[0], 'yMax':r0max[1]} imgSlicefromMax['yVals'] = imgSlice[1][0] imgSlicefromMax['xVals'] = imgSlice[0][0] imgSlicefromMax['ySlice'] = imgSlice[1][1] imgSlicefromMax['xSlice'] = imgSlice[0][1] return imgSlicefromMax def GetPixelCoordsFromImage(r0, Offset, xVals, yVals): """ Returns the pixel coordinates associated with the scaled values in the 2D arrays xVals and yVals remember in r0 the ordering is r0 = (x0, y0) """ # Assume that the correct arrays were passed dy = yVals[1][0] - yVals[0][0]; dx = xVals[0][1] - xVals[0][0]; y0 = yVals[0][0]; x0 = xVals[0][0]; #want offset to be an integer number of pixels Offset = numpy.round(Offset/numpy.array([dx,dy])); return (r0 - numpy.array([x0, y0])) /numpy.array([dx, dy])+Offset; def ScaleTest(xVals, yVals): """ Returns the pixel coordinates associated with the scaled values in the 2D arrays xVals and yVals remember in r0 the ordering is r0 = (x0, y0) """ # Assume that the correct arrays were passed dy = yVals[1][0] - yVals[0][0]; dx = xVals[0][1] - xVals[0][0]; if ((dx == 0) or (dy == 0)): print("ImageSlice: generating scaled axes failed"); print(dx,dy,xVals[0][1],xVals[0][0],yVals[1][0],yVals[0][0],xVals,yVals) return False; else: return True;
[ "numpy.ceil", "numpy.ravel", "numpy.floor", "numpy.array", "numpy.round" ]
[((936, 958), 'numpy.round', 'numpy.round', (['(Width / 2)'], {}), '(Width / 2)\n', (947, 958), False, 'import numpy\n'), ((986, 1008), 'numpy.round', 'numpy.round', (['(Width / 2)'], {}), '(Width / 2)\n', (997, 1008), False, 'import numpy\n'), ((2672, 2687), 'numpy.floor', 'numpy.floor', (['r0'], {}), '(r0)\n', (2683, 2687), False, 'import numpy\n'), ((2688, 2710), 'numpy.floor', 'numpy.floor', (['(Width / 2)'], {}), '(Width / 2)\n', (2699, 2710), False, 'import numpy\n'), ((2730, 2744), 'numpy.ceil', 'numpy.ceil', (['r0'], {}), '(r0)\n', (2740, 2744), False, 'import numpy\n'), ((2745, 2766), 'numpy.ceil', 'numpy.ceil', (['(Width / 2)'], {}), '(Width / 2)\n', (2755, 2766), False, 'import numpy\n'), ((4000, 4027), 'numpy.ravel', 'numpy.ravel', (["Image['xVals']"], {}), "(Image['xVals'])\n", (4011, 4027), False, 'import numpy\n'), ((4033, 4060), 'numpy.ravel', 'numpy.ravel', (["Image['yVals']"], {}), "(Image['yVals'])\n", (4044, 4060), False, 'import numpy\n'), ((4958, 4979), 'numpy.array', 'numpy.array', (['[dx, dy]'], {}), '([dx, dy])\n', (4969, 4979), False, 'import numpy\n'), ((5025, 5046), 'numpy.array', 'numpy.array', (['[dx, dy]'], {}), '([dx, dy])\n', (5036, 5046), False, 'import numpy\n'), ((2518, 2533), 'numpy.floor', 'numpy.floor', (['r0'], {}), '(r0)\n', (2529, 2533), False, 'import numpy\n'), ((2534, 2556), 'numpy.floor', 'numpy.floor', (['(Width / 2)'], {}), '(Width / 2)\n', (2545, 2556), False, 'import numpy\n'), ((2580, 2594), 'numpy.ceil', 'numpy.ceil', (['r0'], {}), '(r0)\n', (2590, 2594), False, 'import numpy\n'), ((2595, 2616), 'numpy.ceil', 'numpy.ceil', (['(Width / 2)'], {}), '(Width / 2)\n', (2605, 2616), False, 'import numpy\n'), ((5001, 5022), 'numpy.array', 'numpy.array', (['[x0, y0]'], {}), '([x0, y0])\n', (5012, 5022), False, 'import numpy\n')]
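GetPixelCoordsFromImage above maps physical coordinates to pixel indices as pixel = (r0 - origin) / spacing + offset. A worked instance on a uniform grid, with zero offset and invented origin and spacing values:

import numpy as np

x0, dx = -50.0, 0.5          # grid origin and spacing along x (physical units)
y0, dy = -40.0, 0.5
r0 = np.array([10.0, -5.0])    # physical point of interest
pixel = (r0 - np.array([x0, y0])) / np.array([dx, dy])
print(pixel)                  # -> [120.  70.], i.e. column 120, row 70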
import os import json import multiprocessing from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np import scipy as sp import matplotlib.pyplot as plt import matplotlib as mpl import pandas as pd import matplotlib.animation from sklearn.model_selection import train_test_split from tqdm import tqdm from typing import List import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader from scipy.spatial import distance_matrix import nibabel as nib from scipy.ndimage.interpolation import zoom from scipy import ndimage from sklearn.metrics import jaccard_score from skimage.metrics import hausdorff_distance from scipy.stats import pearsonr from aneurysm_utils.preprocessing import resize_mri from aneurysm_utils.environment import Environment from collections import defaultdict from sklearn import metrics as sk_metrics from sklearn.preprocessing import MinMaxScaler #import open3d def evaluate_model( y_true: list, y_pred: list, segmentation: bool = None, prefix: str = None ) -> dict: metrics = {} if segmentation: y_true = np.concatenate(y_true).ravel() y_pred = np.concatenate(y_pred).ravel() if not prefix: prefix = "" else: prefix = prefix + "_" metrics[prefix + "accuracy"] = sk_metrics.accuracy_score(y_true, y_pred) metrics[prefix + "bal_acc"] = sk_metrics.balanced_accuracy_score(y_true, y_pred) try: metrics[prefix + "precision"] = sk_metrics.precision_score(y_true, y_pred) metrics[prefix + "recall"] = sk_metrics.recall_score(y_true, y_pred) metrics[prefix + "spec"] = sk_metrics.recall_score(y_true, y_pred, pos_label=0) metrics[prefix + "sen"] = sk_metrics.recall_score(y_true, y_pred, pos_label=1) metrics[prefix + "f1"] = sk_metrics.f1_score(y_true, y_pred) except Exception: print( "precision/recall/spec/sen/f1 are not supported for non-binary classification." ) print("Accuracy (" + prefix + "): " + str(metrics[prefix + "accuracy"])) print("Balanced Accuracy (" + prefix + "): " + str(metrics[prefix + "bal_acc"])) print(sk_metrics.classification_report(y_true, y_pred)) return metrics # Transparent colormap (alpha to red), that is used for plotting an overlay. # See https://stackoverflow.com/questions/37327308/add-alpha-to-an-existing-matplotlib-colormap alpha_to_red_cmap = np.zeros((256, 4)) alpha_to_red_cmap[:, 0] = 0.8 alpha_to_red_cmap[:, -1] = np.linspace(0, 1, 256) # cmap.N-20) # alpha values alpha_to_red_cmap = mpl.colors.ListedColormap(alpha_to_red_cmap) red_to_alpha_cmap = np.zeros((256, 4)) red_to_alpha_cmap[:, 0] = 0.8 red_to_alpha_cmap[:, -1] = np.linspace(1, 0, 256) # cmap.N-20) # alpha values red_to_alpha_cmap = mpl.colors.ListedColormap(red_to_alpha_cmap) def animate_slices( struct_arr, overlay=None, axis=0, reverse_direction=False, interval=40, vmin=None, vmax=None, overlay_vmin=None, overlay_vmax=None, ): """ Create a matplotlib animation that moves through a 3D image along a specified axis. """ if vmin is None: vmin = struct_arr.min() if vmax is None: vmax = struct_arr.max() if overlay_vmin is None and overlay is not None: overlay_vmin = overlay.min() if overlay_vmax is None and overlay is not None: overlay_vmax = overlay.max() fig, ax = plt.subplots() axis_label = ["x", "y", "z"][axis] # TODO: If I select slice 50 here at the beginning, the plots look different. 
im = ax.imshow( np.take(struct_arr, 0, axis=axis), vmin=vmin, vmax=vmax, cmap="gray", interpolation=None, animated=True, ) if overlay is not None: im_overlay = ax.imshow( np.take(overlay, 0, axis=axis), vmin=overlay_vmin, vmax=overlay_vmax, cmap=alpha_to_red_cmap, interpolation=None, animated=True, ) text = ax.text( 0.03, 0.97, "{}={}".format(axis_label, 0), color="white", horizontalalignment="left", verticalalignment="top", transform=ax.transAxes, ) ax.axis("off") def update(i): im.set_array(np.take(struct_arr, i, axis=axis)) if overlay is not None: im_overlay.set_array(np.take(overlay, i, axis=axis)) text.set_text("{}={}".format(axis_label, i)) return im, text num_frames = struct_arr.shape[axis] if reverse_direction: frames = np.arange(num_frames - 1, 0, -1) else: frames = np.arange(0, num_frames) return mpl.animation.FuncAnimation( fig, update, frames=frames, interval=interval, blit=True ) def plot_slices( struct_arr, num_slices=7, cmap="gray", vmin=None, vmax=None, overlay=None, overlay_cmap=alpha_to_red_cmap, overlay_vmin=None, overlay_vmax=None, ): """ Plot equally spaced slices of a 3D image (and an overlay) along every axis Args: struct_arr (3D array or tensor): The 3D array to plot (usually from a nifti file). num_slices (int): The number of slices to plot for each dimension. cmap: The colormap for the image (default: `'gray'`). vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `struct_arr`. vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `struct_arr`. overlay (3D array or tensor): The 3D array to plot as an overlay on top of the image. Same size as `struct_arr`. overlay_cmap: The colomap for the overlay (default: `alpha_to_red_cmap`). overlay_vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `overlay`. overlay_vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `overlay`. 
""" if vmin is None: vmin = struct_arr.min() if vmax is None: vmax = struct_arr.max() if overlay_vmin is None and overlay is not None: overlay_vmin = overlay.min() if overlay_vmax is None and overlay is not None: overlay_vmax = overlay.max() print(vmin, vmax, overlay_vmin, overlay_vmax) fig, axes = plt.subplots(3, num_slices, figsize=(15, 6)) intervals = np.asarray(struct_arr.shape) / num_slices for axis, axis_label in zip([0, 1, 2], ["x", "y", "z"]): for i, ax in enumerate(axes[axis]): i_slice = int(np.round(intervals[axis] / 2 + i * intervals[axis])) # print(axis_label, 'plotting slice', i_slice) plt.sca(ax) plt.axis("off") plt.imshow( sp.ndimage.rotate(np.take(struct_arr, i_slice, axis=axis), 90), vmin=vmin, vmax=vmax, cmap=cmap, interpolation=None, ) plt.text( 0.03, 0.97, "{}={}".format(axis_label, i_slice), color="white", horizontalalignment="left", verticalalignment="top", transform=ax.transAxes, ) if overlay is not None: plt.imshow( sp.ndimage.rotate(np.take(overlay, i_slice, axis=axis), 90), cmap=overlay_cmap, vmin=overlay_vmin, vmax=overlay_vmax, interpolation=None, ) def draw_mask_3d(image:np.array,ax:Axes3D=None,zorder:int=0,markersize:float=0.8,alpha:float=1,c=None): """ Draws all points which are not zero of given image in scatterplot Parameters ---------- image: where to get mask from ax: if given uses this axis object zorder: order of points drawn markersize: size of points alpha: transparency of points c: if anything points will be black """ fig = plt.figure() if ax==None: ax = Axes3D(fig) else: ax=ax for cluster in range(1,int(np.unique(image)[-1]+1)): if len(np.argwhere(image==cluster))==0: print("no aneurysm found") continue if c==None: ax.scatter(np.argwhere(image==cluster).T[0],np.argwhere(image==cluster).T[1],np.argwhere(image==cluster).T[2],s=markersize,alpha=alpha,zorder=zorder) else: ax.scatter(np.argwhere(image==cluster).T[0],np.argwhere(image==cluster).T[1],np.argwhere(image==cluster).T[2],s=3,alpha=alpha,zorder=zorder,c="black") def draw_image(image:np.array,ax:Axes3D=None,zorder:int=0,markersize:float=0.8,transparency:bool=True): """ Draws all points which are not zero of given image in scatterplot in colors according to their intensity Parameters ---------- image: where to get mask from ax: if given uses this axis object zorder: order of points drawn markersize: size of points transparency: if true scales transparency with intensity values """ fig = plt.figure() if ax==None: ax = Axes3D(fig) else: ax=ax if transparency: alpha= image[image>0] alpha = np.where(alpha>0.15,alpha,0.01) else: alpha=1 cmap = plt.get_cmap('YlOrRd') ax.scatter(np.argwhere(image>0).T[0],np.argwhere(image>0).T[1],np.argwhere(image>0).T[2],s=markersize,alpha=image[image>0],zorder=zorder,c=cmap(image[image>0])) def draw_bounding_box(candidates,ax:Axes3D=None): """ Draws bounding box of given bounding box dictionary -> see postprocessing function Parameters ---------- image: list of dictionaries where entry vertices contains the points of the bounding box ax: if given uses this axis object """ fig = plt.figure() if ax==None: ax = Axes3D(fig) else: ax=ax for candidate in candidates: Z= candidate["vertices"] Z=np.array(Z) verts= [(Z[0],Z[1]),(Z[0],Z[2]),(Z[0],Z[3]),(Z[6],Z[1]),(Z[7],Z[1]),(Z[2],Z[5]), (Z[2],Z[7]),(Z[3],Z[5]),(Z[3],Z[6]),(Z[4],Z[7]),(Z[4],Z[6]),(Z[4],Z[5])] for element in verts: x=[element[0][0],element[1][0]] y=[element[0][1],element[1][1]] z=[element[0][2],element[1][2]] ax.plot(x,y,z,c='r',linewidth=2,alpha=1) fig.show()
[ "sklearn.metrics.accuracy_score", "sklearn.metrics.classification_report", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure", "sklearn.metrics.f1_score", "numpy.arange", "matplotlib.colors.ListedColormap", "numpy.round", "numpy.unique", "numpy.linspace", "matplotlib.pyplot.subplots", "matplotlib.pyplot.get_cmap", "mpl_toolkits.mplot3d.Axes3D", "numpy.asarray", "sklearn.metrics.recall_score", "numpy.argwhere", "numpy.concatenate", "sklearn.metrics.balanced_accuracy_score", "numpy.zeros", "matplotlib.pyplot.axis", "numpy.where", "numpy.take", "numpy.array", "sklearn.metrics.precision_score", "matplotlib.pyplot.sca" ]
[((2528, 2546), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (2536, 2546), True, 'import numpy as np\n'), ((2604, 2626), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (2615, 2626), True, 'import numpy as np\n'), ((2677, 2721), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['alpha_to_red_cmap'], {}), '(alpha_to_red_cmap)\n', (2702, 2721), True, 'import matplotlib as mpl\n'), ((2743, 2761), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (2751, 2761), True, 'import numpy as np\n'), ((2819, 2841), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (2830, 2841), True, 'import numpy as np\n'), ((2892, 2936), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['red_to_alpha_cmap'], {}), '(red_to_alpha_cmap)\n', (2917, 2936), True, 'import matplotlib as mpl\n'), ((1411, 1452), 'sklearn.metrics.accuracy_score', 'sk_metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1436, 1452), True, 'from sklearn import metrics as sk_metrics\n'), ((1487, 1537), 'sklearn.metrics.balanced_accuracy_score', 'sk_metrics.balanced_accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1521, 1537), True, 'from sklearn import metrics as sk_metrics\n'), ((3536, 3550), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3548, 3550), True, 'import matplotlib.pyplot as plt\n'), ((4790, 4879), 'matplotlib.animation.FuncAnimation', 'mpl.animation.FuncAnimation', (['fig', 'update'], {'frames': 'frames', 'interval': 'interval', 'blit': '(True)'}), '(fig, update, frames=frames, interval=interval,\n blit=True)\n', (4817, 4879), True, 'import matplotlib as mpl\n'), ((6409, 6453), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', 'num_slices'], {'figsize': '(15, 6)'}), '(3, num_slices, figsize=(15, 6))\n', (6421, 6453), True, 'import matplotlib.pyplot as plt\n'), ((8107, 8119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8117, 8119), True, 'import matplotlib.pyplot as plt\n'), ((9197, 9209), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9207, 9209), True, 'import matplotlib.pyplot as plt\n'), ((9412, 9434), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""YlOrRd"""'], {}), "('YlOrRd')\n", (9424, 9434), True, 'import matplotlib.pyplot as plt\n'), ((9934, 9946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9944, 9946), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1629), 'sklearn.metrics.precision_score', 'sk_metrics.precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1613, 1629), True, 'from sklearn import metrics as sk_metrics\n'), ((1667, 1706), 'sklearn.metrics.recall_score', 'sk_metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1690, 1706), True, 'from sklearn import metrics as sk_metrics\n'), ((1742, 1794), 'sklearn.metrics.recall_score', 'sk_metrics.recall_score', (['y_true', 'y_pred'], {'pos_label': '(0)'}), '(y_true, y_pred, pos_label=0)\n', (1765, 1794), True, 'from sklearn import metrics as sk_metrics\n'), ((1829, 1881), 'sklearn.metrics.recall_score', 'sk_metrics.recall_score', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (1852, 1881), True, 'from sklearn import metrics as sk_metrics\n'), ((1915, 1950), 'sklearn.metrics.f1_score', 'sk_metrics.f1_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1934, 1950), True, 'from sklearn import metrics as sk_metrics\n'), ((2263, 2311), 
'sklearn.metrics.classification_report', 'sk_metrics.classification_report', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2295, 2311), True, 'from sklearn import metrics as sk_metrics\n'), ((3701, 3734), 'numpy.take', 'np.take', (['struct_arr', '(0)'], {'axis': 'axis'}), '(struct_arr, 0, axis=axis)\n', (3708, 3734), True, 'import numpy as np\n'), ((4693, 4725), 'numpy.arange', 'np.arange', (['(num_frames - 1)', '(0)', '(-1)'], {}), '(num_frames - 1, 0, -1)\n', (4702, 4725), True, 'import numpy as np\n'), ((4753, 4777), 'numpy.arange', 'np.arange', (['(0)', 'num_frames'], {}), '(0, num_frames)\n', (4762, 4777), True, 'import numpy as np\n'), ((6470, 6498), 'numpy.asarray', 'np.asarray', (['struct_arr.shape'], {}), '(struct_arr.shape)\n', (6480, 6498), True, 'import numpy as np\n'), ((8150, 8161), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (8156, 8161), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((9240, 9251), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (9246, 9251), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((9343, 9378), 'numpy.where', 'np.where', (['(alpha > 0.15)', 'alpha', '(0.01)'], {}), '(alpha > 0.15, alpha, 0.01)\n', (9351, 9378), True, 'import numpy as np\n'), ((9977, 9988), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (9983, 9988), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((10089, 10100), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (10097, 10100), True, 'import numpy as np\n'), ((3924, 3954), 'numpy.take', 'np.take', (['overlay', '(0)'], {'axis': 'axis'}), '(overlay, 0, axis=axis)\n', (3931, 3954), True, 'import numpy as np\n'), ((4400, 4433), 'numpy.take', 'np.take', (['struct_arr', 'i'], {'axis': 'axis'}), '(struct_arr, i, axis=axis)\n', (4407, 4433), True, 'import numpy as np\n'), ((6769, 6780), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (6776, 6780), True, 'import matplotlib.pyplot as plt\n'), ((6793, 6808), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6801, 6808), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1234), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (1226, 1234), True, 'import numpy as np\n'), ((1260, 1282), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {}), '(y_pred)\n', (1274, 1282), True, 'import numpy as np\n'), ((4500, 4530), 'numpy.take', 'np.take', (['overlay', 'i'], {'axis': 'axis'}), '(overlay, i, axis=axis)\n', (4507, 4530), True, 'import numpy as np\n'), ((6644, 6695), 'numpy.round', 'np.round', (['(intervals[axis] / 2 + i * intervals[axis])'], {}), '(intervals[axis] / 2 + i * intervals[axis])\n', (6652, 6695), True, 'import numpy as np\n'), ((8258, 8287), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8269, 8287), True, 'import numpy as np\n'), ((9450, 9472), 'numpy.argwhere', 'np.argwhere', (['(image > 0)'], {}), '(image > 0)\n', (9461, 9472), True, 'import numpy as np\n'), ((9476, 9498), 'numpy.argwhere', 'np.argwhere', (['(image > 0)'], {}), '(image > 0)\n', (9487, 9498), True, 'import numpy as np\n'), ((9502, 9524), 'numpy.argwhere', 'np.argwhere', (['(image > 0)'], {}), '(image > 0)\n', (9513, 9524), True, 'import numpy as np\n'), ((6867, 6906), 'numpy.take', 'np.take', (['struct_arr', 'i_slice'], {'axis': 'axis'}), '(struct_arr, i_slice, axis=axis)\n', (6874, 6906), True, 'import numpy as np\n'), ((8217, 8233), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (8226, 8233), True, 'import numpy as np\n'), 
((7436, 7472), 'numpy.take', 'np.take', (['overlay', 'i_slice'], {'axis': 'axis'}), '(overlay, i_slice, axis=axis)\n', (7443, 7472), True, 'import numpy as np\n'), ((8394, 8423), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8405, 8423), True, 'import numpy as np\n'), ((8427, 8456), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8438, 8456), True, 'import numpy as np\n'), ((8460, 8489), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8471, 8489), True, 'import numpy as np\n'), ((8570, 8599), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8581, 8599), True, 'import numpy as np\n'), ((8603, 8632), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8614, 8632), True, 'import numpy as np\n'), ((8636, 8665), 'numpy.argwhere', 'np.argwhere', (['(image == cluster)'], {}), '(image == cluster)\n', (8647, 8665), True, 'import numpy as np\n')]
import nose import numpy as np from numpy.polynomial.polynomial import polyval import pySDC.helpers.transfer_helper as th from pySDC.core.Collocation import CollBase from pySDC.tests.test_helpers import get_derived_from_in_package classes = [] def setup(): global classes, t_start, t_end # generate random boundaries for the time slice with 0.0 <= t_start < 0.2 and 0.8 <= t_end < 1.0 t_start = np.random.rand(1) * 0.2 t_end = 0.8 + np.random.rand(1) * 0.2 classes = get_derived_from_in_package(CollBase, 'pySDC/implementations/collocation_classes') @nose.tools.with_setup(setup) def test_Q_transfer(): for collclass in classes: yield check_Q_transfer, collclass def check_Q_transfer(collclass): """ A simple test program to check the order of the Q interpolation/restriction """ for M in range(3, 9): Mfine = M Mcoarse = int((Mfine+1)/2.0) coll_fine = collclass(Mfine, 0, 1) coll_coarse = collclass(Mcoarse, 0, 1) assert coll_fine.left_is_node == coll_coarse.left_is_node, 'ERROR: should be using the same class for coarse and fine Q' fine_grid = coll_fine.nodes coarse_grid = coll_coarse.nodes for order in range(2,coll_coarse.num_nodes+1): Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=order, pad=0, equidist_nested=False) Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=order, pad=0) for polyorder in range(1,order+2): coeff = np.random.rand(polyorder) ufine = polyval(fine_grid,coeff) ucoarse = polyval(coarse_grid,coeff) uinter = Pcoll.dot(ucoarse) urestr = Rcoll.dot(ufine) err_inter = np.linalg.norm(uinter-ufine, np.inf) err_restr = np.linalg.norm(urestr-ucoarse, np.inf) if polyorder <= order: assert err_inter < 2E-15, "ERROR: Q-interpolation order is not reached, got %s" %err_inter assert err_restr < 2E-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr else: assert err_inter > 2E-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder @nose.tools.with_setup(setup) def test_Q_transfer_minimal(): for collclass in classes: yield check_Q_transfer_minimal, collclass def check_Q_transfer_minimal(collclass): """ A simple test program to check the order of the Q interpolation/restriction for only 2 coarse nodes """ Mcoarse = 2 coll_coarse = collclass(Mcoarse, 0, 1) for M in range(3, 9): Mfine = M coll_fine = collclass(Mfine, 0, 1) assert coll_fine.left_is_node == coll_coarse.left_is_node, 'ERROR: should be using the same class for coarse and fine Q' fine_grid = coll_fine.nodes coarse_grid = coll_coarse.nodes Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=2, pad=0, equidist_nested=False) Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=2, pad=0) for polyorder in range(1,3): coeff = np.random.rand(polyorder) ufine = polyval(fine_grid,coeff) ucoarse = polyval(coarse_grid,coeff) uinter = Pcoll.dot(ucoarse) urestr = Rcoll.dot(ufine) err_inter = np.linalg.norm(uinter-ufine, np.inf) err_restr = np.linalg.norm(urestr-ucoarse, np.inf) if polyorder <= 2: assert err_inter < 2E-15, "ERROR: Q-interpolation order is not reached, got %s" %err_inter assert err_restr < 2E-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr else: assert err_inter > 2E-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder
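# --- Illustrative usage sketch (added for clarity; not part of the original
# test module). It exercises the transfer helpers on hand-picked equidistant
# grids, assuming they accept any strictly increasing 1D grid just as they
# accept the collocation nodes above. Both errors should be at machine
# precision for polynomials of order <= k.
def demo_transfer_on_equidistant_grids():
    fine_grid = np.linspace(0.0, 1.0, 7)
    coarse_grid = np.linspace(0.0, 1.0, 4)
    Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=2, pad=0, equidist_nested=False)
    Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=2, pad=0)
    coeff = np.random.rand(2)  # random linear polynomial (order <= k)
    err_inter = np.linalg.norm(Pcoll.dot(polyval(coarse_grid, coeff)) - polyval(fine_grid, coeff), np.inf)
    err_restr = np.linalg.norm(Rcoll.dot(polyval(fine_grid, coeff)) - polyval(coarse_grid, coeff), np.inf)
    return err_inter, err_restr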
[ "numpy.polynomial.polynomial.polyval", "numpy.linalg.norm", "pySDC.helpers.transfer_helper.restriction_matrix_1d", "pySDC.tests.test_helpers.get_derived_from_in_package", "numpy.random.rand", "nose.tools.with_setup", "pySDC.helpers.transfer_helper.interpolation_matrix_1d" ]
[((576, 604), 'nose.tools.with_setup', 'nose.tools.with_setup', (['setup'], {}), '(setup)\n', (597, 604), False, 'import nose\n'), ((2290, 2318), 'nose.tools.with_setup', 'nose.tools.with_setup', (['setup'], {}), '(setup)\n', (2311, 2318), False, 'import nose\n'), ((491, 577), 'pySDC.tests.test_helpers.get_derived_from_in_package', 'get_derived_from_in_package', (['CollBase', '"""pySDC/implementations/collocation_classes"""'], {}), "(CollBase,\n 'pySDC/implementations/collocation_classes')\n", (518, 577), False, 'from pySDC.tests.test_helpers import get_derived_from_in_package\n'), ((411, 428), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (425, 428), True, 'import numpy as np\n'), ((2966, 3055), 'pySDC.helpers.transfer_helper.interpolation_matrix_1d', 'th.interpolation_matrix_1d', (['fine_grid', 'coarse_grid'], {'k': '(2)', 'pad': '(0)', 'equidist_nested': '(False)'}), '(fine_grid, coarse_grid, k=2, pad=0,\n equidist_nested=False)\n', (2992, 3055), True, 'import pySDC.helpers.transfer_helper as th\n'), ((3068, 3128), 'pySDC.helpers.transfer_helper.restriction_matrix_1d', 'th.restriction_matrix_1d', (['fine_grid', 'coarse_grid'], {'k': '(2)', 'pad': '(0)'}), '(fine_grid, coarse_grid, k=2, pad=0)\n', (3092, 3128), True, 'import pySDC.helpers.transfer_helper as th\n'), ((453, 470), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (467, 470), True, 'import numpy as np\n'), ((1288, 1381), 'pySDC.helpers.transfer_helper.interpolation_matrix_1d', 'th.interpolation_matrix_1d', (['fine_grid', 'coarse_grid'], {'k': 'order', 'pad': '(0)', 'equidist_nested': '(False)'}), '(fine_grid, coarse_grid, k=order, pad=0,\n equidist_nested=False)\n', (1314, 1381), True, 'import pySDC.helpers.transfer_helper as th\n'), ((1398, 1462), 'pySDC.helpers.transfer_helper.restriction_matrix_1d', 'th.restriction_matrix_1d', (['fine_grid', 'coarse_grid'], {'k': 'order', 'pad': '(0)'}), '(fine_grid, coarse_grid, k=order, pad=0)\n', (1422, 1462), True, 'import pySDC.helpers.transfer_helper as th\n'), ((3187, 3212), 'numpy.random.rand', 'np.random.rand', (['polyorder'], {}), '(polyorder)\n', (3201, 3212), True, 'import numpy as np\n'), ((3233, 3258), 'numpy.polynomial.polynomial.polyval', 'polyval', (['fine_grid', 'coeff'], {}), '(fine_grid, coeff)\n', (3240, 3258), False, 'from numpy.polynomial.polynomial import polyval\n'), ((3280, 3307), 'numpy.polynomial.polynomial.polyval', 'polyval', (['coarse_grid', 'coeff'], {}), '(coarse_grid, coeff)\n', (3287, 3307), False, 'from numpy.polynomial.polynomial import polyval\n'), ((3411, 3449), 'numpy.linalg.norm', 'np.linalg.norm', (['(uinter - ufine)', 'np.inf'], {}), '(uinter - ufine, np.inf)\n', (3425, 3449), True, 'import numpy as np\n'), ((3472, 3512), 'numpy.linalg.norm', 'np.linalg.norm', (['(urestr - ucoarse)', 'np.inf'], {}), '(urestr - ucoarse, np.inf)\n', (3486, 3512), True, 'import numpy as np\n'), ((1535, 1560), 'numpy.random.rand', 'np.random.rand', (['polyorder'], {}), '(polyorder)\n', (1549, 1560), True, 'import numpy as np\n'), ((1585, 1610), 'numpy.polynomial.polynomial.polyval', 'polyval', (['fine_grid', 'coeff'], {}), '(fine_grid, coeff)\n', (1592, 1610), False, 'from numpy.polynomial.polynomial import polyval\n'), ((1636, 1663), 'numpy.polynomial.polynomial.polyval', 'polyval', (['coarse_grid', 'coeff'], {}), '(coarse_grid, coeff)\n', (1643, 1663), False, 'from numpy.polynomial.polynomial import polyval\n'), ((1779, 1817), 'numpy.linalg.norm', 'np.linalg.norm', (['(uinter - ufine)', 'np.inf'], {}), '(uinter - ufine, np.inf)\n', 
(1793, 1817), True, 'import numpy as np\n'), ((1844, 1884), 'numpy.linalg.norm', 'np.linalg.norm', (['(urestr - ucoarse)', 'np.inf'], {}), '(urestr - ucoarse, np.inf)\n', (1858, 1884), True, 'import numpy as np\n')]
from numpy import ndarray

from src.domain.cs_column import Column
import numpy as np

from src.model.stop_at_station_summary import StopAtStationSummary


class CargoSpace(object):
    """
    Represents cargo space in a transport vehicle/ship etc.
    """

    def __init__(self, width: int, height: int):
        self._width: int = width
        self._height: int = height
        self._columns: list = [Column(height) for i in range(width)]

    @property
    def columns(self) -> list:
        return self._columns

    def simulate_stop_at_station(self, station_index: int, packages_to_load: list) -> StopAtStationSummary:
        """
        Simulates a stop at a station: unloads and loads packages and records the activity.
        Args:
            station_index: Current station index.
            packages_to_load: List of packages to load at this station.

        Returns: Summary of the process and the current state of the cargo space.
        """
        movements_sum = 0
        wait_que = []
        packages_per_col = np.zeros(len(self._columns), dtype=int)
        # Unload packages for the current station.
        movements_sum += self._unload_packages(packages_per_col, wait_que, station_index)
        # Load packages for the current station.
        movements_sum += self._load_packages(packages_to_load, packages_per_col)
        # Load packages from the waiting queue.
        movements_sum += self._load_packages(wait_que, packages_per_col)
        return StopAtStationSummary(
            movements_sum=movements_sum,
            layout_dist=packages_per_col.tolist(),
            weight_dist=[column.sum_weight for column in self._columns]
        )

    def _unload_packages(self, packages_per_col: ndarray, wait_que: list, station_index: int) -> int:
        movement = 0
        for index, column in enumerate(self._columns):
            ret_que, ret_movements = column.unload_at_station(station_index)
            movement += ret_movements
            wait_que += ret_que
            packages_per_col[index] = column.count()
        return movement

    def _load_packages(self, packages_to_load: list, packages_per_col: ndarray) -> int:
        movements = 0
        for package in packages_to_load:
            add_index = package.given_col_index
            if packages_per_col[add_index] == self._height:
                add_index = np.argmin(packages_per_col)
            self._columns[add_index].add(package)
            packages_per_col[add_index] += 1
            movements += 1
        return movements
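# --- Illustrative sketch (added; not part of the original module): the
# column-selection rule from _load_packages in isolation. A package goes to
# its requested column unless that column is full, in which case the least
# filled column is chosen via np.argmin.
def _pick_column(requested: int, packages_per_col: ndarray, height: int) -> int:
    if packages_per_col[requested] == height:
        return int(np.argmin(packages_per_col))
    return requested

# e.g. with height 3 and per-column counts [3, 1, 3], requests for the full
# columns 0 and 2 both fall back to column 1:
#   _pick_column(0, np.array([3, 1, 3]), 3) == 1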
[ "src.domain.cs_column.Column", "numpy.argmin" ]
[((407, 421), 'src.domain.cs_column.Column', 'Column', (['height'], {}), '(height)\n', (413, 421), False, 'from src.domain.cs_column import Column\n'), ((2339, 2366), 'numpy.argmin', 'np.argmin', (['packages_per_col'], {}), '(packages_per_col)\n', (2348, 2366), True, 'import numpy as np\n')]
import numpy as np


class BayesLinearRegressor:

    def __init__(self, number_of_features, alpha=1e6):
        '''
        :param number_of_features: Integer number of features in the training rows, excluding the intercept and output values
        :param alpha: Float inverse ridge regularization constant, set to 1e6
        '''
        # alpha is our initial guess on the variance, basically, all parameters initialized to 0 with alpha variance
        # so, you know, just set it super-high. This is the same as L2 regularization, btw!
        # all those weird Bayesian update rules actually amount to very standard linear algebra identities
        # Once you see that it's just updating the moment matrix and the sum of squared residuals, it's straightforward!
        # So those are our internal variables that everything else depends upon
        self.number_of_features = number_of_features
        self.alpha = alpha
        self.beta_means = np.array([0] * (number_of_features + 1), dtype=float)  # + 1 for the intercept
        self.number_of_updates = 0
        self.residual_sum_squares = 0
        self.moment_matrix = np.eye(number_of_features + 2) * 0.0  # + 2 for the intercept and the output
        self.regularization_matrix = np.eye(self.number_of_features + 1) / self.alpha
        self.regularization_matrix[0, 0] = 0  # we don't regularize the intercept term

    def partial_fit(self, X, Y, W=None, reverse=False):
        '''
        The online updating rules
        :param X: Input feature vector(s) as 2D numpy array
        :param Y: Output values as a 2D numpy array with a single column
        :param W: Data weights (relative to unity) as a 1D numpy array
        :param reverse: Boolean, True means that we "unfit" the training rows, otherwise acts as normal
        :return: None
        '''
        # see http://www.biostat.umn.edu/~ph7440/pubh7440/BayesianLinearModelGoryDetails.pdf for gory details
        # clear the frozen parameter sample since we are updating the parameter distributions
        self.frozen_parameter_sample = None

        moment_of_X_before = self.moment_matrix[:-1, :-1]
        beta_means_before = self.beta_means.copy()
        inverted_covariance_matrix_before = moment_of_X_before + self.regularization_matrix

        # Here we concatenate the intercept input value (constant 1), the input vector, and the output value:
        rank_n_obs_update_matrix = np.array([[1] + row + output for row, output in zip(X.tolist(), Y.tolist())])
        if W is None:
            moment_matrix_update_term = rank_n_obs_update_matrix.T @ rank_n_obs_update_matrix
        else:
            moment_matrix_update_term = rank_n_obs_update_matrix.T @ np.diag(W.tolist()) @ rank_n_obs_update_matrix

        if not reverse:
            self.moment_matrix += moment_matrix_update_term
            moment_of_Y_update_term = Y.T @ Y
            self.number_of_updates += 1
        else:
            self.moment_matrix -= moment_matrix_update_term
            moment_of_Y_update_term = -Y.T @ Y
            self.number_of_updates -= 1

        moment_of_X = self.moment_matrix[:-1, :-1]
        moment_of_X_and_Y = self.moment_matrix[:-1, -1]
        moment_of_X_and_Y_update_term = moment_matrix_update_term[:-1, -1]
        inverted_covariance_matrix = moment_of_X + self.regularization_matrix
        covariance_matrix = np.linalg.inv(inverted_covariance_matrix)

        # these two statements are equivalent, so I choose the simpler one, although the latter
        # one is more consistent with the notation I come across in the literature
        self.beta_means = covariance_matrix @ (moment_of_X_and_Y)
        # self.beta_means = covariance_matrix @ (inverted_covariance_matrix_before @ beta_means_before + moment_of_X_and_Y_update_term)

        if self.number_of_updates > len(covariance_matrix) - 1:
            self.residual_sum_squares += (
                moment_of_Y_update_term
                - self.beta_means.T @ inverted_covariance_matrix @ self.beta_means
                + beta_means_before.T @ inverted_covariance_matrix_before @ beta_means_before
            )

    def partial_unfit(self, X, Y):
        return self.partial_fit(X, Y, reverse=True)

    def predict(self, X, use_means=False, freeze_parameter_sample=False):
        '''
        :param X: Input feature vector excluding the intercept constant as a 2D numpy array
        :param use_means: Boolean where True means we just provide the prediction at the mean of the coefficients
            (sometimes referred to as deterministic prediction); otherwise sample parameters from the multivariate
            normal and incorporate the uncertainty of the parameters in your prediction
        :param freeze_parameter_sample: Boolean. When set to True, we sample from the parameters only once
            for each prediction
        :return: Predicted values as a 1D numpy array
        '''
        X_with_intercept = np.array([[1] + row.tolist() for row in X])
        if use_means:
            return X_with_intercept @ self.beta_means
        else:
            # cov_params already carries the residual-variance scaling, so the
            # posterior over the coefficients is N(beta_means, cov_params)
            if freeze_parameter_sample:
                if self.frozen_parameter_sample is None:
                    self.frozen_parameter_sample = np.random.multivariate_normal(
                        self.beta_means,
                        self.cov_params
                    )
                beta = self.frozen_parameter_sample
            else:
                beta = np.random.multivariate_normal(
                    self.beta_means,
                    self.cov_params
                )
            return X_with_intercept @ beta

    @property
    def coef_(self):
        return self.beta_means[1:]

    @property
    def intercept_(self):
        return float(self.beta_means[0])

    @property
    def cov_params(self):
        scale_multiplier = 1.0 / max(1.0, (self.number_of_updates - self.number_of_features - 1))
        moment_of_X = self.moment_matrix[:-1, :-1]
        inverted_covariance_matrix = moment_of_X + np.eye(self.number_of_features + 1) / self.alpha
        return np.linalg.inv(inverted_covariance_matrix) * self.residual_sum_squares * scale_multiplier
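# --- Illustrative usage sketch (added; not part of the original class):
# recover a known linear relation y = 2 + 3*x from noisy synthetic data fed
# in mini-batches. All values below are made-up demonstration data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(200, 1)
    Y = 2.0 + 3.0 * X + 0.01 * rng.randn(200, 1)
    model = BayesLinearRegressor(number_of_features=1)
    for start in range(0, 200, 50):  # four incremental updates
        model.partial_fit(X[start:start + 50], Y[start:start + 50])
    print('intercept (~2):', model.intercept_)
    print('slope (~3):', model.coef_)
    print('deterministic predictions:', model.predict(X[:3], use_means=True))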
[ "numpy.eye", "numpy.linalg.inv", "numpy.array", "numpy.random.multivariate_normal" ]
[((965, 1021), 'numpy.array', 'np.array', (['([0] * (number_of_features + 1))'], {'dtype': 'np.float'}), '([0] * (number_of_features + 1), dtype=np.float)\n', (973, 1021), True, 'import numpy as np\n'), ((3376, 3417), 'numpy.linalg.inv', 'np.linalg.inv', (['inverted_covariance_matrix'], {}), '(inverted_covariance_matrix)\n', (3389, 3417), True, 'import numpy as np\n'), ((1149, 1179), 'numpy.eye', 'np.eye', (['(number_of_features + 2)'], {}), '(number_of_features + 2)\n', (1155, 1179), True, 'import numpy as np\n'), ((1263, 1298), 'numpy.eye', 'np.eye', (['(self.number_of_features + 1)'], {}), '(self.number_of_features + 1)\n', (1269, 1298), True, 'import numpy as np\n'), ((5578, 5698), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.beta_means.T[0]', '(self.residual_sum_squares * scale_multiplier * self.cov_params)'], {}), '(self.beta_means.T[0], self.\n residual_sum_squares * scale_multiplier * self.cov_params)\n', (5607, 5698), True, 'import numpy as np\n'), ((6189, 6224), 'numpy.eye', 'np.eye', (['(self.number_of_features + 1)'], {}), '(self.number_of_features + 1)\n', (6195, 6224), True, 'import numpy as np\n'), ((6253, 6294), 'numpy.linalg.inv', 'np.linalg.inv', (['inverted_covariance_matrix'], {}), '(inverted_covariance_matrix)\n', (6266, 6294), True, 'import numpy as np\n'), ((5306, 5419), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.beta_means.T[0]', '(self.residual_sum_squares * scale_multiplier * self.cov)'], {}), '(self.beta_means.T[0], self.\n residual_sum_squares * scale_multiplier * self.cov)\n', (5335, 5419), True, 'import numpy as np\n')]
import numpy as np


class Camera(object):
    """Camera is a simple finite pinhole camera defined by the matrices K, R
    and t. See "Multiple View Geometry in Computer Vision" by <NAME> and
    <NAME> for notation.

    Parameters
    ----------
    K: The 3x3 intrinsic camera parameters
    R: The 3x3 rotation matrix from world to camera coordinates
    t: The 3x1 translation vector for the camera center in camera coordinates
       (so that the camera center is the origin in the camera coordinates)
    """
    def __init__(self, K, R, t):
        # Make sure the input data have the right shape
        assert K.shape == (3, 3)
        assert R.shape == (3, 3)
        assert t.shape == (3, 1)

        self._K = K
        self._R = R
        self._t = t

        self._P = None
        self._P_pinv = None
        self._center = None

    @property
    def K(self):
        return self._K

    @property
    def R(self):
        return self._R

    @property
    def t(self):
        return self._t

    @property
    def center(self):
        # Compute the center of the camera in homogeneous coordinates and return
        # it as a 4x1 vector
        if self._center is None:
            self._center = np.vstack(
                [(-np.linalg.inv(self.R)).dot(self.t), [1]]
            ).astype(np.float32)
            assert self._center.shape == (4, 1)

        return self._center

    @property
    def P(self):
        # Compute and return a 3x4 projection matrix
        if self._P is None:
            self._P = self._K.dot(np.hstack([self._R, self._t]))

        return self._P

    @property
    def P_pinv(self):
        if self._P_pinv is None:
            self._P_pinv = np.linalg.pinv(self.P)

        return self._P_pinv
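# --- Illustrative usage sketch (added; not part of the original module):
# project a homogeneous world point with a trivial camera (identity rotation,
# unit translation along z). The numbers are made up for demonstration only.
if __name__ == '__main__':
    K = np.array([[1000.0, 0.0, 320.0],
                  [0.0, 1000.0, 240.0],
                  [0.0, 0.0, 1.0]])
    R = np.eye(3)
    t = np.array([[0.0], [0.0], [1.0]])
    cam = Camera(K, R, t)
    X = np.array([0.1, 0.2, 4.0, 1.0])   # homogeneous world point
    x = cam.P.dot(X)                     # project: x = P X
    u, v = x[0] / x[2], x[1] / x[2]      # dehomogenize to pixel coordinates
    print(u, v)                          # -> 340.0 280.0
    print(cam.center.ravel())            # camera center: ~(0, 0, -1, 1)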
[ "numpy.linalg.inv", "numpy.linalg.pinv", "numpy.hstack" ]
[((1711, 1733), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.P'], {}), '(self.P)\n', (1725, 1733), True, 'import numpy as np\n'), ((1560, 1589), 'numpy.hstack', 'np.hstack', (['[self._R, self._t]'], {}), '([self._R, self._t])\n', (1569, 1589), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.linalg.inv', 'np.linalg.inv', (['self.R'], {}), '(self.R)\n', (1276, 1284), True, 'import numpy as np\n')]
#! python
# -*- coding: utf-8 -*-
##
import time
import wx
import cv2
import numpy as np
from mwx.controls import Param, LParam
from mwx.controls import ToggleButton, Choice
from mwx.graphman import Layer, Thread
import editor as edi


class Plugin(Layer):
    """Plugin for the camera viewer.
    """
    menu = "Cameras"
    menustr = "Camera &viewer"

    camerasys = property(lambda self: self.camera_selector.value)
    cameraman = property(lambda self: self.parent.require(self.camerasys))

    def Init(self):
        self.viewer = Thread(self)

        self.button = ToggleButton(self, "View camera", icon='cam',
            handler=lambda v: self.viewer.Start(self.run) if v.IsChecked()
                              else self.viewer.Stop())

        self.rate_param = LParam('rate', (100,500,100), 500,
            tip="refresh speed [ms] (>= 100ms)")

        self.size_param = Param('size', (128,256,512,1024), 512,
            tip="resizing view window (<= 1k)")

        self.camera_selector = Choice(self,
            choices=['JeolCamera', 'RigakuCamera'], readonly=1)

        self.layout((
                self.button,
            ),
        )
        self.layout((
                self.rate_param,
                self.size_param,
                self.camera_selector,
            ),
            title="Setting", row=1, show=0,
            type='vspin', lw=40, tw=40, cw=-1
        )

    def init_session(self, session):
        self.rate_param.value = session.get('rate')
        self.size_param.value = session.get('size')
        self.camera_selector.value = session.get('camera')

    def save_session(self, session):
        session.update({
            'rate': self.rate_param.value,
            'size': self.size_param.value,
            'camera': self.camera_selector.value,
        })

    def Destroy(self):
        if self.viewer.is_active:
            self.viewer.Stop()
        return Layer.Destroy(self)

    def run(self):
        try:
            title = self.__module__
            if not self.cameraman:
                print(self.message("- Camera manager is not selected."))
                return
            while self.viewer.is_active:
                src = edi.imconv(self.cameraman.capture())
                h, w = src.shape
                H = self.size_param.value
                W = H * w // h
                dst = cv2.resize(src, (W, H), interpolation=cv2.INTER_AREA)
                ## dst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)

                ## overlay the aiming reticle (crosshair and circles) via xor
                if 1:
                    ## lines and circles with color:cyan #00c0c0
                    ## c = (192,192,0)
                    c = 255
                    cx, cy = W//2, H//2
                    buf = np.zeros((H, W), dtype=dst.dtype)
                    ## buf = np.resize(0, (H, W)).astype(dst.dtype)
                    cv2.line(buf, (0, cy), (W, cy), c, 1)
                    cv2.line(buf, (cx, 0), (cx, H), c, 1)
                    cv2.circle(buf, (cx, cy), cx//2, c, 1)
                    cv2.circle(buf, (cx, cy), cx//4, c, 1)
                    dst = cv2.bitwise_xor(buf, dst)

                cv2.imshow(title, dst)
                cv2.waitKey(self.rate_param.value)
                if cv2.getWindowProperty(title, 0) < 0:
                    self.button.Value = False
                    self.viewer.Stop()
                    break
        finally:
            cv2.destroyAllWindows()


if __name__ == '__main__':
    from plugins import JeolCamera, RigakuCamera
    from mwx.graphman import Frame

    app = wx.App()
    frm = Frame(None)
    frm.load_plug(__file__, show=1)
    frm.load_plug(JeolCamera, show=0)
    frm.load_plug(RigakuCamera, show=0)
    frm.Show()
    app.MainLoop()
[ "cv2.line", "cv2.circle", "cv2.bitwise_xor", "mwx.controls.Param", "cv2.destroyAllWindows", "mwx.graphman.Frame", "cv2.waitKey", "cv2.imshow", "mwx.controls.Choice", "numpy.zeros", "mwx.graphman.Layer.Destroy", "wx.App", "mwx.controls.LParam", "mwx.graphman.Thread", "cv2.getWindowProperty", "cv2.resize" ]
[((3655, 3663), 'wx.App', 'wx.App', ([], {}), '()\n', (3661, 3663), False, 'import wx\n'), ((3674, 3685), 'mwx.graphman.Frame', 'Frame', (['None'], {}), '(None)\n', (3679, 3685), False, 'from mwx.graphman import Frame\n'), ((542, 554), 'mwx.graphman.Thread', 'Thread', (['self'], {}), '(self)\n', (548, 554), False, 'from mwx.graphman import Layer, Thread\n'), ((791, 864), 'mwx.controls.LParam', 'LParam', (['"""rate"""', '(100, 500, 100)', '(500)'], {'tip': '"""refresh speed [ms] (>= 100ms)"""'}), "('rate', (100, 500, 100), 500, tip='refresh speed [ms] (>= 100ms)')\n", (797, 864), False, 'from mwx.controls import Param, LParam\n'), ((889, 966), 'mwx.controls.Param', 'Param', (['"""size"""', '(128, 256, 512, 1024)', '(512)'], {'tip': '"""resizing view window (<= 1k)"""'}), "('size', (128, 256, 512, 1024), 512, tip='resizing view window (<= 1k)')\n", (894, 966), False, 'from mwx.controls import Param, LParam\n'), ((1004, 1068), 'mwx.controls.Choice', 'Choice', (['self'], {'choices': "['JeolCamera', 'RigakuCamera']", 'readonly': '(1)'}), "(self, choices=['JeolCamera', 'RigakuCamera'], readonly=1)\n", (1010, 1068), False, 'from mwx.controls import ToggleButton, Choice\n'), ((1936, 1955), 'mwx.graphman.Layer.Destroy', 'Layer.Destroy', (['self'], {}), '(self)\n', (1949, 1955), False, 'from mwx.graphman import Layer, Thread\n'), ((3503, 3526), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3524, 3526), False, 'import cv2\n'), ((2401, 2454), 'cv2.resize', 'cv2.resize', (['src', '(W, H)'], {'interpolation': 'cv2.INTER_AREA'}), '(src, (W, H), interpolation=cv2.INTER_AREA)\n', (2411, 2454), False, 'import cv2\n'), ((3216, 3238), 'cv2.imshow', 'cv2.imshow', (['title', 'dst'], {}), '(title, dst)\n', (3226, 3238), False, 'import cv2\n'), ((3255, 3289), 'cv2.waitKey', 'cv2.waitKey', (['self.rate_param.value'], {}), '(self.rate_param.value)\n', (3266, 3289), False, 'import cv2\n'), ((2795, 2828), 'numpy.zeros', 'np.zeros', (['(H, W)'], {'dtype': 'dst.dtype'}), '((H, W), dtype=dst.dtype)\n', (2803, 2828), True, 'import numpy as np\n'), ((2917, 2954), 'cv2.line', 'cv2.line', (['buf', '(0, cy)', '(W, cy)', 'c', '(1)'], {}), '(buf, (0, cy), (W, cy), c, 1)\n', (2925, 2954), False, 'import cv2\n'), ((2975, 3012), 'cv2.line', 'cv2.line', (['buf', '(cx, 0)', '(cx, H)', 'c', '(1)'], {}), '(buf, (cx, 0), (cx, H), c, 1)\n', (2983, 3012), False, 'import cv2\n'), ((3033, 3073), 'cv2.circle', 'cv2.circle', (['buf', '(cx, cy)', '(cx // 2)', 'c', '(1)'], {}), '(buf, (cx, cy), cx // 2, c, 1)\n', (3043, 3073), False, 'import cv2\n'), ((3092, 3132), 'cv2.circle', 'cv2.circle', (['buf', '(cx, cy)', '(cx // 4)', 'c', '(1)'], {}), '(buf, (cx, cy), cx // 4, c, 1)\n', (3102, 3132), False, 'import cv2\n'), ((3157, 3182), 'cv2.bitwise_xor', 'cv2.bitwise_xor', (['buf', 'dst'], {}), '(buf, dst)\n', (3172, 3182), False, 'import cv2\n'), ((3326, 3357), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['title', '(0)'], {}), '(title, 0)\n', (3347, 3357), False, 'import cv2\n')]
from __future__ import print_function import tensorflow as tf import numpy as np import pytest import sys from tensorflow.python.ops import array_ops shapes = [ (3, 4), (50, 70, 12) ] seed = 123 def _test_random_func(func_name, shape): print('func_name', func_name) func = eval(func_name) with tf.Graph().as_default(): with tf.device('/cpu:0'): W_t = tf.Variable(func(shape, seed=seed)) with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess: sess.run(tf.initialize_all_variables()) W_cpu = sess.run(W_t) with tf.device('/gpu:0'): W_t = tf.Variable(func(shape, seed=seed)) with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess: sess.run(tf.initialize_all_variables()) W_gpu = sess.run(W_t) if np.prod(np.array(shape)) < 20: print('W_cpu', W_cpu) print('W_gpu', W_gpu) else: print('W_cpu.reshape(-1)[:20]', W_cpu.reshape(-1)[:20]) print('W_gpu.reshape(-1)[:20]', W_gpu.reshape(-1)[:20]) assert np.all(np.abs(W_cpu - W_gpu) < 1e-4) @pytest.mark.parametrize( 'shape', shapes) def test_random_normal(shape): _test_random_func('tf.random_normal', shape) @pytest.mark.parametrize( 'shape', shapes) def test_random_uniform(shape): _test_random_func('tf.random_uniform', shape) @pytest.mark.parametrize( 'shape', shapes) @pytest.mark.skip(reason='Causes abort currently') def test_truncated_normal(shape): _test_random_func('tf.truncated_normal', shape) if __name__ == '__main__': if len(sys.argv) == 1: print('Please run using py.test') else: eval('%s((3, 4))' % sys.argv[1])
[ "numpy.abs", "tensorflow.device", "tensorflow.ConfigProto", "numpy.array", "tensorflow.initialize_all_variables", "tensorflow.Graph", "pytest.mark.parametrize", "pytest.mark.skip" ]
[((1229, 1269), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'shapes'], {}), "('shape', shapes)\n", (1252, 1269), False, 'import pytest\n'), ((1362, 1402), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'shapes'], {}), "('shape', shapes)\n", (1385, 1402), False, 'import pytest\n'), ((1497, 1537), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', 'shapes'], {}), "('shape', shapes)\n", (1520, 1537), False, 'import pytest\n'), ((1548, 1597), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Causes abort currently"""'}), "(reason='Causes abort currently')\n", (1564, 1597), False, 'import pytest\n'), ((357, 376), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (366, 376), True, 'import tensorflow as tf\n'), ((628, 647), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (637, 647), True, 'import tensorflow as tf\n'), ((319, 329), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (327, 329), True, 'import tensorflow as tf\n'), ((546, 575), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (573, 575), True, 'import tensorflow as tf\n'), ((817, 846), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (844, 846), True, 'import tensorflow as tf\n'), ((909, 924), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (917, 924), True, 'import numpy as np\n'), ((1196, 1217), 'numpy.abs', 'np.abs', (['(W_cpu - W_gpu)'], {}), '(W_cpu - W_gpu)\n', (1202, 1217), True, 'import numpy as np\n'), ((468, 510), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (482, 510), True, 'import tensorflow as tf\n'), ((739, 781), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (753, 781), True, 'import tensorflow as tf\n')]
import os
import numpy as np
import pandas as pd

'''This script preprocesses the labels, finds the mistakes in them, and stores
the labels in a unified format in the processed_label directory'''

file_dic_Extra = os.listdir('../../label/Extra_Labels')
file_dic_Train = os.listdir('../../label/Train_Labels')
file_dic_Test = os.listdir('../../label/Test_Labels')

# store the gibbon call duration distribution
duration_dist = np.array([])
duration_dist2 = np.array([])

for file_name in file_dic_Extra:
    # go through the Extra_Labels directory
    if file_name[0] == 'g':
        gibbon_timestamps = pd.read_csv('../../label/Extra_Labels/' + file_name, sep=',')
        duration = np.asarray(gibbon_timestamps['Duration'])
        duration_dist = np.concatenate((duration_dist, duration), axis=0)
        # test whether the duration equals 'End' - 'Start'
        duration2 = np.asarray(gibbon_timestamps['End'] - gibbon_timestamps['Start'])
        duration_dist2 = np.concatenate((duration_dist2, duration2), axis=0)
        if duration.size != 0:
            if min(duration) <= 0:
                print(file_name, 'has wrong record')
        gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[2:], index=0)

for file_name in file_dic_Train:
    # go through the Train_Labels directory
    if file_name[0] == 'g':
        gibbon_timestamps = pd.read_csv('../../label/Train_Labels/' + file_name, sep=',')
        duration = np.asarray(gibbon_timestamps['Duration'])
        duration_dist = np.concatenate((duration_dist, duration), axis=0)
        # test whether the duration equals 'End' - 'Start'
        duration2 = np.asarray(gibbon_timestamps['End'] - gibbon_timestamps['Start'])
        duration_dist2 = np.concatenate((duration_dist2, duration2), axis=0)
        if duration.size != 0:
            if min(duration) <= 0:
                print(file_name, 'has wrong record')
        gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[2:], index=0)

# the result shows that the duration equals 'End' - 'Start'
test_duration = duration_dist2 == duration_dist
duration_test_result = np.where(test_duration == False)
if duration_test_result[0].size == 0:
    print('duration equals end - start')
else:
    print('duration record typo exists')

for file_name in file_dic_Test:
    # go through the Test_Labels directory and save data to the processed_label directory
    gibbon_timestamps = pd.read_csv('../../label/Test_Labels/' + file_name, sep=',')
    gibbon_timestamps['End'] = gibbon_timestamps['Start'] + gibbon_timestamps['Duration']
    gibbon_timestamps = gibbon_timestamps[['Start', 'End', 'Duration']]
    # recompute the duration for the current file (it was stale from the previous loop)
    duration = np.asarray(gibbon_timestamps['Duration'])
    if duration.size != 0:
        if min(duration) <= 0:
            print(file_name, 'has wrong record')
    gibbon_timestamps.to_csv('../../label/processed_label/' + file_name[:-9] + '.data', index=0)

# g_HGSM3BD_0+1_20160305_060000.data has wrong record
# g_HGSM3AC_0+1_20160312_055400.data has wrong record
# these two files have zero or negative durations because of typos; the errors
# have been fixed in processed_label manually.
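# --- Illustrative sketch (added; not part of the original script): the same
# Duration == End - Start consistency check, demonstrated on a toy in-memory
# frame instead of the real label files.
_demo = pd.DataFrame({'Start': [1.0, 5.0], 'End': [3.5, 9.0], 'Duration': [2.5, 4.0]})
_ok = np.asarray(_demo['Duration']) == np.asarray(_demo['End'] - _demo['Start'])
assert _ok.all(), 'toy frame should satisfy Duration == End - Start'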
[ "pandas.read_csv", "numpy.asarray", "numpy.where", "numpy.array", "os.listdir", "numpy.concatenate" ]
[((203, 241), 'os.listdir', 'os.listdir', (['"""../../label/Extra_Labels"""'], {}), "('../../label/Extra_Labels')\n", (213, 241), False, 'import os\n'), ((259, 297), 'os.listdir', 'os.listdir', (['"""../../label/Train_labels"""'], {}), "('../../label/Train_labels')\n", (269, 297), False, 'import os\n'), ((314, 351), 'os.listdir', 'os.listdir', (['"""../../label/Test_labels"""'], {}), "('../../label/Test_labels')\n", (324, 351), False, 'import os\n'), ((414, 426), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (422, 426), True, 'import numpy as np\n'), ((444, 456), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (452, 456), True, 'import numpy as np\n'), ((2134, 2166), 'numpy.where', 'np.where', (['(test_duration == False)'], {}), '(test_duration == False)\n', (2142, 2166), True, 'import numpy as np\n'), ((2435, 2495), 'pandas.read_csv', 'pd.read_csv', (["('../../label/Test_Labels/' + file_name)"], {'sep': '""","""'}), "('../../label/Test_Labels/' + file_name, sep=',')\n", (2446, 2495), True, 'import pandas as pd\n'), ((588, 649), 'pandas.read_csv', 'pd.read_csv', (["('../../label/Extra_Labels/' + file_name)"], {'sep': '""","""'}), "('../../label/Extra_Labels/' + file_name, sep=',')\n", (599, 649), True, 'import pandas as pd\n'), ((669, 710), 'numpy.asarray', 'np.asarray', (["gibbon_timestamps['Duration']"], {}), "(gibbon_timestamps['Duration'])\n", (679, 710), True, 'import numpy as np\n'), ((735, 784), 'numpy.concatenate', 'np.concatenate', (['(duration_dist, duration)'], {'axis': '(0)'}), '((duration_dist, duration), axis=0)\n', (749, 784), True, 'import numpy as np\n'), ((873, 938), 'numpy.asarray', 'np.asarray', (["(gibbon_timestamps['End'] - gibbon_timestamps['Start'])"], {}), "(gibbon_timestamps['End'] - gibbon_timestamps['Start'])\n", (883, 938), True, 'import numpy as np\n'), ((964, 1015), 'numpy.concatenate', 'np.concatenate', (['(duration_dist2, duration2)'], {'axis': '(0)'}), '((duration_dist2, duration2), axis=0)\n', (978, 1015), True, 'import numpy as np\n'), ((1365, 1426), 'pandas.read_csv', 'pd.read_csv', (["('../../label/Train_Labels/' + file_name)"], {'sep': '""","""'}), "('../../label/Train_Labels/' + file_name, sep=',')\n", (1376, 1426), True, 'import pandas as pd\n'), ((1446, 1487), 'numpy.asarray', 'np.asarray', (["gibbon_timestamps['Duration']"], {}), "(gibbon_timestamps['Duration'])\n", (1456, 1487), True, 'import numpy as np\n'), ((1512, 1561), 'numpy.concatenate', 'np.concatenate', (['(duration_dist, duration)'], {'axis': '(0)'}), '((duration_dist, duration), axis=0)\n', (1526, 1561), True, 'import numpy as np\n'), ((1650, 1715), 'numpy.asarray', 'np.asarray', (["(gibbon_timestamps['End'] - gibbon_timestamps['Start'])"], {}), "(gibbon_timestamps['End'] - gibbon_timestamps['Start'])\n", (1660, 1715), True, 'import numpy as np\n'), ((1741, 1792), 'numpy.concatenate', 'np.concatenate', (['(duration_dist2, duration2)'], {'axis': '(0)'}), '((duration_dist2, duration2), axis=0)\n', (1755, 1792), True, 'import numpy as np\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dataclasses import dataclass, field
from typing import Callable, List

from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle
from astropy.time import Time
from astropy.units.quantity import Quantity
from astropy.wcs import WCS
from astropy.visualization.wcsaxes import WCSAxesSubplot
from scipy.spatial.transform import Rotation
from matplotlib.patches import Rectangle
from shapely.geometry import Polygon, Point
from shapely.geometry import MultiPoint
from shapely.prepared import prep
from descartes.patch import PolygonPatch
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
import pandas as pd
import sys

from .util import get_projection


def identity_transformation(position):
  ''' An identity transformation function.

  This function is a fallback function for the image distortion.
  The function requires a tuple of two arrays. The first and second elements
  are the x- and y-positions on the focal plane without any distortion,
  respectively. This function returns the positions as they are.

  Parameters:
    position: A numpy.array with the shape of (2, Nsrc). The first element
              contains the x-positions, while the second element contains
              the y-positions.

  Return:
    A numpy.ndarray of the input coordinates.
  '''
  return np.array(position)


@dataclass
class Optics(object):
  ''' Definition of optical components.

  Attributes:
    pointing (SkyCoord)    : the direction of the telescope pointing.
    position_angle (Angle) : the position angle of the telescope.
    focal_length (Quantity): the focal length of the telescope in meter.
    diameter (Quantity)    : the diameter of the telescope in meter.
    valid_region (Polygon) : the valid region of the focal plane.
    margin (Quantity)      : the margin of the valid region (buffer).
    distortion (function)  : a function to distort the focal plane image.
  '''
  pointing: SkyCoord
  position_angle: Angle = Angle(0.0, unit='degree')
  focal_length: Quantity = 7.3*u.m
  diameter: Quantity = 0.4*u.m
  valid_region: Polygon = Point(0,0).buffer(30000)
  margin: Quantity = 5000*u.um
  distortion: Callable = identity_transformation

  @property
  def scale(self):
    ''' A conversion factor from sky to focal plane in degree/um. '''
    return (1.0*u.rad/self.focal_length).to(u.deg/u.um)

  @property
  def center(self):
    ''' A dummy position to define the center of the focal plane. '''
    return SkyCoord(0*u.deg,0*u.deg,frame='icrs')

  @property
  def pointing_angle(self):
    ''' Angle set to define the pointing position and orientation. '''
    ## use the ICRS frame in calculation.
    icrs = self.pointing.icrs
    ## calculate position angle in the ICRS frame.
    north = self.pointing.directional_offset_by(0.0,1*u.arcsec)
    delta = self.pointing.icrs.position_angle(north)
    position_angle = -self.position_angle.rad-delta.rad
    return np.array((icrs.ra.rad,-icrs.dec.rad,position_angle))

  def set_distortion(self, distortion):
    ''' Assign a distortion function.

    The argument of the distortion function should be a numpy.array with
    the shape of (2, Nsrc). The first element contains the x-positions,
    while the second element contains the y-positions.

    Parameters:
      distortion (function): a function to distort the focal plane image.
    '''
    self.distortion = distortion

  def block(self, position):
    ''' Block sources outside the valid region (plus margin).

    Parameters:
      position (ndarray): source positions on the focal plane w/o distortion.

    Return:
      A boolean array; True if the source falls outside the valid region
      (i.e., the source is blocked).
    '''
    mp = MultiPoint(position.T)
    polygon = prep(self.valid_region.buffer(self.margin.to_value(u.um)))
    return np.array([not polygon.contains(p) for p in mp.geoms])

  def imaging(self, sources, epoch=None):
    ''' Map celestial positions onto the focal plane.

    Parameters:
      sources (SkyCoord): the coordinates of sources.
      epoch (Time): the epoch of the observation.

    Return:
      A `DataFrame` instance. The DataFrame contains five columns: the "x"
      and "y" columns are the positions on the focal plane in micron, the
      "ra" and "dec" columns are the original celestial positions in the
      ICRS frame, and the "blocked" column flags sources outside the valid
      region.
    '''
    try:
      if epoch is not None:
        sources = sources.apply_space_motion(epoch)
    except Exception as e:
      print('No proper motion information is available.', file=sys.stderr)
      print('The positions are not updated to new epoch.', file=sys.stderr)
    icrs = sources.transform_to('icrs')
    xyz = icrs.cartesian.xyz
    r = Rotation.from_euler('zyx', -self.pointing_angle)
    pqr = r.as_matrix() @ xyz
    if pqr.ndim==1: pqr = np.expand_dims(pqr,axis=1)
    obj = SkyCoord(pqr.T, obstime=epoch,
                   representation_type='cartesian').transform_to('icrs')
    obj.representation_type = 'spherical'
    proj = get_projection(self.center,self.scale.to_value())
    pos = np.array(obj.to_pixel(proj, origin=0))
    blocked = self.block(pos)
    pos = self.distortion(pos)

    return pd.DataFrame({
      'x': pos[0], 'y': pos[1],
      'ra': icrs.ra, 'dec': icrs.dec,
      'blocked': blocked
    })


@dataclass
class PixelDisplacement(object):
  ''' Definition of the pixel non-uniformity.

  Attributes:
    dx (ndarray): a two dimensional array with the same size as the detector.
                  each element contains the x-displacement of the pixel.
    dy (ndarray): a two dimensional array with the same size as the detector.
                  each element contains the y-displacement of the pixel.
  '''
  dx: np.ndarray = None
  dy: np.ndarray = None

  def initialize(self, naxis1, naxis2):
    ''' Initialize the displacement array with zeros.

    Parameters:
      naxis1 (int): the detector size along with NAXIS1.
      naxis2 (int): the detector size along with NAXIS2.
    '''
    self.dx = np.zeros((naxis2, naxis1))
    self.dy = np.zeros((naxis2, naxis1))

  def evaluate(self, x, y):
    ''' Evaluate the source position displacement.

    Parameters:
      position (ndarray): a numpy.ndarray with the shape of (2, N(sources)).
                          the first array contains the x-coordinates, while
                          the second does the y-coordinates.

    Note:
      Not implemented yet.
    '''
    return (x,y)


@dataclass
class Detector(object):
  ''' Definition of a detector.

  Attributes:
    naxis1 (int)          : detector pixels along with NAXIS1.
    naxis2 (int)          : detector pixels along with NAXIS2.
    pixel_scale (Quantity): nominal detector pixel scale.
    offset_dx (Quantity)  : the offset along the x-axis.
    offset_dy (Quantity)  : the offset along the y-axis.
    position_angle (Angle): the position angle of the detector.
    displacement (PixelDisplacement):
        an instance to define the displacements of the sources due to
        the pixel non-uniformity.
  '''
  naxis1: int = 4096
  naxis2: int = 4096
  pixel_scale: Quantity = 10*u.um
  offset_dx: Quantity = 0*u.um
  offset_dy: Quantity = 0*u.um
  position_angle: Angle = Angle(0.0, unit='degree')
  displacement: PixelDisplacement = None

  def __post_init__(self):
    if self.displacement is None:
      self.displacement = PixelDisplacement()
      self.displacement.initialize(self.naxis1,self.naxis2)

  @property
  def width(self):
    ''' The physical width of the detector. '''
    return self.naxis1*self.pixel_scale.to_value(u.um)

  @property
  def height(self):
    ''' The physical height of the detector.
    '''
    return self.naxis2*self.pixel_scale.to_value(u.um)

  @property
  def xrange(self):
    ''' The x-axis range of the detector. '''
    return np.array((-self.width/2,self.width/2))

  @property
  def yrange(self):
    ''' The y-axis range of the detector. '''
    return np.array((-self.height/2,self.height/2))

  @property
  def patch(self):
    ''' The footprint of the detector on the focal plane as a patch. '''
    c,s = np.cos(self.position_angle.rad),np.sin(self.position_angle.rad)
    x0,y0 = self.offset_dx.to_value(u.um),self.offset_dy.to_value(u.um)
    x1 = x0 - (+ self.width*c - self.height*s)/2
    y1 = y0 - (+ self.width*s + self.height*c)/2
    return Rectangle((x1,y1), width=self.width, height=self.height,
                     angle=self.position_angle.deg,
                     ec='r', linewidth=2, fill=False)

  @property
  def footprint(self):
    ''' The footprint of the detector on the focal plane. '''
    c,s = np.cos(self.position_angle.rad),np.sin(self.position_angle.rad)
    x0,y0 = self.offset_dx.to_value(u.um),self.offset_dy.to_value(u.um)
    x1 = x0 - (+ self.width*c - self.height*s)/2
    y1 = y0 - (+ self.width*s + self.height*c)/2
    x2 = x0 - (- self.width*c - self.height*s)/2
    y2 = y0 - (- self.width*s + self.height*c)/2
    x3 = x0 - (- self.width*c + self.height*s)/2
    y3 = y0 - (- self.width*s - self.height*c)/2
    x4 = x0 - (+ self.width*c + self.height*s)/2
    y4 = y0 - (+ self.width*s - self.height*c)/2
    return Polygon(([x1,y1],[x2,y2],[x3,y3],[x4,y4]))

  def align(self, x, y):
    ''' Align the source positions to the detector.

    Parameters:
      x (Series): the x-coordinates on the focal plane.
      y (Series): the y-coordinates on the focal plane.

    Return:
      The tuple of the x- and y-positions of the sources,
      which are remapped onto the detector coordinates.
    '''
    c,s = np.cos(-self.position_angle.rad),np.sin(-self.position_angle.rad)
    dx,dy = x-self.offset_dx.to_value(u.um), y-self.offset_dy.to_value(u.um)
    return c*dx-s*dy, s*dx+c*dy

  def capture(self, position):
    ''' Calculate the positions of the sources on the detector.

    Parameters:
      position (DataFrame): the positions of the sources on the focal plane.
          the "x" and "y" columns are respectively the x- and y-positions
          of the sources in units of micron.

    Return:
      A `DataFrame` containing the sources that fall on this detector and
      are not blocked. The "x" and "y" columns are the positions on the
      detector. The "ra" and "dec" columns are the original positions in
      the ICRS frame.
    '''
    x,y = self.align(position.x, position.y)
    x,y = self.displacement.evaluate(x,y)
    position.x = x
    position.y = y
    bf = ~position.blocked
    xf = ((self.xrange[0] < x) & (x < self.xrange[1]))
    yf = ((self.yrange[0] < y) & (y < self.yrange[1]))
    return position.loc[xf&yf&bf,:]


@dataclass
class Telescope(object):
  ''' An imaginary telescope instance.

  The `Telescope` class is composed of an `Optics` instance and a list of
  `Detector` instances. This instance organizes the alignment of the
  detectors and converts the coordinates of the astronomical sources into
  the positions on the detectors.

  Attributes:
    pointing (SkyCoord)   : the direction of the telescope pointing.
    position_angle (Angle): the position angle of the telescope.
    optics (Optics)       : the optical components of the telescope.
    detectors (List[Detector]): the detectors on the focal plane.
  '''
  pointing: SkyCoord = None
  position_angle: Angle = None
  optics: Optics = None
  detectors: List[Detector] = None

  def __post_init__(self):
    if self.optics is None:
      self.optics = Optics(self.pointing, self.position_angle)
    else:
      self.pointing = self.optics.pointing
      self.position_angle = self.optics.position_angle
    if self.detectors is None:
      self.detectors = [Detector(),]
    assert self.optics is not None
    assert self.detectors is not None

  def set_distortion(self, distortion):
    ''' Set a distortion function to the optics.
    Parameters:
      distortion (function): a function to distort the focal plane image.
    '''
    self.optics.set_distortion(distortion)

  def get_footprints(self, **options):
    ''' Obtain detector footprints on the sky.

    Options:
      frame (string): specify the coordinate frame of the footprint.
      limit (bool): limit the footprints within the valid region.
      patch (bool): obtain PolygonPatch instead of Polygon.
    '''
    frame = options.pop('frame', self.pointing.frame.name)
    limit = options.pop('limit', True)
    patch = options.pop('patch', False)
    if self.pointing.frame.name == 'galactic':
      l0 = self.pointing.galactic.l
      b0 = self.pointing.galactic.b
    else:
      l0 = self.pointing.icrs.ra
      b0 = self.pointing.icrs.dec

    def generate(e):
      frame = self.pointing.frame
      def func(x):
        pos = x.reshape((-1,2))
        p0 = SkyCoord(pos[:,0], pos[:,1], frame=frame, unit=u.deg)
        res = self.optics.imaging(p0)
        return (e-res[['x','y']].to_numpy()).flatten()
      return func

    footprints = []
    valid_region = self.optics.valid_region
    for d in self.detectors:
      fp = valid_region.intersection(d.footprint) if limit else d.footprint
      edge = np.array(fp.boundary.coords[0:-1])
      p0 = np.tile([l0.deg,b0.deg],edge.shape[0])
      func = generate(edge)
      res = least_squares(func, p0)
      pos = res.x.reshape((-1,2))
      sky = SkyCoord(pos[:,0]*u.deg,pos[:,1]*u.deg,
                     frame=self.pointing.frame.name)
      if frame == 'galactic':
        sky = sky.galactic
        pos = Polygon(np.stack([sky.l.deg,sky.b.deg]).T)
      else:
        sky = sky.icrs
        pos = Polygon(np.stack([sky.ra.deg,sky.dec.deg]).T)
      footprints.append(PolygonPatch(pos, **options) if patch else pos)

    return footprints

  def overlay_footprints(self, axis, **options):
    ''' Display the footprints on the given axis.

    Parameters:
      axis (WCSAxesSubplot): An axis instance with a WCS projection.

    Options:
      frame (string): the coordinate frame.
      label (string): the label of the footprints.
      color (Color): color of the footprint edges.
    '''
    label = options.pop('label', None)
    color = options.pop('color','C2')
    frame = options.pop('frame', self.pointing.frame.name)
    if isinstance(axis, WCSAxesSubplot):
      options['transform'] = axis.get_transform(frame)
    for footprint in self.get_footprints(frame=frame, **options):
      v = np.array(footprint.boundary.coords)
      axis.plot(v[:,0], v[:,1], c=color, label=label, **options)

    return axis

  def display_focal_plane(
      self, sources=None, epoch=None, axis=None, **options):
    ''' Display the layout of the detectors.

    Show the layout of the detectors on the focal plane. The detectors are
    illustrated by the red rectangles. If the `sources` are provided, the
    detectors are overlaid on the sources on the focal plane.

    Parameters:
      sources (SkyCoord): the coordinates of astronomical sources.
      epoch (Time)      : the observation epoch.
    '''
    markersize = options.pop('markersize', 1)
    marker = options.pop('marker', 'x')
    figsize = options.pop('figsize', (8,8))
    if axis is None:
      fig = plt.figure(figsize=figsize)
      axis = fig.add_subplot(111)
    else:
      fig = None
    axis.set_aspect(1.0)
    axis.add_patch(PolygonPatch(
      self.optics.valid_region, color=(0.8,0.8,0.8), alpha=0.2))
    if sources is not None:
      position = self.optics.imaging(sources, epoch)
      axis.scatter(position.x,position.y,markersize,marker=marker)
    for d in self.detectors:
      axis.add_patch(d.patch)
    axis.autoscale_view()
    axis.grid()
    axis.set_xlabel('Displacement on the focal plane ($\mu$m)', fontsize=14)
    axis.set_ylabel('Displacement on the focal plane ($\mu$m)', fontsize=14)
    if fig is not None:
      fig.tight_layout()

  def observe(self, sources, epoch=None):
    ''' Observe astronomical sources.
Map the sky coordinates of astronomical sources into the physical positions on the detectors of the telescope. Parameters: sources (SkyCoord): a list of astronomical sources. epoch (Time): the datetime of the observation. Return: A numpy.ndarray with the shape of (N(detector), 2, N(source)). The first index specifies the detector of the telescope. A two dimensional array is assigned for each detector. The first line is the coordinates along the NAXIS1 axis, and the second one is the coordinates along the NAXIS2 axis. ''' position = self.optics.imaging(sources, epoch) fov = [] for det in self.detectors: fov.append(det.capture(position)) return fov
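# --- Illustrative usage sketch (added; not part of the original module).
# The pointing and source coordinates are arbitrary demonstration values,
# and the sketch assumes the package's `util.get_projection` imported above
# is available at runtime.
if __name__ == '__main__':
  pointing = SkyCoord(ra=266.4 * u.deg, dec=-29.0 * u.deg, frame='icrs')
  scope = Telescope(pointing=pointing, position_angle=Angle(0.0, unit='degree'))
  sources = SkyCoord(ra=[266.39, 266.41] * u.deg,
                     dec=[-29.01, -28.99] * u.deg, frame='icrs')
  for n, det in enumerate(scope.observe(sources)):
    print('detector %d captured %d source(s)' % (n, len(det)))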
[ "pandas.DataFrame", "numpy.stack", "shapely.geometry.Point", "shapely.geometry.MultiPoint", "descartes.patch.PolygonPatch", "shapely.geometry.Polygon", "matplotlib.patches.Rectangle", "numpy.zeros", "numpy.expand_dims", "scipy.optimize.least_squares", "matplotlib.pyplot.figure", "numpy.sin", "numpy.array", "numpy.tile", "numpy.cos", "astropy.coordinates.Angle", "astropy.coordinates.SkyCoord", "scipy.spatial.transform.Rotation.from_euler" ]
[((1409, 1427), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (1417, 1427), True, 'import numpy as np\n'), ((2059, 2084), 'astropy.coordinates.Angle', 'Angle', (['(0.0)'], {'unit': '"""degree"""'}), "(0.0, unit='degree')\n", (2064, 2084), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((7244, 7269), 'astropy.coordinates.Angle', 'Angle', (['(0.0)'], {'unit': '"""degree"""'}), "(0.0, unit='degree')\n", (7249, 7269), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((2568, 2612), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(0 * u.deg)', '(0 * u.deg)'], {'frame': '"""icrs"""'}), "(0 * u.deg, 0 * u.deg, frame='icrs')\n", (2576, 2612), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((3026, 3080), 'numpy.array', 'np.array', (['(icrs.ra.rad, -icrs.dec.rad, position_angle)'], {}), '((icrs.ra.rad, -icrs.dec.rad, position_angle))\n', (3034, 3080), True, 'import numpy as np\n'), ((3763, 3785), 'shapely.geometry.MultiPoint', 'MultiPoint', (['position.T'], {}), '(position.T)\n', (3773, 3785), False, 'from shapely.geometry import MultiPoint\n'), ((4745, 4793), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""zyx"""', '(-self.pointing_angle)'], {}), "('zyx', -self.pointing_angle)\n", (4764, 4793), False, 'from scipy.spatial.transform import Rotation\n'), ((5209, 5305), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': pos[0], 'y': pos[1], 'ra': icrs.ra, 'dec': icrs.dec, 'blocked': blocked}"], {}), "({'x': pos[0], 'y': pos[1], 'ra': icrs.ra, 'dec': icrs.dec,\n 'blocked': blocked})\n", (5221, 5305), True, 'import pandas as pd\n'), ((6038, 6064), 'numpy.zeros', 'np.zeros', (['(naxis2, naxis1)'], {}), '((naxis2, naxis1))\n', (6046, 6064), True, 'import numpy as np\n'), ((6079, 6105), 'numpy.zeros', 'np.zeros', (['(naxis2, naxis1)'], {}), '((naxis2, naxis1))\n', (6087, 6105), True, 'import numpy as np\n'), ((7839, 7882), 'numpy.array', 'np.array', (['(-self.width / 2, self.width / 2)'], {}), '((-self.width / 2, self.width / 2))\n', (7847, 7882), True, 'import numpy as np\n'), ((7967, 8012), 'numpy.array', 'np.array', (['(-self.height / 2, self.height / 2)'], {}), '((-self.height / 2, self.height / 2))\n', (7975, 8012), True, 'import numpy as np\n'), ((8367, 8493), 'matplotlib.patches.Rectangle', 'Rectangle', (['(x1, y1)'], {'width': 'self.width', 'height': 'self.height', 'angle': 'self.position_angle.deg', 'ec': '"""r"""', 'linewidth': '(2)', 'fill': '(False)'}), "((x1, y1), width=self.width, height=self.height, angle=self.\n position_angle.deg, ec='r', linewidth=2, fill=False)\n", (8376, 8493), False, 'from matplotlib.patches import Rectangle\n'), ((9142, 9191), 'shapely.geometry.Polygon', 'Polygon', (['([x1, y1], [x2, y2], [x3, y3], [x4, y4])'], {}), '(([x1, y1], [x2, y2], [x3, y3], [x4, y4]))\n', (9149, 9191), False, 'from shapely.geometry import Polygon, Point\n'), ((2182, 2193), 'shapely.geometry.Point', 'Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2187, 2193), False, 'from shapely.geometry import Polygon, Point\n'), ((4850, 4877), 'numpy.expand_dims', 'np.expand_dims', (['pqr'], {'axis': '(1)'}), '(pqr, axis=1)\n', (4864, 4877), True, 'import numpy as np\n'), ((8122, 8153), 'numpy.cos', 'np.cos', (['self.position_angle.rad'], {}), '(self.position_angle.rad)\n', (8128, 8153), True, 'import numpy as np\n'), ((8154, 8185), 'numpy.sin', 'np.sin', (['self.position_angle.rad'], {}), '(self.position_angle.rad)\n', (8160, 8185), True, 'import numpy as np\n'), ((8603, 
8634), 'numpy.cos', 'np.cos', (['self.position_angle.rad'], {}), '(self.position_angle.rad)\n', (8609, 8634), True, 'import numpy as np\n'), ((8635, 8666), 'numpy.sin', 'np.sin', (['self.position_angle.rad'], {}), '(self.position_angle.rad)\n', (8641, 8666), True, 'import numpy as np\n'), ((9536, 9568), 'numpy.cos', 'np.cos', (['(-self.position_angle.rad)'], {}), '(-self.position_angle.rad)\n', (9542, 9568), True, 'import numpy as np\n'), ((9569, 9601), 'numpy.sin', 'np.sin', (['(-self.position_angle.rad)'], {}), '(-self.position_angle.rad)\n', (9575, 9601), True, 'import numpy as np\n'), ((12900, 12934), 'numpy.array', 'np.array', (['fp.boundary.coords[0:-1]'], {}), '(fp.boundary.coords[0:-1])\n', (12908, 12934), True, 'import numpy as np\n'), ((12946, 12986), 'numpy.tile', 'np.tile', (['[l0.deg, b0.deg]', 'edge.shape[0]'], {}), '([l0.deg, b0.deg], edge.shape[0])\n', (12953, 12986), True, 'import numpy as np\n'), ((13025, 13048), 'scipy.optimize.least_squares', 'least_squares', (['func', 'p0'], {}), '(func, p0)\n', (13038, 13048), False, 'from scipy.optimize import least_squares\n'), ((13095, 13173), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(pos[:, 0] * u.deg)', '(pos[:, 1] * u.deg)'], {'frame': 'self.pointing.frame.name'}), '(pos[:, 0] * u.deg, pos[:, 1] * u.deg, frame=self.pointing.frame.name)\n', (13103, 13173), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((14161, 14196), 'numpy.array', 'np.array', (['footprint.boundary.coords'], {}), '(footprint.boundary.coords)\n', (14169, 14196), True, 'import numpy as np\n'), ((14935, 14962), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (14945, 14962), True, 'import matplotlib.pyplot as plt\n'), ((15041, 15113), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['self.optics.valid_region'], {'color': '(0.8, 0.8, 0.8)', 'alpha': '(0.2)'}), '(self.optics.valid_region, color=(0.8, 0.8, 0.8), alpha=0.2)\n', (15053, 15113), False, 'from descartes.patch import PolygonPatch\n'), ((4887, 4950), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['pqr.T'], {'obstime': 'epoch', 'representation_type': '"""cartesian"""'}), "(pqr.T, obstime=epoch, representation_type='cartesian')\n", (4895, 4950), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((12553, 12608), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['pos[:, 0]', 'pos[:, 1]'], {'frame': 'frame', 'unit': 'u.deg'}), '(pos[:, 0], pos[:, 1], frame=frame, unit=u.deg)\n', (12561, 12608), False, 'from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle\n'), ((13421, 13449), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['pos'], {}), '(pos, **options)\n', (13433, 13449), False, 'from descartes.patch import PolygonPatch\n'), ((13267, 13299), 'numpy.stack', 'np.stack', (['[sky.l.deg, sky.b.deg]'], {}), '([sky.l.deg, sky.b.deg])\n', (13275, 13299), True, 'import numpy as np\n'), ((13359, 13394), 'numpy.stack', 'np.stack', (['[sky.ra.deg, sky.dec.deg]'], {}), '([sky.ra.deg, sky.dec.deg])\n', (13367, 13394), True, 'import numpy as np\n')]
#!/usr/bin/env python3
#------------------------------------------------------------
# Programmer(s): <NAME> @ SMU
#------------------------------------------------------------
# Copyright (c) 2019, Southern Methodist University.
# All rights reserved.
# For details, see the LICENSE file.
#------------------------------------------------------------
# matplotlib-based plotting utility function for
# hurricane test problem in the yz-plane

# imports
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
from utilities_euler3D import *

# determine if running interactively
if __name__=="__main__":
    showplots = False
else:
    showplots = True

# set view for surface plots
elevation = 15
angle = 20

# set test constants
rho0 = 1.0
v0 = 10.0
Amp = 25.0
gamma = 2.0
yl = -1.0
yr = 1.0
zl = -1.0
zr = 1.0

# utility function to create analytical solution
def analytical_solution(t,ny,nz):
    if (t == 0):
        t = 1e-14
    p0prime = Amp*gamma*rho0**(gamma-1.0)
    rthresh = 2.0*t*np.sqrt(p0prime)
    rho = np.zeros((ny,nz), dtype=float)
    my = np.zeros((ny,nz), dtype=float)
    mz = np.zeros((ny,nz), dtype=float)
    dy = (yr-yl)/ny
    dz = (zr-zl)/nz
    for j in range(nz):
        for i in range(ny):
            y = (i+0.5)*dy + yl
            z = (j+0.5)*dz + zl
            r = np.sqrt(y*y + z*z)
            if (r == 0.0):  # protect against division by zero
                r = 1e-14
            costheta = y/r
            sintheta = z/r
            if (r < rthresh):
                rho[i,j] = r*r / (8*Amp*t*t)
                my[i,j] = rho[i,j] * (y + z) / (2*t)
                mz[i,j] = rho[i,j] * (z - y) / (2*t)
            else:
                rho[i,j] = rho0
                my[i,j] = rho0 * ( 2*t*p0prime*costheta + np.sqrt(2*p0prime)*np.sqrt(r*r-2*t*t*p0prime)*sintheta )/r
                mz[i,j] = rho0 * ( 2*t*p0prime*sintheta - np.sqrt(2*p0prime)*np.sqrt(r*r-2*t*t*p0prime)*costheta )/r
    return [rho, my, mz]

# load solution data
nx, ny, nz, nchem, nt, xgrid, ygrid, zgrid, tgrid, rho, mx, my, mz, et, chem = load_data()

# output general information to screen
print('Generating plots for data set:')
print(' ny: ', ny)
print(' nz: ', nz)
print(' nt: ', nt)

# determine extents of plots
minmaxrho = [0.9*rho.min(), 1.1*rho.max()]
if (rho.min() == rho.max()):
    minmaxrho = [rho.min()-0.1, rho.max()+0.1]
minmaxmy = [0.9*my.min(), 1.1*my.max()]
if (my.min() == my.max()):
    minmaxmy = [my.min()-0.1, my.max()+0.1]
minmaxmz = [0.9*mz.min(), 1.1*mz.max()]
if (mz.min() == mz.max()):
    minmaxmz = [mz.min()-0.1, mz.max()+0.1]
minmaxet = [0.9*et.min(), 1.1*et.max()]
if (et.min() == et.max()):
    minmaxet = [et.min()-0.1, et.max()+0.1]

# generate plots of solution
for tstep in range(nt):
    numfigs = 0

    print('time step', tstep+1, 'out of', nt)

    # get true solutions
    rhotrue, mytrue, mztrue = analytical_solution(tgrid[tstep],ny,nz)

    # set string constants for current time, mesh sizes
    tstr = repr(tstep)
    nystr = repr(ny)
    nzstr = repr(nz)

    # extract 2D velocity fields (computed and true)
    U = my[nx//2,:,:,tstep]/rho[nx//2,:,:,tstep]
    Utrue = mytrue/rhotrue
    V = mz[nx//2,:,:,tstep]/rho[nx//2,:,:,tstep]
    Vtrue = mztrue/rhotrue
    speed = np.sqrt(U**2 + V**2)
    speedtrue = np.sqrt(Utrue**2 + Vtrue**2)

    # set filenames for graphics
    rhosurf = 'rho_surface.' + repr(tstep).zfill(4) + '.png'
    etsurf = 'et_surface.' + repr(tstep).zfill(4) + '.png'
    vstr = 'velocity.' + repr(tstep).zfill(4) + '.png'
    rhocont = 'rho_contour.' + repr(tstep).zfill(4) + '.png'
    etcont = 'et_contour.' + repr(tstep).zfill(4) + '.png'
    rho1dout = 'rho1d.' + repr(tstep).zfill(4) + '.png'
    my1dout = 'my1d.' + repr(tstep).zfill(4) + '.png'
    mz1dout = 'mz1d.' + repr(tstep).zfill(4) + '.png'
    sp1dout = 'speed1d.' + repr(tstep).zfill(4) + '.png'

    # set y and z meshgrid objects
    Y,Z = np.meshgrid(ygrid,zgrid)

    # surface plots
    numfigs += 1
    fig = plt.figure(numfigs)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(Y, Z, rho[nx//2,:,:,tstep], rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=True, shade=True)
    ax.set_xlabel('y'); ax.set_ylabel('z'); ax.set_zlim((minmaxrho[0], minmaxrho[1]))
    ax.view_init(elevation,angle)
    plt.title(r'$\rho(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
    plt.savefig(rhosurf)

    numfigs += 1
    fig = plt.figure(numfigs)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(Y, Z, et[nx//2,:,:,tstep], rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=True, shade=True)
    ax.set_xlabel('y'); ax.set_ylabel('z'); ax.set_zlim((minmaxet[0], minmaxet[1]))
    ax.view_init(elevation,angle)
    plt.title(r'$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
    plt.savefig(etsurf)

    # stream plots
    numfigs += 1
    fig = plt.figure(numfigs,figsize=(12,4))
    ax1 = fig.add_subplot(121)
    lw = speed / speed.max()
    ax1.streamplot(Y, Z, U, V, color='b', linewidth=lw)
    ax1.set_xlabel('y'); ax1.set_ylabel('z'); ax1.set_aspect('equal')
    ax2 = fig.add_subplot(122)
    lw = speedtrue / speedtrue.max()
    ax2.streamplot(Y, Z, Utrue, Vtrue, color='k', linewidth=lw)
    ax2.set_xlabel('y'); ax2.set_ylabel('z'); ax2.set_aspect('equal')
    plt.suptitle(r'$\mathbf{v}(y,z)$ (left) vs $\mathbf{v}_{true}(y,z)$ (right) at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
    plt.savefig(vstr)

    # contour plots
    # numfigs += 1
    # fig = plt.figure(numfigs,figsize=(12,4))
    # ax1 = fig.add_subplot(121)
    # ax1.contourf(Y, Z, rho[nx//2,:,:,tstep])
    # plt.colorbar(); ax1.set_xlabel('y'); ax1.set_ylabel('z'); ax1.set_axis('equal')
    # ax2 = fig.add_subplot(122)
    # ax2.contourf(Y, Z, rhotrue)
    # ax2.colorbar(); ax2.set_xlabel('y'); ax2.set_ylabel('z'); ax2.set_axis('equal')
    # plt.suptitle(r'$\rho(y,z)$ (left) vs $\rho_{true}(y,z)$ (right) at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
    # plt.savefig(rhocont)

    # numfigs += 1
    # fig = plt.figure(numfigs)
    # plt.contourf(Y, Z, et[nx//2,:,:,tstep])
    # plt.colorbar(); plt.xlabel('y'); plt.ylabel('z'); plt.axis('equal')
    # plt.title(r'$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)
    # plt.savefig(etcont)

    # line/error plots
    rho1d = rho[nx//2,:,nz//2,tstep]
    my1d = my[nx//2,:,nz//2,tstep]
    mz1d = mz[nx//2,:,nz//2,tstep]
    sp1d = speed[:,nz//2]
    rhotrue1d = rhotrue[:,nz//2]
    mytrue1d = mytrue[:,nz//2]
    mztrue1d = mztrue[:,nz//2]
    sptrue1d = speedtrue[:,nz//2]

    numfigs += 1
    fig = plt.figure(numfigs,figsize=(12,4))
    ax1 = fig.add_subplot(121)
    ax1.plot(ygrid,rho1d,'b--',ygrid,rhotrue1d,'k-')
    ax1.legend(('computed','analytical'))
    ax1.set_xlabel('y'); ax1.set_ylabel(r'$\rho(y)$')
    ax2 = fig.add_subplot(122)
    ax2.semilogy(ygrid,np.abs(rho1d-rhotrue1d)+1e-16)
    ax2.set_xlabel('y'); ax2.set_ylabel(r'$|\rho-\rho_{true}|$')
    plt.suptitle(r'$\rho(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
    plt.savefig(rho1dout)

    numfigs += 1
    fig = plt.figure(numfigs,figsize=(12,4))
    ax1 = fig.add_subplot(121)
    ax1.plot(ygrid,my1d,'b--',ygrid,mytrue1d,'k-')
    ax1.legend(('computed','analytical'))
    ax1.set_xlabel('y'); ax1.set_ylabel(r'$m_y(y)$')
    ax2 = fig.add_subplot(122)
    ax2.semilogy(ygrid,np.abs(my1d-mytrue1d)+1e-16)
    ax2.set_xlabel('y'); ax2.set_ylabel(r'$|m_y-m_{y,true}|$')
    plt.suptitle(r'$m_y(y)$ and error at output ' + tstr + ', mesh = ' + nystr)
    plt.savefig(my1dout)

    numfigs += 1
    fig = plt.figure(numfigs,figsize=(12,4))
    ax1 = fig.add_subplot(121)
ax1.plot(ygrid,mz1d,'b--',ygrid,mztrue1d,'k-') ax1.legend(('computed','analytical')) ax1.set_xlabel('y'); ax1.set_ylabel(r'$m_z(y)$') ax2 = fig.add_subplot(122) ax2.semilogy(ygrid,np.abs(mz1d-mztrue1d)+1e-16) ax2.set_xlabel('y'); ax2.set_ylabel(r'$|m_z-m_{z,true}|$') plt.suptitle(r'$m_z(y)$ and error at output ' + tstr + ', mesh = ' + nystr) plt.savefig(mz1dout) numfigs += 1 fig = plt.figure(numfigs,figsize=(12,4)) ax1 = fig.add_subplot(121) ax1.plot(ygrid,sp1d,'b--',ygrid,sptrue1d,'k-') ax1.legend(('computed','analytical')) ax1.set_xlabel('y'); ax1.set_ylabel('s(y)') ax2 = fig.add_subplot(122) ax2.semilogy(ygrid,np.abs(sp1d-sptrue1d)+1e-16) ax2.set_xlabel('y'); ax2.set_ylabel(r'$|s-s_{true}|$') plt.suptitle(r'$s(y)$ and error at output ' + tstr + ', mesh = ' + nystr) plt.savefig(sp1dout) if (showplots): plt.show() for i in range(1,numfigs+1): plt.figure(i), plt.close() ##### end of script #####
[ "matplotlib.pyplot.title", "numpy.meshgrid", "matplotlib.pyplot.show", "numpy.abs", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.sqrt" ]
[((1092, 1123), 'numpy.zeros', 'np.zeros', (['(ny, nz)'], {'dtype': 'float'}), '((ny, nz), dtype=float)\n', (1100, 1123), True, 'import numpy as np\n'), ((1131, 1162), 'numpy.zeros', 'np.zeros', (['(ny, nz)'], {'dtype': 'float'}), '((ny, nz), dtype=float)\n', (1139, 1162), True, 'import numpy as np\n'), ((1170, 1201), 'numpy.zeros', 'np.zeros', (['(ny, nz)'], {'dtype': 'float'}), '((ny, nz), dtype=float)\n', (1178, 1201), True, 'import numpy as np\n'), ((3275, 3299), 'numpy.sqrt', 'np.sqrt', (['(U ** 2 + V ** 2)'], {}), '(U ** 2 + V ** 2)\n', (3282, 3299), True, 'import numpy as np\n'), ((3312, 3344), 'numpy.sqrt', 'np.sqrt', (['(Utrue ** 2 + Vtrue ** 2)'], {}), '(Utrue ** 2 + Vtrue ** 2)\n', (3319, 3344), True, 'import numpy as np\n'), ((4001, 4026), 'numpy.meshgrid', 'np.meshgrid', (['ygrid', 'zgrid'], {}), '(ygrid, zgrid)\n', (4012, 4026), True, 'import numpy as np\n'), ((4074, 4093), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {}), '(numfigs)\n', (4084, 4093), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4491), 'matplotlib.pyplot.title', 'plt.title', (["('$\\\\rho(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)"], {}), "('$\\\\rho(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)\n", (4421, 4491), True, 'import matplotlib.pyplot as plt\n'), ((4496, 4516), 'matplotlib.pyplot.savefig', 'plt.savefig', (['rhosurf'], {}), '(rhosurf)\n', (4507, 4516), True, 'import matplotlib.pyplot as plt\n'), ((4545, 4564), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {}), '(numfigs)\n', (4555, 4564), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4957), 'matplotlib.pyplot.title', 'plt.title', (["('$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)"], {}), "('$e_t(y,z)$ at output ' + tstr + ', mesh = ' + nystr + 'x' + nzstr)\n", (4889, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4963, 4982), 'matplotlib.pyplot.savefig', 'plt.savefig', (['etsurf'], {}), '(etsurf)\n', (4974, 4982), True, 'import matplotlib.pyplot as plt\n'), ((5034, 5070), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {'figsize': '(12, 4)'}), '(numfigs, figsize=(12, 4))\n', (5044, 5070), True, 'import matplotlib.pyplot as plt\n'), ((5461, 5603), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["(\n '$\\\\mathbf{v}(y,z)$ (left) vs $\\\\mathbf{v}_{true}(y,z)$ (right) at output '\n + tstr + ', mesh = ' + nystr + 'x' + nzstr)"], {}), "(\n '$\\\\mathbf{v}(y,z)$ (left) vs $\\\\mathbf{v}_{true}(y,z)$ (right) at output '\n + tstr + ', mesh = ' + nystr + 'x' + nzstr)\n", (5473, 5603), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5614), 'matplotlib.pyplot.savefig', 'plt.savefig', (['vstr'], {}), '(vstr)\n', (5608, 5614), True, 'import matplotlib.pyplot as plt\n'), ((6804, 6840), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {'figsize': '(12, 4)'}), '(numfigs, figsize=(12, 4))\n', (6814, 6840), True, 'import matplotlib.pyplot as plt\n'), ((7173, 7249), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('$\\\\rho(y)$ and error at output ' + tstr + ', mesh = ' + nystr)"], {}), "('$\\\\rho(y)$ and error at output ' + tstr + ', mesh = ' + nystr)\n", (7185, 7249), True, 'import matplotlib.pyplot as plt\n'), ((7254, 7275), 'matplotlib.pyplot.savefig', 'plt.savefig', (['rho1dout'], {}), '(rho1dout)\n', (7265, 7275), True, 'import matplotlib.pyplot as plt\n'), ((7308, 7344), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {'figsize': '(12, 4)'}), '(numfigs, figsize=(12, 4))\n', (7318, 7344), True, 'import matplotlib.pyplot as plt\n'), ((7670, 
7744), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('$m_y(y)$ and error at output ' + tstr + ', mesh = ' + nystr)"], {}), "('$m_y(y)$ and error at output ' + tstr + ', mesh = ' + nystr)\n", (7682, 7744), True, 'import matplotlib.pyplot as plt\n'), ((7750, 7770), 'matplotlib.pyplot.savefig', 'plt.savefig', (['my1dout'], {}), '(my1dout)\n', (7761, 7770), True, 'import matplotlib.pyplot as plt\n'), ((7803, 7839), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {'figsize': '(12, 4)'}), '(numfigs, figsize=(12, 4))\n', (7813, 7839), True, 'import matplotlib.pyplot as plt\n'), ((8165, 8239), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('$m_z(y)$ and error at output ' + tstr + ', mesh = ' + nystr)"], {}), "('$m_z(y)$ and error at output ' + tstr + ', mesh = ' + nystr)\n", (8177, 8239), True, 'import matplotlib.pyplot as plt\n'), ((8245, 8265), 'matplotlib.pyplot.savefig', 'plt.savefig', (['mz1dout'], {}), '(mz1dout)\n', (8256, 8265), True, 'import matplotlib.pyplot as plt\n'), ((8294, 8330), 'matplotlib.pyplot.figure', 'plt.figure', (['numfigs'], {'figsize': '(12, 4)'}), '(numfigs, figsize=(12, 4))\n', (8304, 8330), True, 'import matplotlib.pyplot as plt\n'), ((8647, 8719), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('$s(y)$ and error at output ' + tstr + ', mesh = ' + nystr)"], {}), "('$s(y)$ and error at output ' + tstr + ', mesh = ' + nystr)\n", (8659, 8719), True, 'import matplotlib.pyplot as plt\n'), ((8725, 8745), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sp1dout'], {}), '(sp1dout)\n', (8736, 8745), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1083), 'numpy.sqrt', 'np.sqrt', (['p0prime'], {}), '(p0prime)\n', (1074, 1083), True, 'import numpy as np\n'), ((8777, 8787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8785, 8787), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1369), 'numpy.sqrt', 'np.sqrt', (['(y * y + z * z)'], {}), '(y * y + z * z)\n', (1354, 1369), True, 'import numpy as np\n'), ((7073, 7098), 'numpy.abs', 'np.abs', (['(rho1d - rhotrue1d)'], {}), '(rho1d - rhotrue1d)\n', (7079, 7098), True, 'import numpy as np\n'), ((7574, 7597), 'numpy.abs', 'np.abs', (['(my1d - mytrue1d)'], {}), '(my1d - mytrue1d)\n', (7580, 7597), True, 'import numpy as np\n'), ((8069, 8092), 'numpy.abs', 'np.abs', (['(mz1d - mztrue1d)'], {}), '(mz1d - mztrue1d)\n', (8075, 8092), True, 'import numpy as np\n'), ((8555, 8578), 'numpy.abs', 'np.abs', (['(sp1d - sptrue1d)'], {}), '(sp1d - sptrue1d)\n', (8561, 8578), True, 'import numpy as np\n'), ((8827, 8840), 'matplotlib.pyplot.figure', 'plt.figure', (['i'], {}), '(i)\n', (8837, 8840), True, 'import matplotlib.pyplot as plt\n'), ((8842, 8853), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8851, 8853), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1769), 'numpy.sqrt', 'np.sqrt', (['(2 * p0prime)'], {}), '(2 * p0prime)\n', (1756, 1769), True, 'import numpy as np\n'), ((1768, 1804), 'numpy.sqrt', 'np.sqrt', (['(r * r - 2 * t * t * p0prime)'], {}), '(r * r - 2 * t * t * p0prime)\n', (1775, 1804), True, 'import numpy as np\n'), ((1885, 1905), 'numpy.sqrt', 'np.sqrt', (['(2 * p0prime)'], {}), '(2 * p0prime)\n', (1892, 1905), True, 'import numpy as np\n'), ((1904, 1940), 'numpy.sqrt', 'np.sqrt', (['(r * r - 2 * t * t * p0prime)'], {}), '(r * r - 2 * t * t * p0prime)\n', (1911, 1940), True, 'import numpy as np\n')]
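Side note on the record above: analytical_solution fills rho/my/mz with a doubly nested Python loop. Below is a vectorized NumPy equivalent as a minimal sketch; it is not part of the original script, the function name and keyword defaults are hypothetical, and np.maximum is added only to guard the square roots.

import numpy as np

def analytical_solution_vectorized(t, ny, nz, rho0=1.0, Amp=25.0, gamma=2.0,
                                   yl=-1.0, yr=1.0, zl=-1.0, zr=1.0):
    if t == 0:
        t = 1e-14                                  # same zero-time guard as the loop version
    p0prime = Amp * gamma * rho0**(gamma - 1.0)
    rthresh = 2.0 * t * np.sqrt(p0prime)
    y = yl + ((np.arange(ny) + 0.5) * (yr - yl) / ny)[:, None]   # cell centres, shape (ny, 1)
    z = zl + ((np.arange(nz) + 0.5) * (zr - zl) / nz)[None, :]   # cell centres, shape (1, nz)
    r = np.maximum(np.sqrt(y*y + z*z), 1e-14)     # guard against division by zero at the origin
    inner = r < rthresh
    rho = np.where(inner, r*r / (8*Amp*t*t), rho0)
    root = np.sqrt(np.maximum(r*r - 2*t*t*p0prime, 0.0))   # clipped; only used where inner is False
    my = np.where(inner, rho*(y + z)/(2*t),
                  rho0*(2*t*p0prime*(y/r) + np.sqrt(2*p0prime)*root*(z/r))/r)
    mz = np.where(inner, rho*(z - y)/(2*t),
                  rho0*(2*t*p0prime*(z/r) - np.sqrt(2*p0prime)*root*(y/r))/r)
    return [rho, my, mz]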
###############################################
##<NAME>, 2018##
##Topo-Seq analysis##

#The script takes raw GCSs data, returns only trusted GCSs,
#computes GCSs shared between different conditions,
#draws Venn diagrams of the set overlaps,
#writes GCSs sets.
###############################################

#######
#Packages to be imported.
#######

import os
import matplotlib.pyplot as plt
import collections
from matplotlib_venn import venn2, venn3, venn3_circles
import numpy as np

#######
#Variables to be defined.
#######

print('Variables to be defined:')

#Path to the working directory
pwd="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\"

#Input data
path_to_replicas={'TopoI_Topo_Seq_1': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_Ara_TCSs_called_thr_15.BroadPeak",
                                       'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_1_no_Ara_TCSs_called_thr_15.BroadPeak"},
                  'TopoI_Topo_Seq_2': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_Ara_TCSs_called_thr_15.BroadPeak",
                                       'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_2_no_Ara_TCSs_called_thr_15.BroadPeak"},
                  'TopoI_Topo_Seq_3': {'Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_Ara_TCSs_called_thr_15.BroadPeak",
                                       'No_Ara' : pwd + "Replics_1_2_3_Thresholds\TopoI_Topo_Seq_3_no_Ara_TCSs_called_thr_15.BroadPeak"}}

#Configuration of the output for the GCSs data in replicas.
Replicas_path_out="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Science\TopoI-ChIP-Seq\TopA_ChIP-Seq\EcTopoI_G116S_M320V_Topo-Seq\TCS_motifs\\Replicas_1_2_3_Tresholds_trusted_TCSs\\"
if not os.path.exists(Replicas_path_out):
    os.makedirs(Replicas_path_out)
Set_name="Thr_15"
All_conditions_name="TopoI_Topo_Seq_123_TCSs_merged"

#Configuration of the output for GCSs trusted.
Out_path=Replicas_path_out + "TopoI_Topo_Seq_123_TCSs_called_thr_15.BroadPeak"

#Outpath for Venn diagrams.
plot_outpath=Replicas_path_out

#######
#Parsing raw GCSs coordinates, returns dictionary - GCSs_coordinate:N3E.
#######

def read_GCSs_file(GCSs_file_path):
    GCSs_dict={}
    GCSs_in=open(GCSs_file_path, 'r')
    for line in GCSs_in:
        line=line.rstrip().split('\t')
        if line[0] not in ['GCSs_coordinate']:
            GCSs_dict[int(line[1])]=float(line[6])
    GCSs_in.close()
    return GCSs_dict

#######
#Filter controls.
#######

def filter_controls(replicas_path_dict):
    #For each replicate, keep only the Ara TCSs that are absent from the matched no-Ara control
    TCSs_replicas_dict={}
    for set_name, set_pair in replicas_path_dict.items(): #Iterates replicas
        #Read files with raw GCSs
        Raw_TCSs_dict_Ara=read_GCSs_file(set_pair['Ara'])
        Raw_TCSs_dict_no_Ara=read_GCSs_file(set_pair['No_Ara'])
        Raw_TCSs_dict_Ara_filtered={}
        for TCS_coordinate, TCS_signal in Raw_TCSs_dict_Ara.items():
            if TCS_coordinate not in Raw_TCSs_dict_no_Ara:
                Raw_TCSs_dict_Ara_filtered[TCS_coordinate]=TCS_signal
        TCSs_replicas_dict[set_name]=Raw_TCSs_dict_Ara_filtered
    return TCSs_replicas_dict

#######
#Combines replicates into one GCSs table.
#######

def combine_replicates(replicas_path_dict, path_out, name):
    #Filter controls.
TCSs_replicas_dict=filter_controls(replicas_path_dict) #Merges a range of replicates GCSs_replicas_dict={} names_ar=[] for key, Raw_GCSs_dict in TCSs_replicas_dict.items(): #Iterates replicas names_ar.append(key) for k, v in Raw_GCSs_dict.items(): #Iterates raw GCSs #Table filling process initiation if len(names_ar)==1: GCSs_replicas_dict[k]=[v] #Table filling process continuing (the table already contains at least one GCSs set) else: #If GCSs is already in the table if k in GCSs_replicas_dict: GCSs_replicas_dict[k].append(v) #If this is the first occurrence of the element in a NON empty table. else: add_el=[] for j in range(len(names_ar)-1): add_el.append(0) add_el.append(v) GCSs_replicas_dict[k]=add_el #If table body line contains less elements than header does, hence add zero. for k, v in GCSs_replicas_dict.items(): if len(v)<len(names_ar): GCSs_replicas_dict[k].append(0) #Sorting the list of dictionary keys. GCSs_replicas_dict_sorted=collections.OrderedDict(sorted(GCSs_replicas_dict.items())) #Writes merged GCSs data fileout=open(f'{path_out}{name}_TCSs_replicates.txt', 'w') #TCSs_out.write(f'{Genome_ID}\t{TCSs_list_F[i][0]}\t{TCSs_list_F[i][0]+1}\tTCS_{i}_F\t10\t.\t{TCSs_list_F[i][1]}\t-1\t-1\n') #Header fileout.write('TCSs_coordinate\t') for i in names_ar: fileout.write(str(i) + '_N3E\t') fileout.write('\n') #Body of the table for k, v in GCSs_replicas_dict_sorted.items(): fileout.write(str(k) + '\t') for i in GCSs_replicas_dict_sorted[k]: fileout.write(str(i) + '\t') fileout.write('\n') fileout.close() return GCSs_replicas_dict #Prepares GCSs table for all conditions #combine_replicates(path_to_replicas, Replicas_path_out, All_conditions_name) ####### #Returns only trusted GCSs - observed at least 2 times within 3 biological replicates. #Data organization: 1. coordinate of GCSs, 2.-4. N3E values for biological replicates 1-3 ####### def trusted(ar): av_height=0 ind=0 for i in range(len(ar)): if ar[i]>0: ind=ind+1 av_height=av_height+ar[i] if ind>1: return av_height/ind else: return "No signal" def trusted_GCSs_calling(GCSs_dictionary): ar=[] for k, v in GCSs_dictionary.items(): if trusted(v)!="No signal": ar.append([k, trusted(v)]) return ar def replicas_comb_trust_wrapper(replicas_dict, path_out, name): print('Now working with: ' + str(name)) cur_GCSs_dict=combine_replicates(replicas_dict, path_out, name) cur_GCSs_trusted=trusted_GCSs_calling(cur_GCSs_dict) print('Number of trusted TCSs for ' + str(name) + ' : ' + str(len(cur_GCSs_trusted))) return cur_GCSs_trusted TCSs_trusted=replicas_comb_trust_wrapper(path_to_replicas, Replicas_path_out, All_conditions_name) #Antibs_GCSs_sets=[Cfx, RifCfx, Micro, Oxo] ####### #GCSs shared between pairs of antibiotics - Cfx, Micro and Oxo and between Cfx and RifCfx. 
####### def pairs_construction(ar1, ar2): double=[] for i in range(len(ar1)): for j in range(len(ar2)): if ar1[i][0]==ar2[j][0]: double.append([ar1[i][0], ar1[i][1], ar2[j][1]]) #GCSs coordinate, N3E_1, N3E_2 return double #Cfx_RifCfx_shared_GCSs=pairs_construction(Cfx, RifCfx) #print('Number of GCSs shared between Cfx and RifCfx: ' + str(len(Cfx_RifCfx_shared_GCSs)) + '\n') # #Cfx_Micro_shared_GCSs=pairs_construction(Cfx, Micro) #Cfx_Oxo_shared_GCSs=pairs_construction(Cfx, Oxo) #Micro_Oxo_shared_GCSs=pairs_construction(Micro, Oxo) # #print('Number of GCSs shared between Cfx and Micro: ' + str(len(Cfx_Micro_shared_GCSs))) #print('Number of GCSs shared between Cfx and Oxo: ' + str(len(Cfx_Oxo_shared_GCSs))) #print('Number of GCSs shared between Micro and Oxo: ' + str(len(Micro_Oxo_shared_GCSs)) + '\n') # #Antibs_GCSs_sets_pair_shared=[Cfx_Micro_shared_GCSs, Cfx_Oxo_shared_GCSs, Micro_Oxo_shared_GCSs] ####### #GCSs shared between 3 antibiotics ####### def triple_construction(ar12, ar3): triple=[] for i in range(len(ar12)): for j in range(len(ar3)): if ar12[i][0]==ar3[j][0]: triple.append([ar12[i][0], ar12[i][1], ar12[i][2], ar3[j][1]]) #GCSs coordinate, N3E_1, N3E_2, N3E_3 return triple #Cfx_Micro_Oxo_shared_GCSs=triple_construction(Cfx_Micro_shared_GCSs, Oxo) #print('Number of GCSs shared between Cfx, Micro and Oxo: ' + str(len(Cfx_Micro_Oxo_shared_GCSs)) +'\n') ####### #Parses replicas, overlaps lists of GCSs, output data for Venn diagram construction. ####### def replicates_parsing_to_list_and_overlapping(replicas_dict, name): #Parsing GCSs_dict={} for k, v in replicas_dict.items(): #Iterate replicas. GCSs_dict[k]=[] for c, h in read_GCSs_file(v).items(): #Iterate GCSs. GCSs_dict[k].append([c, h]) #Overlapping one_two=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(2)]) one_three=pairs_construction(GCSs_dict[name+str(1)], GCSs_dict[name+str(3)]) two_three=pairs_construction(GCSs_dict[name+str(2)], GCSs_dict[name+str(3)]) one_two_three=triple_construction(one_two, GCSs_dict[name+str(3)]) #Venn input description (for 3 sets): one, two, three, one_two, one_three, two_three, one_two_three venn_input=[len(GCSs_dict[name+str(1)])-len(one_two)-len(one_three)+len(one_two_three), len(GCSs_dict[name+str(2)])-len(one_two)-len(two_three)+len(one_two_three), len(one_two)-len(one_two_three), len(GCSs_dict[name+str(3)])-len(one_three)-len(two_three)+len(one_two_three), len(one_three)-len(one_two_three), len(two_three)-len(one_two_three), len(one_two_three)] return venn_input ####### #Venn diagram represents GCSs sets overlapping. 
#description2: one, two, one_two #description3: one, two, one_two, three, one_three, two_three, one_two_three ####### #venn_data_2=[len(Cfx)-len(Cfx_RifCfx_shared_GCSs), len(RifCfx)-len(Cfx_RifCfx_shared_GCSs), len(Cfx_RifCfx_shared_GCSs)] #venn_data_3=[len(Cfx)-len(Cfx_Micro_shared_GCSs)-len(Cfx_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs), # len(Micro)-len(Cfx_Micro_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs), # len(Cfx_Micro_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs), # len(Oxo)-len(Cfx_Oxo_shared_GCSs)-len(Micro_Oxo_shared_GCSs)+len(Cfx_Micro_Oxo_shared_GCSs), # len(Cfx_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs), # len(Micro_Oxo_shared_GCSs)-len(Cfx_Micro_Oxo_shared_GCSs), # len(Cfx_Micro_Oxo_shared_GCSs)] #venn2(subsets = (venn_data_2), set_labels = ("Ciprofloxacin", "Rifampicin Ciprofloxacin")) #plt.savefig(plot_outpath+'Cfx_RifCfx_venn.png', dpi=320) #plt.close() # #print("Cfx Micro Oxo subsets volumes: " + str(venn_data_3)) #venn3(subsets = (venn_data_3), set_labels = ('Ciprofloxacin', 'Microcin B17', 'Oxolinic acid')) #plt.savefig(plot_outpath+'Cfx_Micro_Oxo_venn.png', dpi=320) #plt.close() # #venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_cfx_replicas, 'Cfx_')), set_labels = ('Cfx_1', 'Cfx_2', 'Cfx_3')) #plt.savefig(plot_outpath+'Cfx_replicas_venn.png', dpi=320) #plt.close() # #venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_rifcfx_replicas, 'RifCfx_')), set_labels = ('RifCfx_1', 'RifCfx_2', 'RifCfx_3')) #plt.savefig(plot_outpath+'RifCfx_replicas_venn.png', dpi=320) #plt.close() # #venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_microcin_replicas, 'Micro_')), set_labels = ('Micro_1', 'Micro_2', 'Micro_3')) #plt.savefig(plot_outpath+'Micro_replicas_venn.png', dpi=320) #plt.close() # #venn3(subsets = (replicates_parsing_to_list_and_overlapping(path_to_oxo_replicas, 'Oxo_')), set_labels = ('Oxo_1', 'Oxo_2', 'Oxo_3')) #plt.savefig(plot_outpath+'Oxo_replicas_venn.png', dpi=320) #plt.close() ####### #GCSs sets average N3E estimation. ####### def average_height(ar): av_he=0 for i in range(len(ar)): peak_he=np.mean(ar[i][1:]) av_he=av_he+peak_he return av_he/len(ar) #print('Cfx average GCSs N3E: ' + str(average_height(Cfx))) #print('Micro average GCSs N3E: ' + str(average_height(Micro))) #print('Oxo average GCSs N3E: ' + str(average_height(Oxo))) #print('Cfx and Micro average GCSs N3E: ' + str(average_height(Cfx_Micro_shared_GCSs))) #print('Cfx and Oxo average GCSs N3E: ' + str(average_height(Cfx_Oxo_shared_GCSs))) #print('Micro and Oxo average GCSs N3E: ' + str(average_height(Micro_Oxo_shared_GCSs))) #print('Cfx, Micro and Oxo average GCSs N3E: ' + str(average_height(Cfx_Micro_Oxo_shared_GCSs)) + '\n') ####### #Write down files with GCSs lists - trusted or shared. 
#######

#All_GCSs_sets={Cfx_path: Antibs_GCSs_sets[0],
#               RifCfx_path: Antibs_GCSs_sets[1],
#               Micro_path: Antibs_GCSs_sets[2],
#               Oxo_path: Antibs_GCSs_sets[3],
#               Cfx_Micro_path: Antibs_GCSs_sets_pair_shared[0],
#               Cfx_Oxo_path: Antibs_GCSs_sets_pair_shared[1],
#               Micro_Oxo_path: Antibs_GCSs_sets_pair_shared[2],
#               Cfx_Micro_Oxo_path: Cfx_Micro_Oxo_shared_GCSs}

def write_GCSs_file(dictionary):
    for k, v in dictionary.items(): #Iterates lists to be written
        v.sort(key=lambda tup: tup[0]) #Sorts each list by its first element (the GCSs coordinate)
        fileout=open(k, 'w')
        fileout.write('GCSs_coordinate\tN3E\n')
        for i in range(len(v)):
            fileout.write(str(v[i][0]) + '\t' + str(np.mean(v[i][1:])) + '\n')
        fileout.close()
    return

#write_GCSs_file(All_GCSs_sets)

def write_Cfx_RifCfx_shared_GCSs(ar, path):
    fileout=open(path, 'w')
    fileout.write('GCSs_coordinate\tCfx_N3E\tRifCfx_N3E\n')
    ar.sort(key=lambda tup: tup[0])
    for i in range(len(ar)):
        fileout.write(str(ar[i][0]) + '\t' + str(ar[i][1]) + '\t' + str(ar[i][2]) + '\n')
    fileout.close()
    return

#write_Cfx_RifCfx_shared_GCSs(Cfx_RifCfx_shared_GCSs, Cfx_RifCfx_shared_GCSs_path)
#
#print('Script ended its work successfully!')
[ "numpy.mean", "os.path.exists", "os.makedirs" ]
[((1698, 1731), 'os.path.exists', 'os.path.exists', (['Replicas_path_out'], {}), '(Replicas_path_out)\n', (1712, 1731), False, 'import os\n'), ((1737, 1767), 'os.makedirs', 'os.makedirs', (['Replicas_path_out'], {}), '(Replicas_path_out)\n', (1748, 1767), False, 'import os\n'), ((11670, 11688), 'numpy.mean', 'np.mean', (['ar[i][1:]'], {}), '(ar[i][1:])\n', (11677, 11688), True, 'import numpy as np\n'), ((13184, 13201), 'numpy.mean', 'np.mean', (['v[i][1:]'], {}), '(v[i][1:])\n', (13191, 13201), True, 'import numpy as np\n')]
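The trusted()/trusted_GCSs_calling pair in the record above implements a simple replicate vote: keep a coordinate if it has non-zero N3E in at least two replicates, and score it by the mean of the non-zero values. A hypothetical array-based restatement of the same rule (the function name and toy numbers are mine, not the author's):

import numpy as np

def trusted_sites(signal):
    """signal: (n_sites, n_replicates) array, with 0 meaning 'not detected'."""
    signal = np.asarray(signal, dtype=float)
    detected = signal > 0
    n_detected = detected.sum(axis=1)
    keep = n_detected >= 2                                        # seen in at least two replicates
    mean_height = signal.sum(axis=1) / np.maximum(n_detected, 1) # mean over non-zero values only
    return keep, mean_height

keep, height = trusted_sites([[3.0, 0.0, 0.0],   # one replicate only -> dropped
                              [2.0, 4.0, 0.0]])  # two replicates -> kept, mean N3E 3.0
print(keep, height)                              # [False  True] [3. 3.]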
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute" __license__ = "MIT" """Billiards game """ __all__ = ("billiards_default_config", "Billiards", "BilliardsInitialization") import math from typing import Optional import numpy as np from segar.mdps.initializations import ArenaInitialization from segar.mdps.rewards import dead_reward_fn, l2_distance_reward_fn from segar.mdps.tasks import Task from segar.rendering.rgb_rendering import register_color from segar.factors import ( Label, Mass, Charge, Shape, Text, Circle, GaussianNoise, Size, Position, ID, Done, Alive, Visible, Velocity, ) from segar.rules import Prior from segar.things import Ball, Hole, Entity, Object from segar.sim.location_priors import RandomBottomLocation _DEFAULT_CUEBALL_MASS = 1.0 _DEFAULT_CUEBALL_CHARGE = 1.0 _DEFAULT_BALL_MASS = 1.0 _DEFAULT_BALL_SIZE = 0.2 _DEFAULT_BALL_CHARGE = 1.0 _DEFAULT_HOLE_SIZE = 0.3 _DEFAULT_DEAD_REWARD = -100.0 _HOLE_DISTANCE_THRESH = 1e-4 _MAX_BALL_AT_GOAL_VEL = None _ACTION_RANGE = (-100, 100) def billiard_ball_positions( start: list[float, float], r: float = _DEFAULT_BALL_SIZE / 2 + 1e-3, n: int = 10 ) -> list[list[float, float]]: x, y = start sq2r = math.sqrt(2.0) * r positions = [start] positions += [[x - sq2r, y + sq2r], [x + sq2r, y + sq2r]] positions += [ [x - 2 * sq2r, y + 2 * sq2r], [x, y + 2 * sq2r], [x + 2 * sq2r, y + 2 * sq2r], ] positions += [ [x - 3 * sq2r, y + 3 * sq2r], [x - sq2r, y + 3 * sq2r], [x + sq2r, y + 3 * sq2r], [x + 3 * sq2r, y + 3 * sq2r], ] positions = positions[:n] return positions class CueBall( Object, default={ Label: "cueball", Mass: _DEFAULT_CUEBALL_MASS, Charge: _DEFAULT_CUEBALL_CHARGE, Shape: Circle(0.2), Text: "X", ID: "cueball", }, ): pass billiards_default_config = { "numbers": [(CueBall, 1)], "priors": [ Prior( Size, GaussianNoise( _DEFAULT_BALL_SIZE, 0.01, clip=(_DEFAULT_BALL_SIZE / 2.0, 3 * _DEFAULT_BALL_SIZE / 2.0), ), entity_type=CueBall, ), Prior(Size, _DEFAULT_BALL_SIZE, entity_type=Ball), Prior(Mass, _DEFAULT_BALL_MASS, entity_type=Ball), Prior(Size, _DEFAULT_HOLE_SIZE, entity_type=Hole), Prior(Position, RandomBottomLocation(), entity_type=CueBall), ], } class BilliardsInitialization(ArenaInitialization): """Initialization of billiards derived from arena initialization. Adds a cueball, holes, and other billiard balls. 
""" def __init__(self, config=None): self.cueball_id = None self.ball_ids = [] self.hole_ids = [] super().__init__(config=config) register_color("cueball", (255, 255, 255)) def sample(self, max_iterations: int = 100) -> list[Entity]: self.ball_ids.clear() self.hole_ids.clear() sampled_things = super().sample(max_iterations=max_iterations) ball_positions = billiard_ball_positions([0.0, 0.0]) for i, pos in enumerate(ball_positions): ball = Ball({Position: pos, Text: f"{i + 1}", ID: f"{i + 1}_ball"}) sampled_things.append(ball) hole_positions = [[-0.9, -0.9], [-0.9, 0.9], [0.9, -0.9], [0.9, 0.9]] for i, pos in enumerate(hole_positions): hole = Hole({Position: pos, ID: f"{i}_hole", Size: _DEFAULT_HOLE_SIZE}) sampled_things.append(hole) has_cueball = False has_balls = False has_holes = False for thing in sampled_things: if isinstance(thing, CueBall): has_cueball = True self.cueball_id = thing[ID] if isinstance(thing, Ball): has_balls = True self.ball_ids.append(thing[ID]) if isinstance(thing, Hole): has_holes = True self.hole_ids.append(thing[ID]) if not has_cueball: raise ValueError("cueball wasn't created.") if not has_balls: raise ValueError("balls weren't created.") if not has_holes: raise ValueError("holes weren't created.") return sampled_things def set_arena(self, init_things: Optional[list[Entity]] = None) -> None: super().set_arena(init_things) if self.cueball_id is None: raise RuntimeError("Cueball was not set in arena.") if len(self.ball_ids) == 0: raise RuntimeError("Balls not set in arena.") if len(self.hole_ids) == 0: raise RuntimeError("Holes not set in arena.") class Billiards(Task): """Billiards game. Agent controls the cue ball. Hit the cue ball into billiard balls and get them into holes. Avoid getting the cue ball into the holes. """ def __init__( self, initialization: BilliardsInitialization, action_range: tuple[float, float] = _ACTION_RANGE, action_shape: tuple[int, ...] = (2,), dead_reward: float = _DEFAULT_DEAD_REWARD, hole_distance_threshold: float = _HOLE_DISTANCE_THRESH, max_ball_at_hole_velocity: float = _MAX_BALL_AT_GOAL_VEL, ): """ :param initialization: Initialization object used for initializing the arena. :param action_range: Range of actions used by the agent. :param action_shape: Shape of actions. :param dead_reward: Reward when cue ball is `dead`. :param hole_distance_threshold: Distance between billiard ball and hole under which to stop. :param max_ball_at_hole_velocity: Max billiard ball velocity under which to stop. """ action_type = np.float16 baseline_action = np.array([0, 0]).astype(action_type) super().__init__( action_range=action_range, action_shape=action_shape, action_type=action_type, baseline_action=baseline_action, initialization=initialization, ) self._dead_reward = dead_reward self._hole_distance_threshold = hole_distance_threshold self._max_ball_at_hole_velocity = max_ball_at_hole_velocity @property def cueball_id(self) -> ID: if not hasattr(self._initialization, "cueball_id"): raise AttributeError( "Initialization must define `cueball_id` to " "be compatible with task." ) cueball_id = self._initialization.cueball_id if cueball_id is None: raise ValueError("`cueball_id` is not set yet.") return cueball_id @property def hole_ids(self) -> list[ID]: if not hasattr(self._initialization, "hole_ids"): raise AttributeError( "Initialization must define `hole_ids` to " "be compatible with task." 
) hole_ids = self._initialization.hole_ids return hole_ids @property def ball_ids(self) -> list[ID]: if not hasattr(self._initialization, "ball_ids"): raise AttributeError( "Initialization must define `ball_ids` to " "be compatible with task." ) ball_ids = self._initialization.ball_ids return ball_ids def reward(self, state: dict) -> float: """Reward determined by the distance of the billiard balls to the nearest hold and whether the cue ball is in a hole (dead). :param state: States :return: (float) the reward. """ ball_state = state["things"][self.cueball_id] dead_reward = dead_reward_fn(ball_state, self._dead_reward) # Distance reward is tricky: can't do it directly from states # because sim owns scaling distance_reward = 0.0 for ball_id in self.ball_ids: distance = min([self.sim.l2_distance(ball_id, hole_id) for hole_id in self.hole_ids]) if distance <= self._hole_distance_threshold: self.sim.change_thing_state(ball_id, Alive, False) self.sim.change_thing_state(ball_id, Visible, False) distance_reward += l2_distance_reward_fn(distance) return dead_reward + distance_reward def done(self, state: dict) -> bool: """Episode is done if the cue ball is dead or if all of the billiard balls are in the holes. :param state: The states. :return: True if the state indicates the environment is done. """ ball_state = state["things"][self.cueball_id] is_finished = ball_state[Done] or not ball_state[Alive] balls_are_finished = True for ball_id in self.ball_ids: ball_state = state["things"][ball_id] ball_is_finished = ball_state[Done] or not ball_state[Alive] balls_are_finished = balls_are_finished and ball_is_finished return is_finished or balls_are_finished def apply_action(self, force: np.ndarray) -> None: """Applies force to the cue ball. :param force: (np.array) Force to apply """ self.sim.add_force(self.cueball_id, force) def results(self, state: dict) -> dict: """Results for monitoring task. :param state: States :return: Dictionary of results. """ distance = min( [self.sim.l2_distance(self.cueball_id, hole_id) for hole_id in self.hole_ids] ) ball_state = state["things"][self.cueball_id] return dict( dist_to_goal=distance, velocity=ball_state[Velocity].norm(), mass=ball_state[Mass].value, alive=ball_state[Alive].value, ) def demo_action(self): """Generate an action used for demos :return: np.array action """ return np.random.normal() + np.array((4, 3))
[ "math.sqrt", "segar.things.Hole", "segar.sim.location_priors.RandomBottomLocation", "segar.factors.GaussianNoise", "segar.rules.Prior", "segar.mdps.rewards.l2_distance_reward_fn", "numpy.array", "segar.factors.Circle", "segar.things.Ball", "numpy.random.normal", "segar.mdps.rewards.dead_reward_fn", "segar.rendering.rgb_rendering.register_color" ]
[((1278, 1292), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (1287, 1292), False, 'import math\n'), ((1893, 1904), 'segar.factors.Circle', 'Circle', (['(0.2)'], {}), '(0.2)\n', (1899, 1904), False, 'from segar.factors import Label, Mass, Charge, Shape, Text, Circle, GaussianNoise, Size, Position, ID, Done, Alive, Visible, Velocity\n'), ((2309, 2358), 'segar.rules.Prior', 'Prior', (['Size', '_DEFAULT_BALL_SIZE'], {'entity_type': 'Ball'}), '(Size, _DEFAULT_BALL_SIZE, entity_type=Ball)\n', (2314, 2358), False, 'from segar.rules import Prior\n'), ((2368, 2417), 'segar.rules.Prior', 'Prior', (['Mass', '_DEFAULT_BALL_MASS'], {'entity_type': 'Ball'}), '(Mass, _DEFAULT_BALL_MASS, entity_type=Ball)\n', (2373, 2417), False, 'from segar.rules import Prior\n'), ((2427, 2476), 'segar.rules.Prior', 'Prior', (['Size', '_DEFAULT_HOLE_SIZE'], {'entity_type': 'Hole'}), '(Size, _DEFAULT_HOLE_SIZE, entity_type=Hole)\n', (2432, 2476), False, 'from segar.rules import Prior\n'), ((2918, 2960), 'segar.rendering.rgb_rendering.register_color', 'register_color', (['"""cueball"""', '(255, 255, 255)'], {}), "('cueball', (255, 255, 255))\n", (2932, 2960), False, 'from segar.rendering.rgb_rendering import register_color\n'), ((7782, 7827), 'segar.mdps.rewards.dead_reward_fn', 'dead_reward_fn', (['ball_state', 'self._dead_reward'], {}), '(ball_state, self._dead_reward)\n', (7796, 7827), False, 'from segar.mdps.rewards import dead_reward_fn, l2_distance_reward_fn\n'), ((2090, 2196), 'segar.factors.GaussianNoise', 'GaussianNoise', (['_DEFAULT_BALL_SIZE', '(0.01)'], {'clip': '(_DEFAULT_BALL_SIZE / 2.0, 3 * _DEFAULT_BALL_SIZE / 2.0)'}), '(_DEFAULT_BALL_SIZE, 0.01, clip=(_DEFAULT_BALL_SIZE / 2.0, 3 *\n _DEFAULT_BALL_SIZE / 2.0))\n', (2103, 2196), False, 'from segar.factors import Label, Mass, Charge, Shape, Text, Circle, GaussianNoise, Size, Position, ID, Done, Alive, Visible, Velocity\n'), ((2502, 2524), 'segar.sim.location_priors.RandomBottomLocation', 'RandomBottomLocation', ([], {}), '()\n', (2522, 2524), False, 'from segar.sim.location_priors import RandomBottomLocation\n'), ((3289, 3349), 'segar.things.Ball', 'Ball', (["{Position: pos, Text: f'{i + 1}', ID: f'{i + 1}_ball'}"], {}), "({Position: pos, Text: f'{i + 1}', ID: f'{i + 1}_ball'})\n", (3293, 3349), False, 'from segar.things import Ball, Hole, Entity, Object\n'), ((3537, 3601), 'segar.things.Hole', 'Hole', (["{Position: pos, ID: f'{i}_hole', Size: _DEFAULT_HOLE_SIZE}"], {}), "({Position: pos, ID: f'{i}_hole', Size: _DEFAULT_HOLE_SIZE})\n", (3541, 3601), False, 'from segar.things import Ball, Hole, Entity, Object\n'), ((8325, 8356), 'segar.mdps.rewards.l2_distance_reward_fn', 'l2_distance_reward_fn', (['distance'], {}), '(distance)\n', (8346, 8356), False, 'from segar.mdps.rewards import dead_reward_fn, l2_distance_reward_fn\n'), ((9998, 10016), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (10014, 10016), True, 'import numpy as np\n'), ((10019, 10035), 'numpy.array', 'np.array', (['(4, 3)'], {}), '((4, 3))\n', (10027, 10035), True, 'import numpy as np\n'), ((5943, 5959), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5951, 5959), True, 'import numpy as np\n')]
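A quick sanity check of the rack geometry used by billiard_ball_positions in the record above: each row is offset diagonally by sqrt(2)*r, so neighbouring centres sit exactly one ball diameter (2*r) apart — touching, not overlapping. The snippet below re-derives one spacing standalone rather than importing segar; the radius value mirrors _DEFAULT_BALL_SIZE / 2 + 1e-3.

import math

r = 0.2 / 2 + 1e-3                      # same default radius as the module above
s = math.sqrt(2.0) * r                  # diagonal offset between rack rows
first, second = (0.0, 0.0), (0.0 - s, 0.0 + s)
d = math.dist(first, second)            # centre-to-centre distance
print(abs(d - 2 * r) < 1e-12)           # True: neighbours are exactly one diameter apart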
# Create by Packetsss # Personal use is allowed # Commercial use is prohibited import numpy as np import cv2 from scipy import ndimage import math from copy import deepcopy class Images: def __init__(self, img): self.img = cv2.imread(img, 1) if self.img.shape[0] / self.img.shape[1] < 0.76: self.img_width = 1100 self.img_height = int(self.img_width * self.img.shape[0] / self.img.shape[1]) else: self.img_height = 700 self.img_width = int(self.img_height * self.img.shape[1] / self.img.shape[0]) self.img = cv2.resize(self.img, (self.img_width, self.img_height)) self.img_copy = deepcopy(self.img) self.grand_img_copy = deepcopy(self.img) self.img_name = img.split('\\')[-1].split(".")[0] self.img_format = img.split('\\')[-1].split(".")[1] self.left, self.right, self.top, self.bottom = None, None, None, None # self.bypass_censorship() def auto_contrast(self): clip_hist_percent = 20 gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) hist = cv2.calcHist([gray], [0], None, [256], [0, 256]) hist_size = len(hist) accumulator = [float(hist[0])] for index in range(1, hist_size): accumulator.append(accumulator[index - 1] + float(hist[index])) maximum = accumulator[-1] clip_hist_percent *= (maximum / 100.0) clip_hist_percent /= 2.0 minimum_gray = 0 while accumulator[minimum_gray] < clip_hist_percent: minimum_gray += 1 maximum_gray = hist_size - 1 while accumulator[maximum_gray] >= (maximum - clip_hist_percent): maximum_gray -= 1 alpha = 255 / (maximum_gray - minimum_gray) beta = -minimum_gray * alpha self.img = cv2.convertScaleAbs(self.img, alpha=alpha, beta=beta) def auto_sharpen(self): self.img = cv2.detailEnhance(self.img, sigma_s=10, sigma_r=0.3) def auto_cartoon(self, style=0): edges1 = cv2.bitwise_not(cv2.Canny(self.img, 100, 200)) gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) gray = cv2.medianBlur(gray, 5) edges2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 7) dst = cv2.edgePreservingFilter(self.img, flags=2, sigma_s=64, sigma_r=0.25) if not style: # less blurry self.img = cv2.bitwise_and(dst, dst, mask=edges1) else: # more blurry self.img = cv2.bitwise_and(dst, dst, mask=edges2) def auto_invert(self): self.img = cv2.bitwise_not(self.img) def change_b_c(self, alpha=1, beta=0): # contrast from 0 to 3, brightness from -100 to 100 self.img = cv2.convertScaleAbs(self.img, alpha=alpha, beta=beta) def change_saturation(self, value): # -300 to 300 img_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV).astype("float32") (h, s, v) = cv2.split(img_hsv) s += value s = np.clip(s, 0, 255) img_hsv = cv2.merge([h, s, v]) self.img = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR) def remove_color(self, color): h = color.lstrip('#') color = np.array([int(h[i:i + 2], 16) for i in (0, 2, 4)]) img_hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV).astype("float32") low = np.array([color[0] - 15, 0, 20]) high = np.array([color[0] + 15, 255, 255]) mask = cv2.inRange(img_hsv, low, high) img_hsv[mask > 0] = (0, 0, 255) self.img = cv2.cvtColor(img_hsv.astype("uint8"), cv2.COLOR_HSV2BGR) def crop_img(self, left, right, top, bottom): self.img = self.img[left:right, top:bottom] def rotate_img(self, angle, crop=False, flip=[False, False]): self.reset(flip) if not crop: self.img = cv2.resize(self.img, (0, 0), fx=0.5, fy=0.5) w, h = self.img.shape[1], self.img.shape[0] else: w, h = self.img_width, self.img_height self.img = ndimage.rotate(self.img, angle) angle = math.radians(angle) quadrant = int(math.floor(angle / (math.pi / 2))) & 3 sign_alpha = angle if ((quadrant & 1) == 0) else 
math.pi - angle alpha = (sign_alpha % math.pi + math.pi) % math.pi bb_w = w * math.cos(alpha) + h * math.sin(alpha) bb_h = w * math.sin(alpha) + h * math.cos(alpha) gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w) delta = math.pi - alpha - gamma length = h if (w < h) else w d = length * math.cos(alpha) a = d * math.sin(alpha) / math.sin(delta) y = a * math.cos(gamma) x = y * math.tan(gamma) wr, hr = bb_w - 2 * x, bb_h - 2 * y midpoint = (np.array(self.img.shape[:-1]) // 2)[::-1] half_w, half_h = wr // 2, hr // 2 self.left, self.right, self.top, self.bottom = int(midpoint[0] - half_w), int(midpoint[0] + half_w), \ int(midpoint[1] - half_h), int(midpoint[1] + half_h) def detect_face(self): face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_alt2.xml') # eye_cascade = cv2.CascadeClassifier('data/haarcascade_eye.xml') gray_scale_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) face_coord = face_cascade.detectMultiScale(gray_scale_img) return face_coord def bypass_censorship(self): width = self.img.shape[1] height = self.img.shape[0] smaller_img = cv2.resize(self.img, (width // 2, height // 2)) image = np.zeros(self.img.shape, np.uint8) try: image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[height // 2:, :width // 2] = smaller_img image[height // 2:, width // 2:] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[:height // 2, width // 2:] = smaller_img except: try: image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[height // 2 + 1:, :width // 2] = smaller_img image[height // 2 + 1:, width // 2:] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[:height // 2, width // 2:] = smaller_img except: image[:height // 2, :width // 2] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[height // 2:, :width // 2] = smaller_img image[height // 2:, width // 2 + 1:] = cv2.rotate(smaller_img, cv2.cv2.ROTATE_180) image[:height // 2, width // 2 + 1:] = smaller_img self.img = image def save_img(self, file): cv2.imwrite(file, self.img) def reset(self, flip=None): if flip is None: flip = [False, False] self.img = deepcopy(self.img_copy) if flip[0]: self.img = cv2.flip(self.img, 0) if flip[1]: self.img = cv2.flip(self.img, 1) def grand_reset(self): self.img = deepcopy(self.grand_img_copy) self.img_copy = deepcopy(self.grand_img_copy) def main(): path = "ppl.jpg" img = Images(path) img_name = path.split('\\')[-1].split(".")[0] cv2.imshow(img_name, img.img) cv2.waitKey() cv2.destroyAllWindows() if __name__ == "__main__": main()
[ "cv2.bitwise_and", "cv2.medianBlur", "math.atan2", "cv2.adaptiveThreshold", "numpy.clip", "cv2.edgePreservingFilter", "cv2.imshow", "cv2.inRange", "cv2.cvtColor", "math.radians", "cv2.imwrite", "cv2.detailEnhance", "cv2.split", "cv2.convertScaleAbs", "math.cos", "cv2.destroyAllWindows", "cv2.resize", "copy.deepcopy", "cv2.Canny", "cv2.bitwise_not", "cv2.waitKey", "cv2.calcHist", "math.sin", "cv2.flip", "cv2.merge", "cv2.rotate", "math.tan", "numpy.zeros", "math.floor", "cv2.imread", "numpy.array", "cv2.CascadeClassifier", "scipy.ndimage.rotate" ]
[((7505, 7534), 'cv2.imshow', 'cv2.imshow', (['img_name', 'img.img'], {}), '(img_name, img.img)\n', (7515, 7534), False, 'import cv2\n'), ((7540, 7553), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (7551, 7553), False, 'import cv2\n'), ((7559, 7582), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7580, 7582), False, 'import cv2\n'), ((251, 269), 'cv2.imread', 'cv2.imread', (['img', '(1)'], {}), '(img, 1)\n', (261, 269), False, 'import cv2\n'), ((617, 672), 'cv2.resize', 'cv2.resize', (['self.img', '(self.img_width, self.img_height)'], {}), '(self.img, (self.img_width, self.img_height))\n', (627, 672), False, 'import cv2\n'), ((698, 716), 'copy.deepcopy', 'deepcopy', (['self.img'], {}), '(self.img)\n', (706, 716), False, 'from copy import deepcopy\n'), ((748, 766), 'copy.deepcopy', 'deepcopy', (['self.img'], {}), '(self.img)\n', (756, 766), False, 'from copy import deepcopy\n'), ((1088, 1130), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img, cv2.COLOR_BGR2GRAY)\n', (1100, 1130), False, 'import cv2\n'), ((1149, 1197), 'cv2.calcHist', 'cv2.calcHist', (['[gray]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([gray], [0], None, [256], [0, 256])\n', (1161, 1197), False, 'import cv2\n'), ((1882, 1935), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['self.img'], {'alpha': 'alpha', 'beta': 'beta'}), '(self.img, alpha=alpha, beta=beta)\n', (1901, 1935), False, 'import cv2\n'), ((1987, 2039), 'cv2.detailEnhance', 'cv2.detailEnhance', (['self.img'], {'sigma_s': '(10)', 'sigma_r': '(0.3)'}), '(self.img, sigma_s=10, sigma_r=0.3)\n', (2004, 2039), False, 'import cv2\n'), ((2161, 2203), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img, cv2.COLOR_BGR2GRAY)\n', (2173, 2203), False, 'import cv2\n'), ((2220, 2243), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (2234, 2243), False, 'import cv2\n'), ((2262, 2352), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(7)', '(7)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 7, 7)\n', (2283, 2352), False, 'import cv2\n'), ((2363, 2432), 'cv2.edgePreservingFilter', 'cv2.edgePreservingFilter', (['self.img'], {'flags': '(2)', 'sigma_s': '(64)', 'sigma_r': '(0.25)'}), '(self.img, flags=2, sigma_s=64, sigma_r=0.25)\n', (2387, 2432), False, 'import cv2\n'), ((2703, 2728), 'cv2.bitwise_not', 'cv2.bitwise_not', (['self.img'], {}), '(self.img)\n', (2718, 2728), False, 'import cv2\n'), ((2856, 2909), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['self.img'], {'alpha': 'alpha', 'beta': 'beta'}), '(self.img, alpha=alpha, beta=beta)\n', (2875, 2909), False, 'import cv2\n'), ((3076, 3094), 'cv2.split', 'cv2.split', (['img_hsv'], {}), '(img_hsv)\n', (3085, 3094), False, 'import cv2\n'), ((3128, 3146), 'numpy.clip', 'np.clip', (['s', '(0)', '(255)'], {}), '(s, 0, 255)\n', (3135, 3146), True, 'import numpy as np\n'), ((3166, 3186), 'cv2.merge', 'cv2.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (3175, 3186), False, 'import cv2\n'), ((3497, 3529), 'numpy.array', 'np.array', (['[color[0] - 15, 0, 20]'], {}), '([color[0] - 15, 0, 20])\n', (3505, 3529), True, 'import numpy as np\n'), ((3546, 3581), 'numpy.array', 'np.array', (['[color[0] + 15, 255, 255]'], {}), '([color[0] + 15, 255, 255])\n', (3554, 3581), True, 'import numpy as np\n'), ((3598, 3629), 'cv2.inRange', 'cv2.inRange', (['img_hsv', 'low', 'high'], {}), '(img_hsv, low, high)\n', (3609, 3629), False, 'import 
cv2\n'), ((4186, 4217), 'scipy.ndimage.rotate', 'ndimage.rotate', (['self.img', 'angle'], {}), '(self.img, angle)\n', (4200, 4217), False, 'from scipy import ndimage\n'), ((4237, 4256), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (4249, 4256), False, 'import math\n'), ((5311, 5373), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""data/haarcascade_frontalface_alt2.xml"""'], {}), "('data/haarcascade_frontalface_alt2.xml')\n", (5332, 5373), False, 'import cv2\n'), ((5477, 5519), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2GRAY'], {}), '(self.img, cv2.COLOR_BGR2GRAY)\n', (5489, 5519), False, 'import cv2\n'), ((5747, 5794), 'cv2.resize', 'cv2.resize', (['self.img', '(width // 2, height // 2)'], {}), '(self.img, (width // 2, height // 2))\n', (5757, 5794), False, 'import cv2\n'), ((5812, 5846), 'numpy.zeros', 'np.zeros', (['self.img.shape', 'np.uint8'], {}), '(self.img.shape, np.uint8)\n', (5820, 5846), True, 'import numpy as np\n'), ((6947, 6974), 'cv2.imwrite', 'cv2.imwrite', (['file', 'self.img'], {}), '(file, self.img)\n', (6958, 6974), False, 'import cv2\n'), ((7091, 7114), 'copy.deepcopy', 'deepcopy', (['self.img_copy'], {}), '(self.img_copy)\n', (7099, 7114), False, 'from copy import deepcopy\n'), ((7299, 7328), 'copy.deepcopy', 'deepcopy', (['self.grand_img_copy'], {}), '(self.grand_img_copy)\n', (7307, 7328), False, 'from copy import deepcopy\n'), ((7354, 7383), 'copy.deepcopy', 'deepcopy', (['self.grand_img_copy'], {}), '(self.grand_img_copy)\n', (7362, 7383), False, 'from copy import deepcopy\n'), ((2114, 2143), 'cv2.Canny', 'cv2.Canny', (['self.img', '(100)', '(200)'], {}), '(self.img, 100, 200)\n', (2123, 2143), False, 'import cv2\n'), ((2509, 2547), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dst', 'dst'], {'mask': 'edges1'}), '(dst, dst, mask=edges1)\n', (2524, 2547), False, 'import cv2\n'), ((2614, 2652), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dst', 'dst'], {'mask': 'edges2'}), '(dst, dst, mask=edges2)\n', (2629, 2652), False, 'import cv2\n'), ((3995, 4039), 'cv2.resize', 'cv2.resize', (['self.img', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(self.img, (0, 0), fx=0.5, fy=0.5)\n', (4005, 4039), False, 'import cv2\n'), ((4587, 4609), 'math.atan2', 'math.atan2', (['bb_w', 'bb_w'], {}), '(bb_w, bb_w)\n', (4597, 4609), False, 'import math\n'), ((4626, 4648), 'math.atan2', 'math.atan2', (['bb_w', 'bb_w'], {}), '(bb_w, bb_w)\n', (4636, 4648), False, 'import math\n'), ((4750, 4765), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (4758, 4765), False, 'import math\n'), ((4801, 4816), 'math.sin', 'math.sin', (['delta'], {}), '(delta)\n', (4809, 4816), False, 'import math\n'), ((4834, 4849), 'math.cos', 'math.cos', (['gamma'], {}), '(gamma)\n', (4842, 4849), False, 'import math\n'), ((4867, 4882), 'math.tan', 'math.tan', (['gamma'], {}), '(gamma)\n', (4875, 4882), False, 'import math\n'), ((5911, 5954), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (5921, 5954), False, 'import cv2\n'), ((6063, 6106), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (6073, 6106), False, 'import cv2\n'), ((7160, 7181), 'cv2.flip', 'cv2.flip', (['self.img', '(0)'], {}), '(self.img, 0)\n', (7168, 7181), False, 'import cv2\n'), ((7227, 7248), 'cv2.flip', 'cv2.flip', (['self.img', '(1)'], {}), '(self.img, 1)\n', (7235, 7248), False, 'import cv2\n'), ((2995, 3036), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2HSV'], {}), 
'(self.img, cv2.COLOR_BGR2HSV)\n', (3007, 3036), False, 'import cv2\n'), ((3422, 3463), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2HSV'], {}), '(self.img, cv2.COLOR_BGR2HSV)\n', (3434, 3463), False, 'import cv2\n'), ((4281, 4314), 'math.floor', 'math.floor', (['(angle / (math.pi / 2))'], {}), '(angle / (math.pi / 2))\n', (4291, 4314), False, 'import math\n'), ((4474, 4489), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (4482, 4489), False, 'import math\n'), ((4496, 4511), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (4504, 4511), False, 'import math\n'), ((4532, 4547), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (4540, 4547), False, 'import math\n'), ((4554, 4569), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (4562, 4569), False, 'import math\n'), ((4783, 4798), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (4791, 4798), False, 'import math\n'), ((4951, 4980), 'numpy.array', 'np.array', (['self.img.shape[:-1]'], {}), '(self.img.shape[:-1])\n', (4959, 4980), True, 'import numpy as np\n'), ((6254, 6297), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (6264, 6297), False, 'import cv2\n'), ((6422, 6465), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (6432, 6465), False, 'import cv2\n'), ((6603, 6646), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (6613, 6646), False, 'import cv2\n'), ((6767, 6810), 'cv2.rotate', 'cv2.rotate', (['smaller_img', 'cv2.cv2.ROTATE_180'], {}), '(smaller_img, cv2.cv2.ROTATE_180)\n', (6777, 6810), False, 'import cv2\n')]
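The cumulative-histogram walk in auto_contrast above is equivalent to clipping a fixed percentage of pixels at each tail of the grey histogram and stretching the rest onto [0, 255]. A hypothetical standalone restatement with np.percentile (no OpenCV needed to derive the coefficients; the function name and synthetic image are mine):

import numpy as np

def contrast_stretch_coeffs(gray, clip_hist_percent=20.0):
    """Return (alpha, beta) mapping the clipped grey range onto [0, 255]."""
    lo = np.percentile(gray, clip_hist_percent / 2.0)           # clip half the budget per tail
    hi = np.percentile(gray, 100.0 - clip_hist_percent / 2.0)
    alpha = 255.0 / max(hi - lo, 1e-6)
    beta = -lo * alpha
    return alpha, beta

gray = np.random.randint(40, 200, size=(64, 64))     # synthetic low-contrast image
alpha, beta = contrast_stretch_coeffs(gray)
stretched = np.clip(alpha * gray + beta, 0, 255).astype(np.uint8)
print(stretched.min(), stretched.max())          # roughly 0 and 255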
""" Minimizes D(b, Ax) for x ∈ ℝ₊^N where aₙ, b ∈ ℝ₊^M and D is a divergence. These occur as ingredients of algorithms for the sparse case. """ import cvxpy import numpy def euclidean(A, b): return _solve_convex(A, b, lambda p, q: cvxpy.norm2(p - q)) def total_variation(A, b): return _solve_convex(A, b, lambda p, q: 0.5 * cvxpy.norm1(p - q)) def _solve_convex(A, b, D): x = cvxpy.Variable(A.shape[1]) objective = cvxpy.Minimize(D(b, A @ x)) constraints = [x >= 0] problem = cvxpy.Problem(objective, constraints) problem.solve() status = problem.status assert status == cvxpy.OPTIMAL, f"Unable to solve optimization problem: {status}" x = x.value x[numpy.isclose(x, 0)] = 0 return x
[ "cvxpy.norm1", "numpy.isclose", "cvxpy.Problem", "cvxpy.Variable", "cvxpy.norm2" ]
[((397, 423), 'cvxpy.Variable', 'cvxpy.Variable', (['A.shape[1]'], {}), '(A.shape[1])\n', (411, 423), False, 'import cvxpy\n'), ((509, 546), 'cvxpy.Problem', 'cvxpy.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (522, 546), False, 'import cvxpy\n'), ((706, 725), 'numpy.isclose', 'numpy.isclose', (['x', '(0)'], {}), '(x, 0)\n', (719, 725), False, 'import numpy\n'), ((240, 258), 'cvxpy.norm2', 'cvxpy.norm2', (['(p - q)'], {}), '(p - q)\n', (251, 258), False, 'import cvxpy\n'), ((339, 357), 'cvxpy.norm1', 'cvxpy.norm1', (['(p - q)'], {}), '(p - q)\n', (350, 357), False, 'import cvxpy\n')]
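A usage sketch for the record above, assuming its functions are in scope and cvxpy is installed: the Euclidean variant is nonnegative least squares, so for a consistent system with a nonnegative solution it should recover that solution up to solver tolerance. The matrices here are illustrative only.

import numpy

A = numpy.array([[1.0, 0.0],
                 [0.0, 2.0],
                 [1.0, 1.0]])
x_true = numpy.array([0.5, 1.5])
b = A @ x_true                                   # consistent right-hand side
x_hat = euclidean(A, b)                          # solver defined above
print(numpy.allclose(x_hat, x_true, atol=1e-5))  # True, up to solver tolerance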
"""Split each echo to prepare for registration.""" import os import subprocess import numpy as np import nibabel as nb # ============================================================================= NII_NAMES = [ '/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-01_dir-AP_part-mag_MEGRE_crop.nii.gz', '/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-02_dir-RL_part-mag_MEGRE_crop.nii.gz', '/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-03_dir-PA_part-mag_MEGRE_crop.nii.gz', '/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/01_crop/sub-23_ses-T2s_run-04_dir-LR_part-mag_MEGRE_crop.nii.gz', ] OUTDIR = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/05_split_echoes" # ============================================================================= print("Step_05: Split echoes.") # Output directory if not os.path.exists(OUTDIR): os.makedirs(OUTDIR) print(" Output directory: {}".format(OUTDIR)) # Average across echoes for i, nii_name in enumerate(NII_NAMES): # Load data nii = nb.load(nii_name) temp = np.squeeze(np.asanyarray(nii.dataobj)) # Save each echo separately basename, ext = nii.get_filename().split(os.extsep, 1) basename = os.path.basename(basename) out_name = os.path.join(OUTDIR, basename) for j in range(temp.shape[-1]): echo = np.squeeze(temp[..., j]) img = nb.Nifti1Image(echo, affine=nii.affine, header=nii.header) nb.save(img, '{}_echo{}.nii.gz'.format(out_name, j+1)) print(' Finished.')
[ "nibabel.Nifti1Image", "os.makedirs", "nibabel.load", "os.path.basename", "numpy.asanyarray", "os.path.exists", "numpy.squeeze", "os.path.join" ]
[((928, 950), 'os.path.exists', 'os.path.exists', (['OUTDIR'], {}), '(OUTDIR)\n', (942, 950), False, 'import os\n'), ((956, 975), 'os.makedirs', 'os.makedirs', (['OUTDIR'], {}), '(OUTDIR)\n', (967, 975), False, 'import os\n'), ((1115, 1132), 'nibabel.load', 'nb.load', (['nii_name'], {}), '(nii_name)\n', (1122, 1132), True, 'import nibabel as nb\n'), ((1290, 1316), 'os.path.basename', 'os.path.basename', (['basename'], {}), '(basename)\n', (1306, 1316), False, 'import os\n'), ((1332, 1362), 'os.path.join', 'os.path.join', (['OUTDIR', 'basename'], {}), '(OUTDIR, basename)\n', (1344, 1362), False, 'import os\n'), ((1155, 1181), 'numpy.asanyarray', 'np.asanyarray', (['nii.dataobj'], {}), '(nii.dataobj)\n', (1168, 1181), True, 'import numpy as np\n'), ((1414, 1438), 'numpy.squeeze', 'np.squeeze', (['temp[..., j]'], {}), '(temp[..., j])\n', (1424, 1438), True, 'import numpy as np\n'), ((1453, 1511), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['echo'], {'affine': 'nii.affine', 'header': 'nii.header'}), '(echo, affine=nii.affine, header=nii.header)\n', (1467, 1511), True, 'import nibabel as nb\n')]
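The shape handling in the split loop above can be sanity-checked on a small synthetic 4D volume (dummy array, not the MEGRE data; nb.save is omitted to keep the sketch side-effect free):

import numpy as np
import nibabel as nb

data = np.random.rand(8, 8, 4, 3)                 # x, y, z, echoes
nii = nb.Nifti1Image(data, affine=np.eye(4))
vol = np.squeeze(np.asanyarray(nii.dataobj))
for j in range(vol.shape[-1]):
    echo = np.squeeze(vol[..., j])               # one 3D echo image
    assert echo.shape == (8, 8, 4)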
import numpy as np class layer(): def __init__(self,name,type,nodes_number): self.name=name self.type=type self.nodes_number=nodes_number self.input_values=np.zeros(shape=(nodes_number,1),dtype=float)##input values of nodes self.sum_values=np.zeros(shape=(nodes_number,1),dtype=float)##sum values of nodes self.output_values=np.zeros(shape=(nodes_number,1),dtype=float)##output values of nodes def set_input_values(self,input): self.input_values=input if (self.type=="input"): self.set_output_values(input) def set_output_values(self,output): self.output_values=output class Model(): def __init__(self,method,input_type,perceptron_rule): self.method=method##method self.perceptron_rule=perceptron_rule self.layers=[]##layers of Model self.input_type=input_type """For Training """ self.Connections_Weight=[]## weight of Connections are stored self.Connections_Bias=[]##Bias of Connections are stored self.input_number=0##total input number for training model, using for iteration during epoch state self.input_length=0##each input's length also output array length self.input_arr=0##input array self.output_arr=0##output array self.output_length=0##output length def add_layer(self,layer): self.layers.append(layer) def create_weight_and_bias_array(self,layer1,layer2,bias): ##create arrays as correspond to connections with layers nodes number w_array=np.zeros(shape=(layer1.nodes_number,layer2.nodes_number),dtype=float) self.Connections_Weight.append(w_array)##append to model weight list b_array=np.full(shape=(layer2.nodes_number),fill_value=float(bias)) self.Connections_Bias.append(b_array) def set_input_values(self,input_arr,input_number,input_length): if(type(input_arr)!=np.ndarray): raise Exception("Type Error: given input aren't ndarray") input_layer=self.layers[0] if not(input_length==input_layer.input_values.shape[0]): raise Exception("input's length and nodes number of input layer aren't matched") self.input_number=input_number self.input_length=input_length self.input_arr=input_arr def set_output_values(self,output_arr,output_length): if(type(output_arr)!=np.ndarray): raise Exception("Type Error: given output aren't ndarray") output_layer=self.layers[-1] if not(output_length==output_layer.output_values.shape[0]): raise Exception("output's length and nodes number of output layer aren't matched") self.output_length=output_length self.output_arr=output_arr def activation_func(self,y_in,th): y=1.0 if (-th < y_in < th): y=0 elif (y_in<-th): y=-1.0 return y def activation_func_bin(self,y_in,th): y=1.0 if (y_in < th): y=0 return y def default_rule(self,input_arr,out,w_array,b_array,n,j): for k,inp in enumerate(input_arr):##Update weights w_array[k][j]=w_array[k][j]+n*out*inp b_array[j]=b_array[j]+n*out##Update bias value def delta_rule(self,input_arr,out,w_array,b_array,n,j,y): for k,inp in enumerate(input_arr):##Update weights w_array[k][j]=w_array[k][j]+n*(out-y)*inp b_array[j]=b_array[j]+n*(out-y)##Update bias value def Feed_Forward_Perceptron(self,input_arr,output_arr,n,th): #bool=np.full((input_layer.nodes_number,output_layer.nodes_number),False)##boolean matrix for weight values #while bool.all()!=True:##Until weights for each connections maintaing equation w_array=self.Connections_Weight[0] b_array=self.Connections_Bias[0] y=0 for j,out in enumerate(output_arr): y_in=0## sum for i,inp in enumerate(input_arr): y_in+=inp*w_array[i][j] y_in+=b_array[j]##bias if(self.input_type=="binary"):##activation y=self.activation_func_bin(y_in,th) elif(self.input_type=="bipolar"): y=self.activation_func(y_in,th) 
if(y!=out): if self.perceptron_rule == "default": self.default_rule(input_arr,out,w_array,b_array,n,j) if self.perceptron_rule == "delta": self.delta_rule(input_arr,out,w_array,b_array,n,j,y) def Perceptron(self,learning_rate,epoch,threshold,bias): iter=0 self.create_weight_and_bias_array(self.layers[0],self.layers[1],bias)#give input and output layer as arguments acc=[] while iter!=epoch: for i in range(self.input_number): self.Feed_Forward_Perceptron(self.input_arr[i],self.output_arr[i],learning_rate,threshold) iter+=1 if(iter%1==0): print("epoch="+str(iter)) accuracy=self.predict(self.input_arr,self.output_arr,map_prediction=False) acc.append(accuracy) return acc #print("!!!Weights Matrix After Training!!!"+str(self.input_length)+"X"+str(self.output_length)) #print(self.Connections_Weight[0]) def train(self,learning_rate,epoch,bias,threshold):#return accuracy value of each epoch if self.method=="perceptron": acc=self.Perceptron(learning_rate,epoch,threshold,bias) return acc def predict_per_once(self,input,output):##predict a input w_array=self.Connections_Weight[0] b_array=self.Connections_Bias[0] pred_result=np.zeros(shape=(self.output_length),dtype=np.float64) for j,out in enumerate(output): y_in=0.0 for i,inp in enumerate(input): w=w_array[i][j] y_in+=inp*w_array[i][j] y_in+=b_array[j] pred_result[j]=int(y_in) return pred_result def Map_Pred_Matrix(self,results):##listing predictions on matrix with pred value as x, real value as y print("""!!!!!!!!Results Of Prediction Of Given Inputs!!!!!!!!""") sep=" | " Letters=["L","A","B","C","D","E","J","K"] l=sep.join(map(str,Letters)) print("\t"+l) for i,row in enumerate(results): print("\t-----------------------------") x=sep.join(map(str,row)) print("\t"+Letters[i+1]+" | "+x) def predict(self,inputs,labels,map_prediction):##array that have more than one input as argument true_result=0 false_result=0 results=[[0 for x in range(self.output_length)] for x in range(self.output_length)] for i,input in enumerate(inputs): pred_result=self.predict_per_once(input,labels[i]) pred_class=np.argmax(pred_result)##return index of max value as predicted class real_class=np.where(labels[i]==1)[0][0] results[pred_class][real_class]+=1 if pred_class==real_class: true_result+=1 else: false_result+=1 if(map_prediction==True): self.Map_Pred_Matrix(results) accuracy=float(true_result) / float(true_result+false_result) print("accuracy=>"+str(accuracy)) return accuracy
[ "numpy.where", "numpy.zeros", "numpy.argmax" ]
[((193, 239), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nodes_number, 1)', 'dtype': 'float'}), '(shape=(nodes_number, 1), dtype=float)\n', (201, 239), True, 'import numpy as np\n'), ((285, 331), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nodes_number, 1)', 'dtype': 'float'}), '(shape=(nodes_number, 1), dtype=float)\n', (293, 331), True, 'import numpy as np\n'), ((378, 424), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nodes_number, 1)', 'dtype': 'float'}), '(shape=(nodes_number, 1), dtype=float)\n', (386, 424), True, 'import numpy as np\n'), ((1593, 1664), 'numpy.zeros', 'np.zeros', ([], {'shape': '(layer1.nodes_number, layer2.nodes_number)', 'dtype': 'float'}), '(shape=(layer1.nodes_number, layer2.nodes_number), dtype=float)\n', (1601, 1664), True, 'import numpy as np\n'), ((5735, 5787), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.output_length', 'dtype': 'np.float64'}), '(shape=self.output_length, dtype=np.float64)\n', (5743, 5787), True, 'import numpy as np\n'), ((6913, 6935), 'numpy.argmax', 'np.argmax', (['pred_result'], {}), '(pred_result)\n', (6922, 6935), True, 'import numpy as np\n'), ((7005, 7029), 'numpy.where', 'np.where', (['(labels[i] == 1)'], {}), '(labels[i] == 1)\n', (7013, 7029), True, 'import numpy as np\n')]
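A hedged end-to-end sketch of how the classes above fit together, on a toy binary AND problem with one-hot targets (all layer sizes and hyperparameters here are illustrative assumptions, not values from the source):

import numpy as np

model = Model(method="perceptron", input_type="binary", perceptron_rule="default")
model.add_layer(layer("in", "input", 2))
model.add_layer(layer("out", "output", 2))

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
Y = np.array([[1, 0], [1, 0], [1, 0], [0, 1]], dtype=float)  # one-hot labels
model.set_input_values(X, input_number=4, input_length=2)
model.set_output_values(Y, output_length=2)
acc_per_epoch = model.train(learning_rate=0.1, epoch=10, bias=0.0, threshold=0.2)
model.predict(X, Y, map_prediction=False)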
"""File with the preprocessing tools.""" import os import numpy as np import nibabel as nib import pandas as pd from tqdm import tqdm from sklearn.metrics import pairwise_distances from sklearn.metrics.pairwise import linear_kernel # Change this path path = '' # folder containing the gray-matter maps # Folders with the resulting data output_data = 'Data/' output_kernels = 'Kernels/' output_target = 'Target/' # List of all the NifTI files nifti_images = [file for file in os.listdir(path) if file.endswith('.nii.gz')] # Convert each NifTI into a numpy.ndarray for file in nifti_images: img = nib.load(os.path.join(path, file)) img_data = img.get_fdata() np.save(os.path.join(output_data, file.split('_')[0]), img_data) # Get the subject IDs subjects = [] listdir = os.listdir(output_data) listdir = [x for x in listdir if not x.startswith('.')] n_samples = len(listdir) # Compute the kernels using batches to reduce the memory usage batches = np.array_split(np.arange(len(listdir)), 20) lin_kernel = np.empty((n_samples, n_samples)) euclidean_norm = np.empty((n_samples, n_samples)) for batch_i in tqdm(batches): data_i = [] for i in batch_i: data_i.append(np.load(output_data + listdir[i]).ravel()) subjects.append(listdir[i].split('.')[0]) data_i = np.asarray(data_i) for batch_j in batches: data_j = [] for j in batch_j: data_j.append(np.load(output_data + listdir[j]).ravel()) data_j = np.asarray(data_j) # Compute the kernels euclidean_norm[batch_i[0]:batch_i[-1] + 1, batch_j[0]:batch_j[-1] + 1] = ( pairwise_distances(data_i, data_j, metric='euclidean') ** 2 ) lin_kernel[batch_i[0]:batch_i[-1] + 1, batch_j[0]:batch_j[-1] + 1] = ( linear_kernel(data_i, data_j) ) # Save the kernels in CSV files linear_kernel_df = pd.DataFrame(lin_kernel, index=subjects, columns=subjects) linear_kernel_df.to_csv(output_kernels + 'linear_kernel.csv') euclidean_norm_df = pd.DataFrame(euclidean_norm, index=subjects, columns=subjects) euclidean_norm_df.to_csv(output_kernels + 'euclidean_norm.csv') # Save the target variable in a CSV file # Change this path df_y = pd.read_csv("/Volumes/dtlake01.aramis/users/clinica/pac2019/dataset/" "PAC2019_BrainAge_Training.csv") y = [] for subject in subjects: y.append(df_y[df_y['subject_ID'] == subject]['age'].item()) df_y_new = pd.Series(y, index=subjects) df_y_new.to_csv(output_target + 'age.csv')
[ "pandas.DataFrame", "tqdm.tqdm", "numpy.load", "sklearn.metrics.pairwise.linear_kernel", "pandas.read_csv", "numpy.empty", "numpy.asarray", "sklearn.metrics.pairwise_distances", "pandas.Series", "os.path.join", "os.listdir" ]
[((789, 812), 'os.listdir', 'os.listdir', (['output_data'], {}), '(output_data)\n', (799, 812), False, 'import os\n'), ((1026, 1058), 'numpy.empty', 'np.empty', (['(n_samples, n_samples)'], {}), '((n_samples, n_samples))\n', (1034, 1058), True, 'import numpy as np\n'), ((1076, 1108), 'numpy.empty', 'np.empty', (['(n_samples, n_samples)'], {}), '((n_samples, n_samples))\n', (1084, 1108), True, 'import numpy as np\n'), ((1125, 1138), 'tqdm.tqdm', 'tqdm', (['batches'], {}), '(batches)\n', (1129, 1138), False, 'from tqdm import tqdm\n'), ((1908, 1966), 'pandas.DataFrame', 'pd.DataFrame', (['lin_kernel'], {'index': 'subjects', 'columns': 'subjects'}), '(lin_kernel, index=subjects, columns=subjects)\n', (1920, 1966), True, 'import pandas as pd\n'), ((2050, 2112), 'pandas.DataFrame', 'pd.DataFrame', (['euclidean_norm'], {'index': 'subjects', 'columns': 'subjects'}), '(euclidean_norm, index=subjects, columns=subjects)\n', (2062, 2112), True, 'import pandas as pd\n'), ((2278, 2387), 'pandas.read_csv', 'pd.read_csv', (['"""/Volumes/dtlake01.aramis/users/clinica/pac2019/dataset/PAC2019_BrainAge_Training.csv"""'], {}), "(\n '/Volumes/dtlake01.aramis/users/clinica/pac2019/dataset/PAC2019_BrainAge_Training.csv'\n )\n", (2289, 2387), True, 'import pandas as pd\n'), ((2509, 2537), 'pandas.Series', 'pd.Series', (['y'], {'index': 'subjects'}), '(y, index=subjects)\n', (2518, 2537), True, 'import pandas as pd\n'), ((1306, 1324), 'numpy.asarray', 'np.asarray', (['data_i'], {}), '(data_i)\n', (1316, 1324), True, 'import numpy as np\n'), ((482, 498), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (492, 498), False, 'import os\n'), ((616, 640), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (628, 640), False, 'import os\n'), ((1486, 1504), 'numpy.asarray', 'np.asarray', (['data_j'], {}), '(data_j)\n', (1496, 1504), True, 'import numpy as np\n'), ((1816, 1845), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['data_i', 'data_j'], {}), '(data_i, data_j)\n', (1829, 1845), False, 'from sklearn.metrics.pairwise import linear_kernel\n'), ((1654, 1708), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['data_i', 'data_j'], {'metric': '"""euclidean"""'}), "(data_i, data_j, metric='euclidean')\n", (1672, 1708), False, 'from sklearn.metrics import pairwise_distances\n'), ((1200, 1233), 'numpy.load', 'np.load', (['(output_data + listdir[i])'], {}), '(output_data + listdir[i])\n', (1207, 1233), True, 'import numpy as np\n'), ((1426, 1459), 'numpy.load', 'np.load', (['(output_data + listdir[j])'], {}), '(output_data + listdir[j])\n', (1433, 1459), True, 'import numpy as np\n')]
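As a sanity check on the two matrices written above, the squared Euclidean distances are recoverable from the linear kernel alone via the standard identity ||x_i - x_j||^2 = K_ii + K_jj - 2*K_ij (small random stand-in data, not the gray-matter maps):

import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import linear_kernel

X = np.random.rand(5, 7)
K = linear_kernel(X, X)
d2 = pairwise_distances(X, metric='euclidean') ** 2
assert np.allclose(d2, np.diag(K)[:, None] + np.diag(K)[None, :] - 2 * K)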
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch import random import math import logging import itertools from fairseq import utils from fairseq.data import FairseqDataset, LanguagePairDataset from .noise_util import apply_span_mask, apply_random_mask, apply_entity_mask_for_mlm from fairseq.data import data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=False, left_pad_target=False, input_feeding=True, pad_to_length=None, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, ) # sort by descending source length src_lengths = torch.LongTensor([s['source'].ne(pad_idx).long().sum() for s in samples]) src_lengths, sort_order = src_lengths.sort(descending=True) id = torch.LongTensor([s['id'] for s in samples]).index_select(0, sort_order) src_tokens = merge('source', left_pad=left_pad_source).index_select(0, sort_order) # sentence classification cls_target = merge('cls_target', left_pad=left_pad_target).index_select(0, sort_order).view(-1) # masked language model mlm_target = merge('mlm_target', left_pad=left_pad_target).index_select(0, sort_order) # causal language model prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target).index_select(0, sort_order) prev_output_positions = merge('prev_output_positions', left_pad=left_pad_target).index_select(0, sort_order) clm_target = merge('clm_target', left_pad=left_pad_target).index_select(0, sort_order) # sequence tagging tag_target = merge('tag_target', left_pad=left_pad_target).index_select(0, sort_order) ntokens = src_lengths.sum().item() batch = { 'id': id, 'nsentences': len(samples), 'ntokens': ntokens, 'net_input': { 'src_tokens': src_tokens, 'src_lengths': src_lengths, 'prev_output_tokens': prev_output_tokens, 'prev_output_positions': prev_output_positions, }, 'cls_target': cls_target, 'mlm_target': mlm_target, 'clm_target': clm_target, 'tag_target': tag_target, } return batch class KnowledgeLanguagePairDataset(LanguagePairDataset): @classmethod def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs): """Return the source and target datasets for masked LM training.""" return cls(dataset, *args, **kwargs) def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, meta=None, meta_sizes=None, meta_dict=None, left_pad_source=True, left_pad_target=False, max_source_positions=1024, max_target_positions=1024, shuffle=True, mask_idx=None, mask_prob=0.15, leave_unmasked_prob=0.1, random_token_prob=0.1, mask_whole_words=None, block_size=64, sub_task=None, ): super().__init__(src, src_sizes, src_dict, tgt=tgt, tgt_sizes=tgt_sizes, tgt_dict=tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, shuffle=shuffle) self.meta = meta self.meta_sizes = meta_sizes self.meta_dict = meta_dict self.mask_idx = mask_idx self.mask_prob = mask_prob assert len(meta_sizes) == len(src_sizes) self.sub_task = sub_task self.cls_pad = self.src_dict.pad() # 0 in bert_dict, 1 in fairseq_dict self.block_size = block_size self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.pred_probs = torch.FloatTensor( [1 - leave_unmasked_prob - 
random_token_prob, leave_unmasked_prob, random_token_prob]) self.debug_size_for_mlm = 0 self.debug_size_for_clm = 0 self.debug_size_for_tag = 0 self.debug_size_for_cls = 0 self.debug_size_for_titlegen = 0 def _parse_ocr_data(self, src_item): """ Args: src_item: - title [SEP] content [SEP] title [SEP] content. - used for title generation - file: discovery_all.ocr """ def _get_title_and_content(sep_idx): title_pos = [] content_pos = [] for i, pos in enumerate(sep_idx): last_pos = sep_idx[i - 1] if i > 0 else 1 pos_range = np.arange(last_pos + 1, pos) if pos > last_pos + 1 else None if i % 2 == 0: title_pos.append(pos_range) else: content_pos.append(pos_range) if len(content_pos) < len(title_pos): content_pos.append(None) return title_pos, content_pos src_item_np = np.array(src_item) sep_idx = np.where(src_item_np == self.src_dict.eos())[0] title_positions, content_positions = _get_title_and_content(sep_idx) source = src_item[:1] clm_target = np.array([], dtype=src_item_np.dtype) prev_output_positions_list = [] sep_positions_list = [] for title_position, content_position in zip(title_positions, content_positions): if title_position is not None: old_len = len(source) source = np.append(source, src_item[title_position]) clm_target = np.append(clm_target, src_item[title_position]) prev_output_positions_list = prev_output_positions_list + list(range(old_len, len(source))) if content_position is not None: source = np.append(source, src_item[content_position]) sep_positions_list.append(len(source) - 1) sep_positions_list = [v for v in sep_positions_list if v != 0 and v != len(source) - 1] source = torch.LongTensor(np.append(source, self.src_dict.eos())) clm_target = torch.LongTensor(clm_target) return source, clm_target, prev_output_positions_list, sep_positions_list def _get_example_for_boundary_detection(self, index, src_item): """ TokenClassification Task: sequence tagging """ source, _, _, sep_positions_list = self._parse_ocr_data(src_item) tag_target = torch.from_numpy(np.full(len(source), 1)) # 0: pad 1: negative 2: positive tag_target[0] = self.cls_pad tag_target[-1] = self.cls_pad tag_target[sep_positions_list] = 2 if self.debug_size_for_tag < 2: self.debug_size_for_tag += 1 logger.info('========= index: {} == boundary detection ======='.format(str(index))) logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source])) logger.info('tag_target: ' + ''.join([str(ii.item()) for ii in tag_target])) example = { 'id': index, 'source': source, 'cls_target': torch.LongTensor([self.cls_pad]), 'mlm_target': torch.from_numpy(np.full(len(source), self.src_dict.pad())), 'clm_target': torch.from_numpy(np.full(1, self.src_dict.pad())), 'tag_target': tag_target, 'prev_output_tokens': torch.from_numpy(np.full(1, 1)), 'prev_output_positions': torch.LongTensor([1]), } return example def _create_dummy_data(self, task, **kwargs): if task == 'cls': src_label = torch.LongTensor([-1]) return src_label if task == 'mlm': mlm_target = torch.from_numpy(np.full(kwargs['src_sz'], self.src_dict.pad())) return mlm_target if task == 'clm': prev_output_positions = torch.LongTensor([1]) prev_output_tokens = torch.from_numpy(np.full(1, 1)) clm_target = torch.from_numpy(np.full(1, self.src_dict.pad())) return prev_output_positions, prev_output_tokens, clm_target def _get_example_for_title_generation(self, index, src_item): """ title generation Task: CLM + MLM """ source, clm_target, prev_output_positions_list, _ = self._parse_ocr_data(src_item) # 
build data for MLM (random mask) mlm_positions = apply_random_mask(len(source), ignore_index=set(prev_output_positions_list)) masked_pos = sorted(list(set(prev_output_positions_list + mlm_positions))) mlm_target = torch.from_numpy(np.full(len(source), self.src_dict.pad())) mlm_target[mlm_positions] = source[mlm_positions] # build data for CLM (mask all title) prev_output_positions = np.array(prev_output_positions_list) prev_output_tokens = source[prev_output_positions - 1].clone() prev_output_positions = torch.LongTensor(prev_output_positions) if self.debug_size_for_titlegen < 2: logger.info('========= index: {} == title generation ======='.format(str(index))) logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source])) source[masked_pos] = self.replace(source[masked_pos]) if self.debug_size_for_titlegen < 2: self.debug_size_for_titlegen += 1 logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in source])) logger.info('clm_pos: ' + ' '.join([str(v) for v in prev_output_positions_list])) logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens])) logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target])) logger.info( 'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index])) if prev_output_tokens.numel() == 0: prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') example = { 'id': index, 'source': source, 'cls_target': self._create_dummy_data('cls'), 'mlm_target': mlm_target, 'clm_target': clm_target, 'tag_target': torch.from_numpy(np.full(len(source), self.cls_pad)), 'prev_output_tokens': prev_output_tokens, 'prev_output_positions': prev_output_positions, } return example def _get_example_for_cls(self, index, src_item, src_meta): assert 'cls' in self.sub_task src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta]) src_sz = len(src_item) assert len(src_meta) % 2 == 1 src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:] # build data for MLM & CLM mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad())) prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') if self.debug_size_for_cls < 2: logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index))) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item])) if self.debug_size_for_cls < 2: self.debug_size_for_cls += 1 example = { 'id': index, 'source': src_item, 'cls_target': src_label, 'mlm_target': mlm_target, 'clm_target': clm_target, 'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)), 'prev_output_tokens': prev_output_tokens, 'prev_output_positions': prev_output_positions, } return example def _get_example_for_mlm(self, index, src_item, src_meta): assert 'mlm' in self.sub_task src_sz = len(src_item) src_label = src_meta[0] src_entity = src_meta[1:] src_label = torch.LongTensor([int(self.meta_dict[src_label])]) \ if src_label >= self.meta_dict.nspecial else self._create_dummy_data('cls') src_entity = np.array([int(self.meta_dict[k]) for k in src_entity]) assert len(src_entity) % 2 == 0 src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS] # build data for MLM in Encoder mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity) # BERT & entity mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(mlm_positions_1)) # BERT mlm_position_list = 
sorted(list(set(mlm_positions_1 + mlm_positions_2))) assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list) masked_pos_list = sorted(list(set(mlm_position_list))) assert masked_pos_list[0] > 0 # no mask in bos masked_pos = np.array(masked_pos_list) mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad())) mlm_target[mlm_position_list] = src_item[mlm_position_list] # build data for CLM in Decoder prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') if self.debug_size_for_mlm < 2: logger.info('========= index: {} ==== MLM mask ====='.format(str(index))) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('src_entity: ' + ' '.join( [''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent in src_entity])) src_item[masked_pos] = self.replace(src_item[masked_pos]) if self.debug_size_for_mlm < 2: self.debug_size_for_mlm += 1 logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list])) logger.info( 'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index])) if prev_output_tokens.numel() == 0: prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') example = { 'id': index, 'source': src_item, 'cls_target': src_label, 'mlm_target': mlm_target, 'clm_target': clm_target, 'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)), 'prev_output_tokens': prev_output_tokens, 'prev_output_positions': prev_output_positions, } return example def _get_example_for_clm(self, index, src_item, src_meta): assert 'clm' in self.sub_task src_meta = np.array([int(self.meta_dict[k]) if k < self.meta_dict.nspecial else None for k in src_meta]) src_sz = len(src_item) assert len(src_meta) % 2 == 1 src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:] src_entity = np.array(src_entity.reshape(-1, 2)) + 1 src_label = torch.LongTensor(np.array([None])) # build data for CLM in Decoder clm_position_list = np.array(apply_span_mask(src_sz-1) + 1) # start at 1 prev_output_positions = clm_position_list prev_output_tokens = src_item[prev_output_positions - 1].clone() clm_target = src_item[prev_output_positions].clone() prev_output_positions = torch.LongTensor(prev_output_positions) # build data for MLM in Encoder mlm_position_list = [] mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad())) masked_pos = prev_output_positions if self.debug_size_for_clm < 2: logger.info('========= index: {} ==== CLM Mask ====='.format(str(index))) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('src_entity: ' + ' '.join( [''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent in src_entity])) src_item[masked_pos] = self.replace(src_item[masked_pos]) if self.debug_size_for_clm < 2: self.debug_size_for_clm += 1 logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list])) logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens])) logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target])) logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list])) logger.info( 'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index])) if prev_output_tokens.numel() == 0: 
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') example = { 'id': index, 'source': src_item, 'cls_target': src_label, 'mlm_target': mlm_target, 'clm_target': clm_target, 'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)), 'prev_output_tokens': prev_output_tokens, 'prev_output_positions': prev_output_positions, } return example def _get_example_for_multitask(self, index, src_item, src_meta): """ multi-task joint training tasks: - mlm: masked language model (encoder-only) - clm: causal language model (encoder-decoder or decoder-only) - sentcls: sentence classification (encoder-only) - tokencls: token classification, sequence tagging (encoder-only) - spancls: token span classification, such as relation classification, entity classification (encoder-only) """ assert 'clm' in self.sub_task or 'mlm' in self.sub_task src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta]) src_sz = len(src_item) assert len(src_meta) % 2 == 1 src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:] src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS] if 'sentcls' not in self.sub_task: src_label = torch.LongTensor([self.cls_pad]) mlm_position_list, clm_position_list = [], [] if 'clm' in self.sub_task: clm_position_list = apply_span_mask(src_sz) prev_output_positions = np.array(clm_position_list) if 'mlm' in self.sub_task: mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity, ignore_index=set(clm_position_list)) # BERT & entity mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(clm_position_list + mlm_positions_1)) # BERT mlm_position_list = sorted(list(set(mlm_positions_1 + mlm_positions_2))) assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list) masked_pos_list = sorted(list(set(clm_position_list + mlm_position_list))) assert len(clm_position_list) + len(mlm_position_list) == len(masked_pos_list) assert masked_pos_list[0] > 0 masked_pos = np.array(masked_pos_list) # build data for CLM in Decoder prev_output_tokens = src_item[prev_output_positions - 1].clone() clm_target = src_item[prev_output_positions].clone() prev_output_positions = torch.LongTensor(prev_output_positions) # build data for MLM in Encoder mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad())) mlm_target[mlm_position_list] = src_item[mlm_position_list] if self.debug_size_for_mlm < 2: logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index))) logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('src_entity: ' + ' '.join( [''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent in src_entity])) src_item[masked_pos] = self.replace(src_item[masked_pos]) if self.debug_size_for_mlm < 2: self.debug_size_for_mlm += 1 logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item])) logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list])) logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens])) logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target])) logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list])) logger.info( 'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index])) if prev_output_tokens.numel() == 0: prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm') example = { 'id': index, 'source': 
src_item,
            'cls_target': src_label,
            'mlm_target': mlm_target,
            'clm_target': clm_target,
            'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
            'prev_output_tokens': prev_output_tokens,
            'prev_output_positions': prev_output_positions,
        }
        return example

    def __getitem__(self, index):
        """
        src: plain text
        meta:
            - content: cls_label ent1_start ent1_end ent2_start ent2_end
            - desc: cls_label 0 represents no label; it should be skipped in the cls task.
        TODO: dynamic_span_length, dynamic_total_length
        """
        src_item = self.src[index]
        src_meta = self.meta[index]
        sep_sz = (src_item == self.src_dict.eos()).sum()
        if sep_sz > 1:  # OCR data tasks: titlegen, segcls, sentcls
            if 'titlegen' in self.sub_task and 'segcls' in self.sub_task:
                task_selector = random.random()
                if task_selector > 0.5:
                    example = self._get_example_for_title_generation(index, src_item)
                else:
                    example = self._get_example_for_title_generation(index, src_item)
                    # example = self._get_example_for_boundary_detection(index, src_item)  # double-check this
            elif 'segcls' in self.sub_task:
                example = self._get_example_for_boundary_detection(index, src_item)
            elif 'titlegen' in self.sub_task:
                example = self._get_example_for_title_generation(index, src_item)
            else:
                return
            return example
        else:  # product summary data tasks
            task_selector = random.random()
            if task_selector > 0:
            # if task_selector < 0:
            # if task_selector < 0.4:
                return self._get_example_for_mlm(index, src_item, src_meta)
            elif task_selector < 0.7:
            # elif task_selector < 2:
                return self._get_example_for_clm(index, src_item, src_meta)
            else:
                return self._get_example_for_clm(index, src_item, src_meta)
                # return self._get_example_for_cls(index, src_item, src_meta)
                # return self._get_example_for_multitask(index, src_item, src_meta)

    def collater(self, samples):
        return collate(samples, self.src_dict.pad(), self.src_dict.eos())

    def replace(self, x):
        _x_real = x
        _x_rand = _x_real.clone().random_(self.src_dict.nspecial, len(self.src_dict))
        _x_mask = _x_real.clone().fill_(self.mask_idx)
        probs = torch.multinomial(self.pred_probs, len(x), replacement=True)
        _x = _x_mask * (probs == 0).long() + \
             _x_real * (probs == 1).long() + \
             _x_rand * (probs == 2).long()
        return _x
[ "fairseq.data.data_utils.collate_tokens", "numpy.full", "torch.LongTensor", "torch.FloatTensor", "random.random", "numpy.append", "numpy.array", "numpy.arange", "logging.getLogger" ]
[((488, 515), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (505, 515), False, 'import logging\n'), ((842, 978), 'fairseq.data.data_utils.collate_tokens', 'data_utils.collate_tokens', (['[s[key] for s in samples]', 'pad_idx', 'eos_idx', 'left_pad', 'move_eos_to_beginning'], {'pad_to_length': 'pad_to_length'}), '([s[key] for s in samples], pad_idx, eos_idx,\n left_pad, move_eos_to_beginning, pad_to_length=pad_to_length)\n', (867, 978), False, 'from fairseq.data import data_utils\n'), ((4174, 4282), 'torch.FloatTensor', 'torch.FloatTensor', (['[1 - leave_unmasked_prob - random_token_prob, leave_unmasked_prob,\n random_token_prob]'], {}), '([1 - leave_unmasked_prob - random_token_prob,\n leave_unmasked_prob, random_token_prob])\n', (4191, 4282), False, 'import torch\n'), ((5364, 5382), 'numpy.array', 'np.array', (['src_item'], {}), '(src_item)\n', (5372, 5382), True, 'import numpy as np\n'), ((5577, 5614), 'numpy.array', 'np.array', (['[]'], {'dtype': 'src_item_np.dtype'}), '([], dtype=src_item_np.dtype)\n', (5585, 5614), True, 'import numpy as np\n'), ((6473, 6501), 'torch.LongTensor', 'torch.LongTensor', (['clm_target'], {}), '(clm_target)\n', (6489, 6501), False, 'import torch\n'), ((9216, 9252), 'numpy.array', 'np.array', (['prev_output_positions_list'], {}), '(prev_output_positions_list)\n', (9224, 9252), True, 'import numpy as np\n'), ((9356, 9395), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (9372, 9395), False, 'import torch\n'), ((13328, 13353), 'numpy.array', 'np.array', (['masked_pos_list'], {}), '(masked_pos_list)\n', (13336, 13353), True, 'import numpy as np\n'), ((15920, 15959), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (15936, 15959), False, 'import torch\n'), ((19066, 19093), 'numpy.array', 'np.array', (['clm_position_list'], {}), '(clm_position_list)\n', (19074, 19093), True, 'import numpy as np\n'), ((19839, 19864), 'numpy.array', 'np.array', (['masked_pos_list'], {}), '(masked_pos_list)\n', (19847, 19864), True, 'import numpy as np\n'), ((20072, 20111), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (20088, 20111), False, 'import torch\n'), ((1228, 1272), 'torch.LongTensor', 'torch.LongTensor', (["[s['id'] for s in samples]"], {}), "([s['id'] for s in samples])\n", (1244, 1272), False, 'import torch\n'), ((7555, 7587), 'torch.LongTensor', 'torch.LongTensor', (['[self.cls_pad]'], {}), '([self.cls_pad])\n', (7571, 7587), False, 'import torch\n'), ((7895, 7916), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (7911, 7916), False, 'import torch\n'), ((8052, 8074), 'torch.LongTensor', 'torch.LongTensor', (['[-1]'], {}), '([-1])\n', (8068, 8074), False, 'import torch\n'), ((8312, 8333), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (8328, 8333), False, 'import torch\n'), ((11267, 11297), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (11283, 11297), False, 'import torch\n'), ((15420, 15450), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (15436, 15450), False, 'import torch\n'), ((15563, 15579), 'numpy.array', 'np.array', (['[None]'], {}), '([None])\n', (15571, 15579), True, 'import numpy as np\n'), ((18662, 18692), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (18678, 18692), False, 'import torch\n'), ((18855, 18887), 
'torch.LongTensor', 'torch.LongTensor', (['[self.cls_pad]'], {}), '([self.cls_pad])\n', (18871, 18887), False, 'import torch\n'), ((23391, 23406), 'random.random', 'random.random', ([], {}), '()\n', (23404, 23406), False, 'import random\n'), ((5882, 5925), 'numpy.append', 'np.append', (['source', 'src_item[title_position]'], {}), '(source, src_item[title_position])\n', (5891, 5925), True, 'import numpy as np\n'), ((5955, 6002), 'numpy.append', 'np.append', (['clm_target', 'src_item[title_position]'], {}), '(clm_target, src_item[title_position])\n', (5964, 6002), True, 'import numpy as np\n'), ((6181, 6226), 'numpy.append', 'np.append', (['source', 'src_item[content_position]'], {}), '(source, src_item[content_position])\n', (6190, 6226), True, 'import numpy as np\n'), ((7842, 7855), 'numpy.full', 'np.full', (['(1)', '(1)'], {}), '(1, 1)\n', (7849, 7855), True, 'import numpy as np\n'), ((8384, 8397), 'numpy.full', 'np.full', (['(1)', '(1)'], {}), '(1, 1)\n', (8391, 8397), True, 'import numpy as np\n'), ((22642, 22657), 'random.random', 'random.random', ([], {}), '()\n', (22655, 22657), False, 'import random\n'), ((4958, 4986), 'numpy.arange', 'np.arange', (['(last_pos + 1)', 'pos'], {}), '(last_pos + 1, pos)\n', (4967, 4986), True, 'import numpy as np\n')]
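The replace() method at the end of the dataset above applies the standard BERT-style corruption to already-selected positions: with pred_probs built from leave_unmasked_prob and random_token_prob, each chosen token becomes the mask symbol, stays itself, or is resampled uniformly from the non-special vocabulary. A standalone sketch of the same sampling scheme (toy vocabulary sizes, assumed values, independent of fairseq):

import torch

mask_idx, nspecial, vocab = 4, 5, 100
pred_probs = torch.FloatTensor([0.8, 0.1, 0.1])        # mask / keep / random

x = torch.randint(nspecial, vocab, (12,))
probs = torch.multinomial(pred_probs, len(x), replacement=True)
x_corrupted = (torch.full_like(x, mask_idx) * (probs == 0).long()
               + x * (probs == 1).long()
               + torch.randint_like(x, nspecial, vocab) * (probs == 2).long())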
#! /usr/bin/python import numpy as np import math from scipy.spatial import KDTree import openravepy as orpy import transformations from robotiqloader import RobotiqHand, InvalidTriangleException import sys, time, logging, copy import itertools from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range import rospy import scipy.optimize class PlanningSceneInterface(object): def __init__(self, or_env, robot_name): """ Sets scene information for grasp planning that considers the whole robot. @param or_env OpenRAVE environment containing the whole planning scene and robot @param robot_name Name of the robot on which the hand is attached (for ik computations) """ self._or_env = or_env self._robot = or_env.GetRobot(robot_name) self._manip = self._robot.GetActiveManipulator() self._arm_ik = orpy.databases.inversekinematics.InverseKinematicsModel(self._robot, iktype=orpy.IkParameterization.Type.Transform6D) # Make sure we have an ik solver if not self._arm_ik.load(): rospy.loginfo('No IKFast solver found. Generating new one...') self._arm_ik.autogenerate() self._object = None def set_target_object(self, obj_name): self._object = self._or_env.GetKinBody(obj_name) def check_arm_ik(self, hand_pose_object, grasp_conf, seed, open_hand_offset): with self._or_env: # compute target pose in world frame object_pose = self._object.GetTransform() hand_pose_scene = np.dot(object_pose, hand_pose_object) # save current state dof_values = self._robot.GetDOFValues() # if we have a seed set it arm_dofs = self._manip.GetArmIndices() hand_dofs = self._manip.GetGripperIndices() if seed is not None: self._robot.SetDOFValues(seed, dofindices=arm_dofs) # Compute a pre-grasp hand configuration and set it pre_grasp_conf = np.asarray(grasp_conf) - open_hand_offset lower_limits, upper_limits = self._robot.GetDOFLimits(hand_dofs) pre_grasp_conf = np.asarray(clamp(pre_grasp_conf, lower_limits, upper_limits)) self._robot.SetDOFValues(pre_grasp_conf, dofindices=hand_dofs) # Now find an ik solution for the target pose with the hand in the pre-grasp configuration sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions) # sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions) # If that didn't work, try to compute a solution that is in collision (may be useful anyways) if sol is None: # sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters) sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters) b_sol_col_free = False else: b_sol_col_free = True # Restore original dof values self._robot.SetDOFValues(dof_values) return b_sol_col_free, sol, pre_grasp_conf class HFTSSampler: def __init__(self, object_io_interface, scene_interface=None, verbose=False, num_hops=2, vis=False): self._verbose = verbose self._sampler_viewer = vis self._orEnv = orpy.Environment() # create openrave environment self._orEnv.SetDebugLevel(orpy.DebugLevel.Fatal) self._orEnv.GetCollisionChecker().SetCollisionOptions(orpy.CollisionOptions.Contacts) if vis: self._orEnv.SetViewer('qtcoin') # attach viewer (optional) self._or_handles = [] else: self._or_handles = None self._scene_or_env = None self._hand_loaded = False self._scene_interface = scene_interface self._obj_loaded = False self._max_iters = 40 self._reachability_weight = 1.0 self._mu = 2.0 self._min_stability = 0.0 self._b_force_new_hfts = False self._object_kd_tree = None self._object_points = None # self._hops = num_hops # TODO 
remove this aga self._hops = 2 self._robot = None self._obj = None self._obj_com = None self._data_labeled = None self._hand_manifold = None self._num_contacts = None self._contact_combinations = [] self._num_levels = 0 self._branching_factors = [] self._object_io_interface = object_io_interface def __del__(self): orpy.RaveDestroy() def check_arm_grasp_validity(self, grasp_conf, grasp_pose, seed, open_hand_offset=0.1): if self._scene_interface is None: #TODO Think about what we should do in this case (planning with free-floating hand) return True, None, None object_hfts_pose = self._obj.GetTransform() # pose in environment used for contact planning hand_pose_object_frame = np.dot(np.linalg.inv(object_hfts_pose), grasp_pose) # hand_pose_world = np.dot(object_hfts_pose, grasp_pose) collision_free, arm_conf, pre_grasp_conf = \ self._scene_interface.check_arm_ik(hand_pose_object_frame, grasp_conf, seed=seed, open_hand_offset=open_hand_offset) return collision_free, arm_conf, pre_grasp_conf def check_grasp_validity(self): # Check whether the hand is collision free if self._robot.CheckSelfCollision(): return False real_contacts = self.get_real_contacts() # self.draw_contacts(real_contacts) stability = compute_grasp_stability(grasp_contacts=real_contacts, mu=self._mu) return stability > self._min_stability and self.is_grasp_collision_free() def create_object_kd_tree(self, points): self._object_kd_tree = KDTree(points[:, :3]) self._object_points = points def compute_allowed_contact_combinations(self, depth, label_cache): # Now, for this parent get all possible contacts allowed_finger_combos = set(self._contact_combinations[depth]) # Next, we want to filter out contact combinations that are stored in labelCache forbidden_finger_combos = set() for grasp_label in label_cache: finger_combo = tuple([x[-1] for x in grasp_label]) forbidden_finger_combos.add(finger_combo) # Filter them out allowed_finger_combos.difference_update(forbidden_finger_combos) return list(allowed_finger_combos) def compute_contact_combinations(self): while len(self._contact_combinations) < self._num_levels: self._contact_combinations.append([]) for i in range(self._num_levels): self._contact_combinations[i] = set(itertools.product(range(self._branching_factors[i]), repeat=self._num_contacts)) def compose_grasp_info(self, contact_labels): contacts = [] # a list of contact positions and normals for i in range(self._num_contacts): p, n = self.get_cluster_repr(contact_labels[i]) contacts.append(list(p) + list(n)) object_contacts = np.asarray(contacts) code_tmp = self._hand_manifold.encode_grasp(object_contacts) dummy, grasp_conf = self._hand_manifold.predict_hand_conf(code_tmp) hand_contacts = self._robot.get_ori_tip_pn(grasp_conf) return grasp_conf, object_contacts, hand_contacts def _debug_visualize_quality(self, labels, quality, handles): grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels) self._robot.SetVisible(False) handles.append(self._draw_contacts_quality(object_contacts, quality)) def _draw_contacts_quality(self, object_contacts, quality): colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] quality = min(abs(quality), 0.005) width = 0.003 length = max((1.0 - abs(quality) / 0.005) * 0.05, 0.001) # Draw planned contacts arrow_handles = [] for i in range(object_contacts.shape[0]): arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3], object_contacts[i, :3] - length * object_contacts[i, 3:], width, colors[i])) return arrow_handles def _debug_visualize(self, labels, handle_index=-1): 
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels) rospy.logwarn('Debug visualize') # self._robot.SetVisible(False) # self.draw_contacts(object_contacts, handle_index=handle_index) # time.sleep(1.0) # self._robot.SetVisible(True) def draw_contacts(self, object_contacts, handle_index=-1): if len(self._or_handles) == 0: self._or_handles.append(None) self._or_handles.append(None) # TODO this is hard coded for three contacts colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]] if handle_index != 0: width = 0.003 length = 0.05 else: width = 0.001 length = 0.1 # Draw planned contacts arrow_handles = [] for i in range(object_contacts.shape[0]): arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3], object_contacts[i, :3] - length * object_contacts[i, 3:], width, colors[i])) self._or_handles[handle_index] = arrow_handles def evaluate_grasp(self, contact_label): contacts = [] # a list of contact positions and normals for i in range(self._num_contacts): p, n = self.get_cluster_repr(contact_label[i]) contacts.append(list(p) + list(n)) contacts = np.asarray(contacts) # self.draw_contacts(contacts) s_tmp = self._hand_manifold.compute_grasp_quality(self._obj_com, contacts) code_tmp = self._hand_manifold.encode_grasp(contacts) r_tmp, dummy = self._hand_manifold.predict_hand_conf(code_tmp) # TODO: Research topic. This is kind of hack. Another objective function might be better # o_tmp = s_tmp / (r_tmp + 0.000001) o_tmp = s_tmp - self._reachability_weight * r_tmp assert not math.isnan(o_tmp) and not math.isinf(math.fabs(o_tmp)) # o_tmp = s_tmp / (r_tmp + 1.0) # return s_tmp, r_tmp, o_tmp return s_tmp, r_tmp, -r_tmp def extend_hfts_node(self, old_labels, allowed_finger_combos=None): new_depth = len(old_labels[0]) # a label has length depth + 1 if allowed_finger_combos is not None: fingertip_assignments = np.random.choice(allowed_finger_combos) else: fingertip_assignments = np.random.choice(self._branching_factors[new_depth], self._num_contacts, replace=True) for label, assignment in itertools.izip(old_labels, fingertip_assignments): label.append(assignment) s_tmp, r_tmp, o_tmp = self.evaluate_grasp(old_labels) # self._debug_visualize(old_labels, 0) return o_tmp, old_labels def get_branch_information(self, level): if level < self.get_maximum_depth(): possible_num_children = pow(self._branching_factors[level] + 1, self._num_contacts) possible_num_leaves = 1 for d in range(level, self.get_maximum_depth()): possible_num_leaves *= pow(self._branching_factors[level] + 1, self._num_contacts) else: possible_num_children = 0 possible_num_leaves = 1 return possible_num_children, possible_num_leaves def get_cluster_repr(self, label): level = len(label) - 1 # indexed from 0 idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1)) points = [self._data_labeled[t, 0:3] for t in idx][0] normals = [self._data_labeled[t, 3:6] for t in idx][0] pos = np.sum(points, axis=0) / len(idx[0]) normal = np.sum(normals, axis=0) / len(idx[0]) normal /= np.linalg.norm(normal) return pos, -normal def get_maximum_depth(self): return self._num_levels def get_or_hand(self): return self._robot def get_random_sibling_label(self, label): ret = [] if len(label) <= self._hops / 2: for i in range(len(label)): ret.append(np.random.randint(self._branching_factors[i])) else: match_len = len(label) - self._hops / 2 ret = label[:match_len] for i in range(len(label) - match_len): ret.append(np.random.randint(self._branching_factors[i + match_len])) return ret def get_random_sibling_labels(self, 
curr_labels, allowed_finger_combos=None):
        labels_tmp = []
        if allowed_finger_combos is None:
            for i in range(self._num_contacts):
                tmp = self.get_random_sibling_label(curr_labels[i])
                labels_tmp.append(tmp)
        else:
            finger_combo = np.random.choice(allowed_finger_combos)
            for i in range(self._num_contacts):
                tmp = list(curr_labels[i])
                tmp[-1] = finger_combo[i]
                labels_tmp.append(tmp)
        return labels_tmp

    def get_real_contacts(self):
        collision_report = orpy.CollisionReport()
        real_contacts = []
        # iterate over all fingertip links and determine the contacts
        for eel in self._robot.get_fingertip_links():
            link = self._robot.GetLink(eel)
            self._orEnv.CheckCollision(self._obj, link, report=collision_report)
            # self._orEnv.CheckCollision(link, self._obj, report=collision_report)
            if len(collision_report.contacts) == 0:
                raise ValueError('[HFTSSampler::get_real_contacts] No contacts found')
            # TODO the normals reported by the collision check are wrong, so instead we use a nearest
            # TODO neighbor lookup. Should see what's wrong with OpenRAVE here...
            position = collision_report.contacts[0].pos
            normal = self._object_points[self._object_kd_tree.query(position), 3:][1]
            # normal = collision_report.contacts[0].norm
            real_contacts.append(np.concatenate((position, normal)))
        real_contacts = np.asarray(real_contacts)
        return real_contacts

    def get_root_node(self):
        possible_num_children, possible_num_leaves = self.get_branch_information(0)
        return HFTSNode(num_possible_children=possible_num_children,
                        num_possible_leaves=possible_num_leaves)

    def is_grasp_collision_free(self):
        links = self._robot.get_non_fingertip_links()
        for link in links:
            if self._orEnv.CheckCollision(self._robot.GetLink(link)):
                return False
        return True

    def load_hand(self, hand_file, hand_cache_file):
        if not self._hand_loaded:
            # TODO make this Robotiq hand independent (external hand loader)
            self._robot = RobotiqHand(hand_cache_file=hand_cache_file,
                                     env=self._orEnv, hand_file=hand_file)
            self._hand_manifold = self._robot.get_hand_manifold()
            self._hand_manifold.load()
            self._num_contacts = self._robot.get_contact_number()
            shift = transformations.identity_matrix()
            shift[0, -1] = 0.2
            self._robot.SetTransform(shift)
            rospy.loginfo('Hand loaded in OpenRAVE environment')
            self._hand_loaded = True

    def load_object(self, obj_id, model_id=None):
        if model_id is None:
            model_id = obj_id
        self._data_labeled, self._branching_factors, self._obj_com = \
            self._object_io_interface.get_hfts(model_id, self._b_force_new_hfts)
        if self._data_labeled is None:
            raise RuntimeError('Could not load HFTS model for model ' + model_id)
        self.create_object_kd_tree(self._data_labeled[:, :6])
        self._num_levels = len(self._branching_factors)
        # First, delete old object if there is any
        if self._obj_loaded:
            self._orEnv.Remove(self._obj)
        or_file_name = self._object_io_interface.get_openrave_file_name(model_id)
        self._obj_loaded = self._orEnv.Load(or_file_name)
        if not self._obj_loaded:
            raise RuntimeError('Could not load object model %s in OpenRAVE' % model_id)
        self._obj = self._orEnv.GetKinBody('objectModel')
        rospy.loginfo('Object loaded in OpenRAVE environment')
        if self._scene_interface is not None:
            self._scene_interface.set_target_object(obj_id)
        self.compute_contact_combinations()
        self._obj_loaded = True

    def sample_grasp(self, node, depth_limit, post_opt=False, label_cache=None, open_hand_offset=0.1):
        if depth_limit < 0:
            raise ValueError('HFTSSampler::sample_grasp depth limit must be greater or equal to zero.')
        if node.get_depth() >= self._num_levels:
            raise 
ValueError('HFTSSampler::sample_grasp input node has an invalid depth') if node.get_depth() + depth_limit >= self._num_levels: depth_limit = self._num_levels - node.get_depth() # cap # In case we using the integrated method, we might have a limitation on what nodes to descend to # let's compute this set. allowed_finger_combos = None if label_cache is not None and depth_limit == 1: # TODO This currently only works for hops == 2 assert self._hops == 2 allowed_finger_combos = self.compute_allowed_contact_combinations(node.get_depth(), label_cache) rospy.logdebug('[HFTSSampler::sample_grasp] We have %i allowed contacts' % len(allowed_finger_combos)) if len(allowed_finger_combos) == 0: rospy.logwarn('[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting.') return node elif label_cache is not None and depth_limit != 1: raise ValueError('[HFTSSampler::sample_grasp] Label cache only works for depth_limit == 1') # Now, get a node to start stochastic optimization from seed_ik = None if node.get_depth() == 0: # at root contact_label = self.pick_new_start_node() best_o = -np.inf # need to also consider non-root nodes else: # If we are not at a leaf node, go down in the hierarchy seed_ik = node.get_arm_configuration() contact_label = copy.deepcopy(node.get_labels()) best_o, contact_label = self.extend_hfts_node(contact_label, allowed_finger_combos=allowed_finger_combos) self.reset_robot() depth_limit -= 1 rospy.logdebug('[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' % self._max_iters) # Do stochastic optimization until depth_limit is reached while depth_limit >= 0: # Randomly select siblings to optimize the objective function for iter_now in range(self._max_iters): labels_tmp = self.get_random_sibling_labels(curr_labels=contact_label, allowed_finger_combos=allowed_finger_combos) s_tmp, r_tmp, o_tmp = self.evaluate_grasp(labels_tmp) if self.shc_evaluation(o_tmp, best_o): contact_label = labels_tmp best_o = o_tmp # self._debug_visualize(labels_tmp, handle_index=0) # Descend to next level if we iterate at least once more if depth_limit > 0: best_o, contact_label = self.extend_hfts_node(contact_label) depth_limit -= 1 # Evaluate grasp on robot hand # First, determine a hand configuration and the contact locations grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(contact_label) # Simulate the grasp and do local adjustments b_robotiq_ok, grasp_conf, grasp_pose = self.simulate_grasp(grasp_conf=grasp_conf, hand_contacts=hand_contacts, object_contacts=object_contacts, post_opt=post_opt, swap_contacts=label_cache is None) if b_robotiq_ok: sample_q = 0 stability = best_o else: sample_q = 4 stability = 0.0 # except InvalidTriangleException: # grasp_conf = None # sample_q = 4 # stability = 0.0 is_leaf = (len(contact_label[0]) == self._num_levels) is_goal_sample = (sample_q == 0) and is_leaf if not is_goal_sample and grasp_conf is not None: rospy.logdebug('[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q) b_approximate_feasible = self._robot.avoid_collision_at_fingers(n_step=20) if b_approximate_feasible: grasp_conf = self._robot.GetDOFValues() open_hand_offset = 0.0 logging.debug('[HFTSSampler::sample_grasp] We sampled a grasp on level ' + str(len(contact_label[0]))) if is_goal_sample: logging.debug('[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!') if is_leaf: logging.debug('[HFTSSampler::sample_grasp] We sampled a leaf') if grasp_conf is not None and grasp_pose is not None: collision_free_arm_ik, 
arm_conf, pre_grasp_conf = \ self.check_arm_grasp_validity(grasp_conf=grasp_conf, grasp_pose=grasp_pose, seed=seed_ik, open_hand_offset=open_hand_offset) else: collision_free_arm_ik = False arm_conf = None pre_grasp_conf = None depth = len(contact_label[0]) possible_num_children, possible_num_leaves = self.get_branch_information(depth) return HFTSNode(labels=contact_label, hand_conf=np.asarray(grasp_conf), pre_grasp_conf=pre_grasp_conf, arm_conf=arm_conf, is_goal=is_goal_sample, is_leaf=is_leaf, is_valid=collision_free_arm_ik, num_possible_children=possible_num_children, num_possible_leaves=possible_num_leaves, hand_transform=self._robot.GetTransform()) def set_max_iter(self, m): assert m > 0 self._max_iters = m def set_parameters(self, max_iters=None, reachability_weight=None, com_center_weight=None, hfts_generation_params=None, b_force_new_hfts=None): # TODO some of these parameters are Robotiq hand specific. We probably wanna pass them as dictionary if max_iters is not None: self._max_iters = max_iters assert self._max_iters > 0 if reachability_weight is not None: self._reachability_weight = reachability_weight assert self._reachability_weight >= 0.0 # TODO this is Robotiq hand specific, and outdated self._hand_manifold.set_parameters(com_center_weight) if hfts_generation_params is not None: self._object_io_interface.set_hfts_generation_parameters(hfts_generation_params) if b_force_new_hfts is not None: self._b_force_new_hfts = b_force_new_hfts def shc_evaluation(self, o_tmp, best_o): if best_o < o_tmp: return True else: return False def _simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False): # self.draw_contacts(object_contacts) self._robot.SetDOFValues(grasp_conf) try: T = self._robot.hand_obj_transform(hand_contacts[:3, :3], object_contacts[:, :3]) self._robot.SetTransform(T) except InvalidTriangleException as ite: logging.warn('[HFTSSampler::simulate_grasp] Caught an InvalidTriangleException: ' + str(ite)) return False, grasp_conf, None if post_opt: self._post_optimization(object_contacts) open_success, tips_in_contact = self._robot.comply_fingertips() if not open_success or not tips_in_contact: return False, self._robot.GetDOFValues(), self._robot.GetTransform() if self.check_grasp_validity(): return True, self._robot.GetDOFValues(), self._robot.GetTransform() return False, self._robot.GetDOFValues(), self._robot.GetTransform() def simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False, swap_contacts=True): # TODO this method as it is right now is only useful for the Robotiq hand. 
b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt) if not b_grasp_valid and swap_contacts: self.swap_contacts([0, 1], object_contacts) b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt) return b_grasp_valid, grasp_conf, grasp_pose @staticmethod def swap_contacts(rows, object_contacts): frm = rows[0] to = rows[1] object_contacts[[frm, to], :] = object_contacts[[to, frm], :] def reset_robot(self): shift = transformations.identity_matrix() shift[0, -1] = 0.2 self._robot.SetTransform(shift) # Set hand to default (mean) configuration mean_values = map(lambda min_v, max_v: (min_v + max_v) / 2.0, self._robot.GetDOFLimits()[0], self._robot.GetDOFLimits()[1]) self._robot.SetDOFValues(mean_values, range(len(mean_values))) def pick_new_start_node(self): num_nodes_top_level = self._branching_factors[0] contact_label = [] for i in range(self._num_contacts): contact_label.append([np.random.choice(range(num_nodes_top_level + 1))]) return contact_label def plot_clusters(self, contact_labels): if not self._sampler_viewer: return self.cloud_plot = [] colors = [np.array((1,0,0)), np.array((0,1,0)), np.array((0,0,1))] for i in range(3): label = contact_labels[i] level = len(label) - 1 # indexed from 0 idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1)) points = [self._data_labeled[t, 0:3] for t in idx][0] points = np.asarray(points) self.cloud_plot.append(self._orEnv.plot3(points=points, pointsize=0.006, colors=colors[i], drawstyle=1)) def _post_optimization(self, grasp_contacts): logging.info('[HFTSSampler::_post_optimization] Performing post optimization.') transform = self._robot.GetTransform() angle, axis, point = transformations.rotation_from_matrix(transform) # further optimize hand configuration and pose # TODO this is Robotiq hand specific transform_params = axis.tolist() + [angle] + transform[:3, 3].tolist() robot_dofs = self._robot.GetDOFValues().tolist() def joint_limits_constraint(x, *args): positions, normals, robot = args lower_limits, upper_limits = robot.GetDOFLimits() return -dist_in_range(x[0], [lower_limits[0], upper_limits[0]]) - \ dist_in_range(x[1], [lower_limits[1], upper_limits[1]]) def collision_free_constraint(x, *args): positions, normals, robot = args config = [x[0], x[1]] robot.SetDOFValues(config) env = robot.GetEnv() links = robot.get_non_fingertip_links() for link in links: if env.CheckCollision(robot.GetLink(link)): return -1.0 return 0.0 x_min = scipy.optimize.fmin_cobyla(self._post_optimization_obj_fn, robot_dofs + transform_params, [joint_limits_constraint, collision_free_constraint], rhobeg=.5, rhoend=1e-3, args=(grasp_contacts[:, :3], grasp_contacts[:, 3:], self._robot), maxfun=int(1e8), iprint=0) self._robot.SetDOFValues(x_min[:2]) axis = x_min[2:5] angle = x_min[5] position = x_min[6:] transform = transformations.rotation_matrix(angle, axis) transform[:3, 3] = position self._robot.SetTransform(transform) @staticmethod def _post_optimization_obj_fn(x, *params): # TODO this is Robotiq hand specific desired_contact_points, desired_contact_normals, robot = params dofs = x[:2] robot.SetDOFValues(dofs) axis = x[2:5] angle = x[5] position = x[6:] transform = transformations.rotation_matrix(angle, axis) transform[:3, 3] = position robot.SetTransform(transform) contacts = robot.get_tip_pn() temp_positions = contacts[:, :3] temp_normals = contacts[:, 3:] pos_err = position_distance(desired_contact_points, temp_positions) normal_err 
= normal_distance(desired_contact_normals, temp_normals) return pos_err + normal_err class HFTSNode: def __init__(self, labels=None, hand_conf=None, hand_transform=None, pre_grasp_conf=None, arm_conf=None, is_leaf=False, is_valid=False, is_goal=False, num_possible_children=0, num_possible_leaves=0, quality=0.0): # None values represent the root node if labels is None: self._depth = 0 else: self._depth = len(labels[0]) self._labels = labels self._hand_config = hand_conf self._hand_transform = hand_transform self._is_goal = is_goal self._is_leaf = is_leaf self._is_valid = is_valid self._pre_grasp_conf = pre_grasp_conf self._arm_conf = arm_conf self._num_possible_children = num_possible_children self._num_possible_leaves = num_possible_leaves self._quality = quality def get_labels(self): return self._labels def get_depth(self): return self._depth def get_hand_config(self): return self._hand_config def get_pre_grasp_config(self): return self._pre_grasp_conf def is_goal(self): return self._is_goal def get_hand_transform(self): return self._hand_transform def get_arm_configuration(self): return self._arm_conf def get_unique_label(self): if self._labels is None: return 'root' label = [] for finger_label in self._labels: label.extend(finger_label) return str(label) def is_extendible(self): return not self._is_leaf def is_leaf(self): return self._is_leaf def is_valid(self): return self._is_valid def get_num_possible_children(self): return self._num_possible_children def get_num_possible_leaves(self): return self._num_possible_leaves def get_quality(self): return self._quality
[ "openravepy.RaveDestroy", "numpy.sum", "numpy.random.randint", "numpy.linalg.norm", "utils.normal_distance", "rospy.logwarn", "openravepy.Environment", "IPython.embed", "numpy.random.choice", "math.isnan", "transformations.rotation_from_matrix", "numpy.asarray", "transformations.rotation_matrix", "openravepy.CollisionReport", "rospy.loginfo", "rospy.logdebug", "numpy.linalg.inv", "robotiqloader.RobotiqHand", "numpy.dot", "utils.clamp", "transformations.identity_matrix", "numpy.concatenate", "openravepy.databases.inversekinematics.InverseKinematicsModel", "logging.debug", "math.fabs", "logging.info", "numpy.array", "itertools.izip", "scipy.spatial.KDTree", "utils.position_distance", "utils.dist_in_range", "utils.compute_grasp_stability" ]
[((931, 1053), 'openravepy.databases.inversekinematics.InverseKinematicsModel', 'orpy.databases.inversekinematics.InverseKinematicsModel', (['self._robot'], {'iktype': 'orpy.IkParameterization.Type.Transform6D'}), '(self._robot, iktype\n =orpy.IkParameterization.Type.Transform6D)\n', (986, 1053), True, 'import openravepy as orpy\n'), ((3537, 3555), 'openravepy.Environment', 'orpy.Environment', ([], {}), '()\n', (3553, 3555), True, 'import openravepy as orpy\n'), ((4757, 4775), 'openravepy.RaveDestroy', 'orpy.RaveDestroy', ([], {}), '()\n', (4773, 4775), True, 'import openravepy as orpy\n'), ((5944, 6010), 'utils.compute_grasp_stability', 'compute_grasp_stability', ([], {'grasp_contacts': 'real_contacts', 'mu': 'self._mu'}), '(grasp_contacts=real_contacts, mu=self._mu)\n', (5967, 6010), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((6214, 6235), 'scipy.spatial.KDTree', 'KDTree', (['points[:, :3]'], {}), '(points[:, :3])\n', (6220, 6235), False, 'from scipy.spatial import KDTree\n'), ((7592, 7612), 'numpy.asarray', 'np.asarray', (['contacts'], {}), '(contacts)\n', (7602, 7612), True, 'import numpy as np\n'), ((8957, 8989), 'rospy.logwarn', 'rospy.logwarn', (['"""Debug visualize"""'], {}), "('Debug visualize')\n", (8970, 8989), False, 'import rospy\n'), ((10324, 10344), 'numpy.asarray', 'np.asarray', (['contacts'], {}), '(contacts)\n', (10334, 10344), True, 'import numpy as np\n'), ((11530, 11579), 'itertools.izip', 'itertools.izip', (['old_labels', 'fingertip_assignments'], {}), '(old_labels, fingertip_assignments)\n', (11544, 11579), False, 'import itertools\n'), ((12709, 12731), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (12723, 12731), True, 'import numpy as np\n'), ((14004, 14026), 'openravepy.CollisionReport', 'orpy.CollisionReport', ([], {}), '()\n', (14024, 14026), True, 'import openravepy as orpy\n'), ((15001, 15026), 'numpy.asarray', 'np.asarray', (['real_contacts'], {}), '(real_contacts)\n', (15011, 15026), True, 'import numpy as np\n'), ((17208, 17262), 'rospy.loginfo', 'rospy.loginfo', (['"""Object loaded in OpenRAVE environment"""'], {}), "('Object loaded in OpenRAVE environment')\n", (17221, 17262), False, 'import rospy\n'), ((17476, 17491), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (17489, 17491), False, 'import IPython\n'), ((19585, 19699), 'rospy.logdebug', 'rospy.logdebug', (["('[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' %\n self._max_iters)"], {}), "(\n '[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' %\n self._max_iters)\n", (19599, 19699), False, 'import rospy\n'), ((26450, 26483), 'transformations.identity_matrix', 'transformations.identity_matrix', ([], {}), '()\n', (26481, 26483), False, 'import transformations\n'), ((27826, 27905), 'logging.info', 'logging.info', (['"""[HFTSSampler::_post_optimization] Performing post optimization."""'], {}), "('[HFTSSampler::_post_optimization] Performing post optimization.')\n", (27838, 27905), False, 'import sys, time, logging, copy\n'), ((27982, 28029), 'transformations.rotation_from_matrix', 'transformations.rotation_from_matrix', (['transform'], {}), '(transform)\n', (28018, 28029), False, 'import transformations\n'), ((29569, 29613), 'transformations.rotation_matrix', 'transformations.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (29600, 29613), False, 'import transformations\n'), ((30019, 30063), 'transformations.rotation_matrix', 
'transformations.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (30050, 30063), False, 'import transformations\n'), ((30274, 30331), 'utils.position_distance', 'position_distance', (['desired_contact_points', 'temp_positions'], {}), '(desired_contact_points, temp_positions)\n', (30291, 30331), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((30353, 30407), 'utils.normal_distance', 'normal_distance', (['desired_contact_normals', 'temp_normals'], {}), '(desired_contact_normals, temp_normals)\n', (30368, 30407), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((1217, 1279), 'rospy.loginfo', 'rospy.loginfo', (['"""No IKFast solver found. Generating new one..."""'], {}), "('No IKFast solver found. Generating new one...')\n", (1230, 1279), False, 'import rospy\n'), ((1692, 1729), 'numpy.dot', 'np.dot', (['object_pose', 'hand_pose_object'], {}), '(object_pose, hand_pose_object)\n', (1698, 1729), True, 'import numpy as np\n'), ((5184, 5215), 'numpy.linalg.inv', 'np.linalg.inv', (['object_hfts_pose'], {}), '(object_hfts_pose)\n', (5197, 5215), True, 'import numpy as np\n'), ((11213, 11252), 'numpy.random.choice', 'np.random.choice', (['allowed_finger_combos'], {}), '(allowed_finger_combos)\n', (11229, 11252), True, 'import numpy as np\n'), ((11303, 11393), 'numpy.random.choice', 'np.random.choice', (['self._branching_factors[new_depth]', 'self._num_contacts'], {'replace': '(True)'}), '(self._branching_factors[new_depth], self._num_contacts,\n replace=True)\n', (11319, 11393), True, 'import numpy as np\n'), ((12599, 12621), 'numpy.sum', 'np.sum', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (12605, 12621), True, 'import numpy as np\n'), ((12653, 12676), 'numpy.sum', 'np.sum', (['normals'], {'axis': '(0)'}), '(normals, axis=0)\n', (12659, 12676), True, 'import numpy as np\n'), ((13705, 13744), 'numpy.random.choice', 'np.random.choice', (['allowed_finger_combos'], {}), '(allowed_finger_combos)\n', (13721, 13744), True, 'import numpy as np\n'), ((15735, 15822), 'robotiqloader.RobotiqHand', 'RobotiqHand', ([], {'hand_cache_file': 'hand_cache_file', 'env': 'self._orEnv', 'hand_file': 'hand_file'}), '(hand_cache_file=hand_cache_file, env=self._orEnv, hand_file=\n hand_file)\n', (15746, 15822), False, 'from robotiqloader import RobotiqHand, InvalidTriangleException\n'), ((16047, 16080), 'transformations.identity_matrix', 'transformations.identity_matrix', ([], {}), '()\n', (16078, 16080), False, 'import transformations\n'), ((16168, 16220), 'rospy.loginfo', 'rospy.loginfo', (['"""Hand loaded in OpenRAVE environment"""'], {}), "('Hand loaded in OpenRAVE environment')\n", (16181, 16220), False, 'import rospy\n'), ((21793, 21888), 'rospy.logdebug', 'rospy.logdebug', (["('[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q)"], {}), "(\n '[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q)\n", (21807, 21888), False, 'import rospy\n'), ((22256, 22359), 'logging.debug', 'logging.debug', (['"""[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!"""'], {}), "(\n '[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!'\n )\n", (22269, 22359), False, 'import sys, time, logging, copy\n'), ((22382, 22444), 'logging.debug', 'logging.debug', (['"""[HFTSSampler::sample_grasp] We sampled a leaf"""'], {}), "('[HFTSSampler::sample_grasp] We sampled a 
leaf')\n", (22395, 22444), False, 'import sys, time, logging, copy\n'), ((27284, 27303), 'numpy.array', 'np.array', (['(1, 0, 0)'], {}), '((1, 0, 0))\n', (27292, 27303), True, 'import numpy as np\n'), ((27303, 27322), 'numpy.array', 'np.array', (['(0, 1, 0)'], {}), '((0, 1, 0))\n', (27311, 27322), True, 'import numpy as np\n'), ((27322, 27341), 'numpy.array', 'np.array', (['(0, 0, 1)'], {}), '((0, 0, 1))\n', (27330, 27341), True, 'import numpy as np\n'), ((27631, 27649), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (27641, 27649), True, 'import numpy as np\n'), ((2155, 2177), 'numpy.asarray', 'np.asarray', (['grasp_conf'], {}), '(grasp_conf)\n', (2165, 2177), True, 'import numpy as np\n'), ((2314, 2363), 'utils.clamp', 'clamp', (['pre_grasp_conf', 'lower_limits', 'upper_limits'], {}), '(pre_grasp_conf, lower_limits, upper_limits)\n', (2319, 2363), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((10819, 10836), 'math.isnan', 'math.isnan', (['o_tmp'], {}), '(o_tmp)\n', (10829, 10836), False, 'import math\n'), ((14941, 14975), 'numpy.concatenate', 'np.concatenate', (['(position, normal)'], {}), '((position, normal))\n', (14955, 14975), True, 'import numpy as np\n'), ((18617, 18710), 'rospy.logwarn', 'rospy.logwarn', (['"""[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting."""'], {}), "(\n '[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting.')\n", (18630, 18710), False, 'import rospy\n'), ((23106, 23128), 'numpy.asarray', 'np.asarray', (['grasp_conf'], {}), '(grasp_conf)\n', (23116, 23128), True, 'import numpy as np\n'), ((28520, 28575), 'utils.dist_in_range', 'dist_in_range', (['x[1]', '[lower_limits[1], upper_limits[1]]'], {}), '(x[1], [lower_limits[1], upper_limits[1]])\n', (28533, 28575), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((10856, 10872), 'math.fabs', 'math.fabs', (['o_tmp'], {}), '(o_tmp)\n', (10865, 10872), False, 'import math\n'), ((13054, 13099), 'numpy.random.randint', 'np.random.randint', (['self._branching_factors[i]'], {}), '(self._branching_factors[i])\n', (13071, 13099), True, 'import numpy as np\n'), ((13282, 13339), 'numpy.random.randint', 'np.random.randint', (['self._branching_factors[i + match_len]'], {}), '(self._branching_factors[i + match_len])\n', (13299, 13339), True, 'import numpy as np\n'), ((28441, 28496), 'utils.dist_in_range', 'dist_in_range', (['x[0]', '[lower_limits[0], upper_limits[0]]'], {}), '(x[0], [lower_limits[0], upper_limits[0]])\n', (28454, 28496), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n')]
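The core of `sample_grasp` in the entry above is a stochastic hill climber: `shc_evaluation` accepts a randomly proposed sibling labelling only when it strictly improves the objective. A minimal standalone sketch of that acceptance loop (the names `stochastic_hill_climb` and `propose` are illustrative, not from the source):

import numpy as np

def stochastic_hill_climb(objective, propose, x0, n_iters=100, seed=0):
    # Keep a candidate only if it strictly improves the objective,
    # mirroring the "best_o < o_tmp" rule of shc_evaluation.
    rng = np.random.default_rng(seed)
    best_x, best_o = x0, objective(x0)
    for _ in range(n_iters):
        x_new = propose(best_x, rng)
        o_new = objective(x_new)
        if best_o < o_new:
            best_x, best_o = x_new, o_new
    return best_x, best_o

# Toy usage: climb towards the maximum of a concave function.
best_x, best_o = stochastic_hill_climb(
    objective=lambda x: -(x - 7) ** 2,
    propose=lambda x, rng: x + int(rng.integers(-2, 3)),
    x0=0)
print(best_x, best_o)   # typically reaches x = 7 within a few steps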
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 16:33:02 2018

@author: <NAME>
"""

# Calculate the distance map between the C-alpha atoms in a protein. The input
# file is required to be a C-alpha coordinate file.

import sys
import re
import numpy as np
import matplotlib.pyplot as plt

def get_ca_coordinates(filename):
    # Parse the C-alpha coordinates from the PDB records: every ATOM line
    # in the file contributes one (x, y, z) coordinate triple.
    fh = open(filename, 'r')
    all_coords = []  # nested list to store the coordinates
    for line_i in fh:
        if re.match(r'^\s*?$', line_i):
            pass
        elif re.match(r'^ATOM', line_i):
            line_i = line_i.rstrip()
            coords_i = line_i[26:54]
            coords_i = coords_i.split()  # split by whitespace into individual elements
            coords_i = list(map(float, coords_i))  # convert from string to numeric
            all_coords.append(coords_i)
    fh.close()

    # convert the nested list into a numpy array
    all_coords_ca = np.array(all_coords)
    return all_coords_ca

def calculate_ca_dist(ca_coords):
    # calculate pairwise C-alpha distances
    nres = len(ca_coords)
    dist_mat = np.zeros((nres, nres), dtype=float)  # nres x nres matrix to store the values
    for i in range(0, nres - 1):
        for j in range(i + 1, nres):
            diff_ij = ca_coords[i, :] - ca_coords[j, :]
            r_ij = np.linalg.norm(diff_ij)
            dist_mat[i, j] = r_ij
            dist_mat[j, i] = r_ij
    return dist_mat

# The main script, which invokes the functions above
filename = sys.argv[1]
all_coords_ca = get_ca_coordinates(filename)
dist_mat = calculate_ca_dist(all_coords_ca)
plt.figure()
plt.imshow(dist_mat, cmap='jet')
plt.show()
[ "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "numpy.zeros", "re.match", "matplotlib.pyplot.figure", "numpy.array", "numpy.linalg.norm" ]
[((1873, 1885), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1918), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dist_mat'], {'cmap': '"""jet"""'}), "(dist_mat, cmap='jet')\n", (1896, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1150, 1170), 'numpy.array', 'np.array', (['all_coords'], {}), '(all_coords)\n', (1158, 1170), True, 'import numpy as np\n'), ((1315, 1350), 'numpy.zeros', 'np.zeros', (['(nres, nres)'], {'dtype': 'float'}), '((nres, nres), dtype=float)\n', (1323, 1350), True, 'import numpy as np\n'), ((647, 674), 're.match', 're.match', (['"""^\\\\s*?$"""', 'line_i'], {}), "('^\\\\s*?$', line_i)\n", (655, 674), False, 'import re\n'), ((705, 730), 're.match', 're.match', (['"""^ATOM"""', 'line_i'], {}), "('^ATOM', line_i)\n", (713, 730), False, 'import re\n'), ((1590, 1613), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_ij'], {}), '(diff_ij)\n', (1604, 1613), True, 'import numpy as np\n')]
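The nested loop in `calculate_ca_dist` performs O(n²) Python-level iterations; the same distance map can be produced in one vectorized step with NumPy broadcasting. A sketch (not part of the original script) that could replace the function body:

import numpy as np

def calculate_ca_dist_vectorized(ca_coords):
    # diff[i, j, :] = ca_coords[i] - ca_coords[j], via broadcasting
    diff = ca_coords[:, None, :] - ca_coords[None, :, :]
    return np.linalg.norm(diff, axis=-1)   # (nres, nres) symmetric matrix

coords = np.random.rand(6, 3)
d = calculate_ca_dist_vectorized(coords)
assert np.allclose(d, d.T) and np.allclose(np.diag(d), 0)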
# coding=utf-8 # Author: <NAME> <<EMAIL>> import numpy as np from torch import nn from torch.nn import Parameter from eeggan.pytorch.modules.conv.multiconv import MultiConv1d class WeightScale(object): """ Implemented for PyTorch using WeightNorm implementation https://pytorch.org/docs/stable/_modules/torch/nn/utils/weight_norm.html References ---------- <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Progressive Growing of GANs for Improved Quality, Stability, and Variation. Retrieved from http://arxiv.org/abs/1710.10196 """ def __init__(self, name): self.name = name def compute_weight(self, module): w = getattr(module, self.name + '_unscaled') c = getattr(module, self.name + '_c') tmp = c * w return tmp @staticmethod def apply(module, name, gain): fn = WeightScale(name) weight = getattr(module, name) # remove w from parameter list del module._parameters[name] # Constant from He et al. 2015 c = gain / np.sqrt(np.prod(list(weight.size())[1:])) setattr(module, name + '_c', float(c)) module.register_parameter(name + '_unscaled', nn.Parameter(weight.data)) setattr(module, name, fn.compute_weight(module)) # recompute weight before every forward() module.register_forward_pre_hook(fn) return fn def remove(self, module): weight = self.compute_weight(module) delattr(module, self.name) del module._parameters[self.name + '_unscaled'] del module._parameters[self.name + '_c'] module.register_parameter(self.name, Parameter(weight.data)) def __call__(self, module, inputs, **kwargs): setattr(module, self.name, self.compute_weight(module)) def weight_scale(module, gain=np.sqrt(2), name='weight'): """ Applies equalized learning rate to weights Parameters ---------- module : module Module scaling should be applied to (Conv/Linear) gain : float Gain of following activation layer See torch.nn.init.calculate_gain """ if isinstance(module, MultiConv1d): for i in range(len(module.convs)): WeightScale.apply(module.convs[i], name, gain) else: WeightScale.apply(module, name, gain) return module def remove_weight_scale(module, name='weight'): for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, WeightScale) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError("weight_scale of '{}' not found in {}" .format(name, module))
[ "torch.nn.Parameter", "numpy.sqrt" ]
[((1836, 1846), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1843, 1846), True, 'import numpy as np\n'), ((1207, 1232), 'torch.nn.Parameter', 'nn.Parameter', (['weight.data'], {}), '(weight.data)\n', (1219, 1232), False, 'from torch import nn\n'), ((1665, 1687), 'torch.nn.Parameter', 'Parameter', (['weight.data'], {}), '(weight.data)\n', (1674, 1687), False, 'from torch.nn import Parameter\n')]
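For reference, the constant that `WeightScale.apply` computes is the He initializer's scale, c = gain / sqrt(fan_in), applied at run time instead of at initialization. A standalone sanity check of that constant (a sketch; the variable names are mine, not the module's):

import numpy as np
import torch
from torch import nn

conv = nn.Conv1d(in_channels=4, out_channels=8, kernel_size=3)
gain = np.sqrt(2)
# fan_in = product of all weight dims except dim 0, as in WeightScale.apply
fan_in = np.prod(list(conv.weight.size())[1:])   # 4 * 3 = 12
c = gain / np.sqrt(fan_in)
scaled = c * conv.weight        # what compute_weight() would return
print(float(c), tuple(scaled.shape))

Because the forward pre-hook recomputes `weight` before every forward pass, the optimizer updates `weight_unscaled` while the effective weight stays scaled — the "equalized learning rate" trick from the Karras et al. paper cited in the docstring.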
#!/usr/bin/env python3 """ Python EKF Planner @Author: <NAME>, original MATLAB code and Python version @Author: <NAME>, initial MATLAB port Based on code by <NAME>, Oxford University, http://www.robots.ox.ac.uk/~pnewman """ from collections import namedtuple import numpy as np import scipy as sp import matplotlib.pyplot as plt from spatialmath import base """ Monte-carlo based localisation for estimating vehicle pose based on odometry and observations of known landmarks. """ # TODO: refactor this and EKF, RNG, history, common plots, animation, movie class ParticleFilter: def __init__(self, robot, sensor, R, L, nparticles=500, seed=0, x0=None, verbose=False, animate=False, history=True, workspace=None): """ Particle filter :param robot: robot motion model :type robot: :class:`VehicleBase` subclass, :param sensor: vehicle mounted sensor model :type sensor: :class:`SensorBase` subclass :param R: covariance of the zero-mean Gaussian noise added to the particles at each step (diffusion) :type R: ndarray(3,3) :param L: covariance used in the sensor likelihood model :type L: ndarray(2,2) :param nparticles: number of particles, defaults to 500 :type nparticles: int, optional :param seed: random number seed, defaults to 0 :type seed: int, optional :param x0: initial state, defaults to [0, 0, 0] :type x0: array_like(3), optional :param verbose: display extra debug information, defaults to False :type verbose: bool, optional :param history: retain step-by-step history, defaults to True :type history: bool, optional :param workspace: dimension of workspace, see :func:`~spatialmath.base.graphics.expand_dims` :type workspace: scalar, array_like(2), array_like(4) This class implements a Monte-Carlo estimator or particle filter for vehicle state, based on odometry, a landmark map, and landmark observations. The state of each particle is a possible vehicle configuration :math:`(x,y,\theta)`. Bootstrap particle resampling is used. The working area is defined by ``workspace`` or inherited from the landmark map attached to the ``sensor`` (see :func:`~spatialmath.base.graphics.expand_dims`): ============== ======= ======= ``workspace`` x-range y-range ============== ======= ======= A (scalar) -A:A -A:A [A, B] A:B A:B [A, B, C, D] A:B C:D ============== ======= ======= Particles are initially distributed uniform randomly over this area. Example:: V = np.diag([0.02, np.radians(0.5)]) ** 2 robot = Bicycle(covar=V, animation="car", workspace=10) robot.control = RandomPath(workspace=robot) map = LandmarkMap(nlandmarks=20, workspace=robot.workspace) W = np.diag([0.1, np.radians(1)]) ** 2 sensor = RangeBearingSensor(robot, map, covar=W, plot=True) R = np.diag([0.1, 0.1, np.radians(1)]) ** 2 L = np.diag([0.1, 0.1]) pf = ParticleFilter(robot, sensor, R, L, nparticles=1000) pf.run(T=10) map.plot() robot.plot_xy() pf.plot_xy() plt.plot(pf.get_std()[:100,:]) .. note:: Set ``seed=0`` to get different behaviour from run to run. 
:seealso: :meth:`run` """ self._robot = robot self._sensor = sensor self.R = R self.L = L self.nparticles = nparticles self._animate = animate # self.dim = sensor.map.dim self._history = [] self.x = () self.weight = () self.w0 = 0.05 self._x0 = x0 # create a private random number stream if required self._random = np.random.default_rng(seed) self._seed = seed self._keep_history = history # keep history self._htuple = namedtuple("PFlog", "t odo xest std weights") if workspace is not None: self._dim = base.expand_dims(workspace) else: self._dim = sensor.map.workspace self._workspace = self.robot.workspace self._init() def __str__(self): #ParticleFilter.char Convert to string # # PF.char() is a string representing the state of the ParticleFilter # object in human-readable form. # # See also ParticleFilter.display. def indent(s, n=2): spaces = ' ' * n return s.replace('\n', '\n' + spaces) s = f"ParticleFilter object: {self.nparticles} particles" s += '\nR: ' + base.array2str(self.R) s += '\nL: ' + base.array2str(self.L) if self.robot is not None: s += indent("\nrobot: " + str(self.robot)) if self.sensor is not None: s += indent("\nsensor: " + str(self.sensor)) return s @property def robot(self): """ Get robot object :return: robot used in simulation :rtype: :class:`VehicleBase` subclass """ return self._robot @property def sensor(self): """ Get sensor object :return: sensor used in simulation :rtype: :class:`SensorBase` subclass """ return self._sensor @property def map(self): """ Get map object :return: map used in simulation :rtype: :class:`LandmarkMap` subclass """ return self._map @property def verbose(self): """ Get verbosity state :return: verbosity :rtype: bool """ return self._verbose @property def history(self): """ Get EKF simulation history :return: simulation history :rtype: list of namedtuples At each simulation timestep a namedtuple of is appended to the history list. It contains, for that time step, estimated state and covariance, and sensor observation. :seealso: :meth:`get_t` :meth:`get_xy` :meth:`get_std` :meth:`get_Pnorm` """ return self._history @property def workspace(self): """ Size of robot workspace :return: workspace bounds [xmin, xmax, ymin, ymax] :rtype: ndarray(4) Returns the bounds of the workspace as specified by constructor option ``workspace`` """ return self._workspace @property def random(self): """ Get private random number generator :return: NumPy random number generator :rtype: :class:`numpy.random.Generator` Has methods including: - ``integers(low, high, size, endpoint)`` - ``random(size)`` - ``uniform`` - ``normal(mean, std, size)`` - ``multivariate_normal(mean, covar, size)`` The generator is initialized with the seed provided at constructor time every time ``init`` is called. """ return self._random def _init(self, x0=None): #ParticleFilter.init Initialize the particle filter # # PF.init() initializes the particle distribution and clears the # history. # # Notes:: # - If initial particle states were given to the constructor the states are # set to this value, else a random distribution over the map is used. # - Invoked by the run() method. 
self.robot.init() self.sensor.init() #clear the history self._history = [] # create a new private random number generator if self._seed is not None: self._random = np.random.default_rng(self._seed) self._t = 0 # initialize particles if x0 is None: x0 = self._x0 if x0 is None: # create initial particle distribution as uniformly randomly distributed # over the map workspace and heading angles x = self.random.uniform(self.workspace[0], self.workspace[1], size=(self.nparticles,)) y = self.random.uniform(self.workspace[2], self.workspace[3], size=(self.nparticles,)) t = self.random.uniform(-np.pi, np.pi, size=(self.nparticles,)) self.x = np.c_[x, y, t] self.weight = np.ones((self.nparticles,)) def run(self, T=10, x0=None): """ Run the particle filter simulation :param T: maximum simulation time in seconds :type T: float :param animate: animate motion of vehicle, defaults to False :type animate: bool, optional :param movie: name of movie file to create, defaults to None :type movie: str, optional Simulates the motion of a vehicle (under the control of a driving agent) and the EKF estimator. The steps are: - initialize the filter, vehicle and vehicle driver agent, sensor - for each time step: - step the vehicle and its driver agent, obtain odometry - take a sensor reading - execute the EKF - save information as a namedtuple to the history list for later display :seealso: :meth:`history` :meth:`landmark` :meth:`landmarks` :meth:`get_xy` :meth:`get_t` :meth:`get_std` :meth:`plot_xy` """ self._init(x0=x0) # anim = Animate(opt.movie) # display the initial particles if self._animate: self.h, = plt.plot(self.x[:, 0], self.x[:, 1], 'go', zorder=0, markersize=3, markeredgecolor='none', alpha=0.3, label='particle') # set(self.h, 'Tag', 'particles') # self.robot.plot() # iterate over time for i in range(round(T / self.robot.dt)): self._step() # anim.add() # anim.close() def _step(self): #fprintf('---- step\n') odo = self.robot.step() # move the robot # update the particles based on odometry self._predict(odo) # get a sensor reading z, lm_id = self.sensor.reading() if z is not None: self._observe(z, lm_id) #fprintf(' observe beacon #d\n', lm_id) self._select() # our estimate is simply the mean of the particles x_est = self.x.mean(axis=0) std_est = self.x.std(axis=0) # std is more complex for angles, need to account for 2pi wrap std_est[2] = np.sqrt(np.sum(base.angdiff(self.x[:,2], x_est[2]) ** 2)) / (self.nparticles-1) # display the updated particles # set(self.h, 'Xdata', self.x(:,1), 'Ydata', self.x(:,2), 'Zdata', self.x(:,3)) if self._animate: self.h.set_xdata(self.x[:, 0]) self.h.set_ydata(self.x[:, 1]) # if ~isempty(self.anim) # self.anim.add() if self._keep_history: hist = self._htuple( self.robot._t, odo.copy(), x_est, std_est, self.weight.copy() ) self._history.append(hist) def plot_pdf(self): """ Plot particle PDF Displays a discrete PDF of vehicle position. Creates a 3D plot where the x- and y-axes are the estimated vehicle position and the z-axis is the particle weight. Each particle is represented by a a vertical line segment of height equal to particle weight. 
""" ax = base.plotvol3() for (x, y, t), weight in zip(self.x, self.weight): # ax.plot([x, x], [y, y], [0, weight], 'r') ax.plot([x, x], [y, y], [0, weight], 'skyblue', linewidth=3) ax.plot(x, y, weight, 'k.', markersize=6) plt.grid(True) plt.xlabel('X') plt.ylabel('Y') plt.xlim() ax.set_zlabel('particle weight') ax.view_init(29, 59) def _predict(self, odo): # step 2 # update the particle state based on odometry and a random perturbation # Straightforward code: # # for i=1:self.nparticles # x = self.robot.f( self.x(i,:), odo)' + sqrt(self.R)*self.randn[2,0] # x[2] = angdiff(x[2]) # self.x(i,:) = x # # Vectorized code: self.x = self.robot.f(self.x, odo) + \ self.random.multivariate_normal((0, 0, 0), self.R, size=self.nparticles) self.x[:, 2] = base.angdiff(self.x[:, 2]) def _observe(self, z, lm_id): # step 3 # predict observation and score the particles # Straightforward code: # # for p = 1:self.nparticles # # what do we expect observation to be for this particle? # # use the sensor model h(.) # z_pred = self.sensor.h( self.x(p,:), lm_id) # # # how different is it # innov[0] = z[0] - z_pred[0] # innov[1] = angdiff(z[1], z_pred[1]) # # # get likelihood (new importance). Assume Gaussian but any PDF works! # # If predicted obs is very different from actual obs this score will be low # # ie. this particle is not very good at predicting the observation. # # A lower score means it is less likely to be selected for the next generation... # # The weight is never zero. # self.weight(p) = exp(-0.5*innov'*inv(self.L)*innov) + 0.05 # end # # Vectorized code: invL = np.linalg.inv(self.L) z_pred = self.sensor.h(self.x, lm_id) z_pred[:, 0] = z[0] - z_pred[:, 0] z_pred[:, 1] = base.angdiff(z[1], z_pred[:, 1]) LL = -0.5 * np.r_[invL[0,0], invL[1,1], 2*invL[0,1]] e = np.c_[z_pred[:, 0]**2, z_pred[:, 1]**2, z_pred[:,0] * z_pred[:, 1]] @ LL self.weight = np.exp(e) + self.w0 def _select(self): # step 4 # select particles based on their weights # # particles with large weights will occupy a greater percentage of the # y axis in a cummulative plot cdf = np.cumsum(self.weight) / self.weight.sum() # so randomly (uniform) choosing y values is more likely to correspond to # better particles... iselect = self.random.uniform(0, 1, size=(self.nparticles,)) # find the particle that corresponds to each y value (just a look up) interpfun = sp.interpolate.interp1d(cdf, np.arange(self.nparticles), assume_sorted=True, kind='nearest', fill_value='extrapolate') inextgen = interpfun(iselect).astype(np.int) # copy selected particles for next generation.. self.x = self.x[inextgen, :] def get_t(self): """ Get time from simulation :return: simulation time vector :rtype: ndarray(n) Return simulation time vector, starts at zero. The timestep is an attribute of the ``robot`` object. """ return np.array([h.t for h in self._history]) def get_xyt(self): r""" Get estimated vehicle trajectory :return: vehicle trajectory where each row is configuration :math:`(x, y, \theta)` :rtype: ndarray(n,3) :seealso: :meth:`plot_xy` :meth:`run` :meth:`history` """ return np.array([h.xest[:2] for h in self._history]) def get_std(self): r""" Get standard deviation of particles :return: standard deviation of vehicle position estimate :rtype: ndarray(n,2) Return the standard deviation :math:`(\sigma_x, \sigma_y)` of the particle cloud at each time step. 
        :seealso: :meth:`get_xyt`
        """
        return np.array([h.std for h in self._history])

    def plot_xy(self, block=False, **kwargs):
        r"""
        Plot estimated vehicle position

        :param kwargs: keyword arguments passed to :meth:`~matplotlib.axes.Axes.plot`
        :param block: hold plot until figure is closed, defaults to False
        :type block: bool, optional

        Plot the estimated vehicle path in the xy-plane.

        :seealso: :meth:`get_xyt`
        """
        xyt = self.get_xyt()
        plt.plot(xyt[:, 0], xyt[:, 1], **kwargs)
        # plt.show(block=block)
[ "matplotlib.pyplot.xlim", "spatialmath.base.expand_dims", "matplotlib.pyplot.plot", "numpy.ones", "numpy.random.default_rng", "numpy.cumsum", "spatialmath.base.array2str", "numpy.linalg.inv", "spatialmath.base.plotvol3", "collections.namedtuple", "numpy.array", "numpy.exp", "matplotlib.pyplot.ylabel", "spatialmath.base.angdiff", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.arange" ]
[((3961, 3988), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3982, 3988), True, 'import numpy as np\n'), ((4096, 4141), 'collections.namedtuple', 'namedtuple', (['"""PFlog"""', '"""t odo xest std weights"""'], {}), "('PFlog', 't odo xest std weights')\n", (4106, 4141), False, 'from collections import namedtuple\n'), ((8522, 8549), 'numpy.ones', 'np.ones', (['(self.nparticles,)'], {}), '((self.nparticles,))\n', (8529, 8549), True, 'import numpy as np\n'), ((11741, 11756), 'spatialmath.base.plotvol3', 'base.plotvol3', ([], {}), '()\n', (11754, 11756), False, 'from spatialmath import base\n'), ((12008, 12022), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12016, 12022), True, 'import matplotlib.pyplot as plt\n'), ((12031, 12046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (12041, 12046), True, 'import matplotlib.pyplot as plt\n'), ((12055, 12070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (12065, 12070), True, 'import matplotlib.pyplot as plt\n'), ((12079, 12089), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (12087, 12089), True, 'import matplotlib.pyplot as plt\n'), ((12707, 12733), 'spatialmath.base.angdiff', 'base.angdiff', (['self.x[:, 2]'], {}), '(self.x[:, 2])\n', (12719, 12733), False, 'from spatialmath import base\n'), ((13778, 13799), 'numpy.linalg.inv', 'np.linalg.inv', (['self.L'], {}), '(self.L)\n', (13791, 13799), True, 'import numpy as np\n'), ((13912, 13944), 'spatialmath.base.angdiff', 'base.angdiff', (['z[1]', 'z_pred[:, 1]'], {}), '(z[1], z_pred[:, 1])\n', (13924, 13944), False, 'from spatialmath import base\n'), ((15267, 15305), 'numpy.array', 'np.array', (['[h.t for h in self._history]'], {}), '([h.t for h in self._history])\n', (15275, 15305), True, 'import numpy as np\n'), ((15595, 15640), 'numpy.array', 'np.array', (['[h.xest[:2] for h in self._history]'], {}), '([h.xest[:2] for h in self._history])\n', (15603, 15640), True, 'import numpy as np\n'), ((15996, 16036), 'numpy.array', 'np.array', (['[h.std for h in self._history]'], {}), '([h.std for h in self._history])\n', (16004, 16036), True, 'import numpy as np\n'), ((16561, 16601), 'matplotlib.pyplot.plot', 'plt.plot', (['xyt[:, 0]', 'xyt[:, 1]'], {}), '(xyt[:, 0], xyt[:, 1], **kwargs)\n', (16569, 16601), True, 'import matplotlib.pyplot as plt\n'), ((4201, 4228), 'spatialmath.base.expand_dims', 'base.expand_dims', (['workspace'], {}), '(workspace)\n', (4217, 4228), False, 'from spatialmath import base\n'), ((4808, 4830), 'spatialmath.base.array2str', 'base.array2str', (['self.R'], {}), '(self.R)\n', (4822, 4830), False, 'from spatialmath import base\n'), ((4855, 4877), 'spatialmath.base.array2str', 'base.array2str', (['self.L'], {}), '(self.L)\n', (4869, 4877), False, 'from spatialmath import base\n'), ((7888, 7921), 'numpy.random.default_rng', 'np.random.default_rng', (['self._seed'], {}), '(self._seed)\n', (7909, 7921), True, 'import numpy as np\n'), ((9705, 9828), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x[:, 0]', 'self.x[:, 1]', '"""go"""'], {'zorder': '(0)', 'markersize': '(3)', 'markeredgecolor': '"""none"""', 'alpha': '(0.3)', 'label': '"""particle"""'}), "(self.x[:, 0], self.x[:, 1], 'go', zorder=0, markersize=3,\n markeredgecolor='none', alpha=0.3, label='particle')\n", (9713, 9828), True, 'import matplotlib.pyplot as plt\n'), ((14114, 14123), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (14120, 14123), True, 'import numpy as np\n'), ((14378, 14400), 'numpy.cumsum', 
'np.cumsum', (['self.weight'], {}), '(self.weight)\n', (14387, 14400), True, 'import numpy as np\n'), ((14732, 14758), 'numpy.arange', 'np.arange', (['self.nparticles'], {}), '(self.nparticles)\n', (14741, 14758), True, 'import numpy as np\n'), ((10723, 10759), 'spatialmath.base.angdiff', 'base.angdiff', (['self.x[:, 2]', 'x_est[2]'], {}), '(self.x[:, 2], x_est[2])\n', (10735, 10759), False, 'from spatialmath import base\n')]
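`_select` implements bootstrap resampling by inverting the cumulative weight distribution with a nearest-neighbour `interp1d` lookup. The same idea in a few self-contained lines, using `np.searchsorted` in place of the interpolator (a sketch of the standard variant, not the toolbox's code):

import numpy as np

def resample(particles, weights, rng=None):
    # Invert the weight CDF with uniform draws; heavy particles occupy
    # more of the [0, 1) interval, so they are copied more often.
    rng = rng or np.random.default_rng(0)
    cdf = np.cumsum(weights) / np.sum(weights)
    idx = np.searchsorted(cdf, rng.uniform(0, 1, size=len(weights)))
    return particles[idx]

particles = np.arange(5.0).reshape(-1, 1)
weights = np.array([0.01, 0.01, 0.9, 0.05, 0.03])
print(resample(particles, weights).ravel())   # dominated by copies of particle 2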
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate various similarity matrices through the MatrixGenerator methods:
gen_matrix for synthetic data, and gen_E_coli_matrix for DNA data.
"""
import numpy as np
# from scipy import sparse as sp
from scipy.linalg import toeplitz


def gen_lambdas(type_matrix, n):
    '''
    Generates lambdas to define a Toeplitz matrix with diagonal elements
    t_k = lambdas[k]
    '''
    array_lambdas = np.zeros(n)
    if type_matrix == 'LinearBanded':
        # Bandwidth = 10% ?
        cov = int(np.floor(n/10))
        array_lambdas[:cov] = cov - abs(np.arange(cov))

    elif type_matrix == 'LinearStrongDecrease':
        alpha = 0.1
        array_lambdas = np.exp(-alpha*np.arange(n))

    elif type_matrix == 'CircularBanded':
        # Bandwidth = 10% ?
        cov = int(np.floor(n/10))
        array_lambdas[:cov] = cov - abs(np.arange(cov))
        array_lambdas[-cov:] = array_lambdas[:cov][::-1]

    elif type_matrix == 'CircularStrongDecrease':
        alpha = 0.1
        array_lambdas = np.exp(-alpha*np.arange(n))
        p = int(np.floor(n/2))
        array_lambdas[-p:] = array_lambdas[:p][::-1]

    else:
        raise ValueError("Unrecognized type_matrix!")

    return array_lambdas


def gen_toeplitz_sim(lambdas):
    '''Build a Toeplitz strong-R-matrix'''
    return toeplitz(lambdas)

#
#
# def sym_max(X):
#     """
#     Returns symmetrization of sparse matrix X.
#     X_sym = max(X, X.T) rather than X + X.T to avoid adding up values when
#     there are duplicates in the overlap file.
#     If X is triangular, max(X, X.T) and X + X.T are equal.
#
#     TODO : check how many values are not symmetric
#     and separate cases where Aij = 0 ...
#     """
#
#     dif_mat = X - X.T
#     dif_mat.data = np.where(dif_mat.data < 0, 1, 0)
#     return X - X.multiply(dif_mat) + X.T.multiply(dif_mat)


class MatrixGenerator():

    # Apply permutation
    def apply_perm(self, perm):
        '''
        Apply a permutation to the similarity matrix.
        perm is given as a numpy array.
        '''
        n_ = self.n
        # check the size is OK
        if np.shape(perm)[0] != n_:
            raise ValueError('the size of the permutation does not match '
                             'that of the similarity matrix.')
        # check perm is a permutation
        if not (np.sort(perm) == np.arange(n_)).all():
            raise ValueError('perm is not a permutation of [0, ..., n-1]')
        self.sim_matrix = self.sim_matrix[perm]
        self.sim_matrix = self.sim_matrix.T[perm]
        self.sim_matrix = self.sim_matrix.T
        return self

    # Add additive noise
    def add_sparse_noise(self, noise_prop, noise_eps, law='uniform'):
        '''
        Add symmetric sparse noise to the similarity matrix.
        noise_prop controls the support of the sparse noise (the fraction of
        entries kept), and noise_eps controls the amplitude of the noise.
        '''
        n_ = self.n
        # first find a random support
        N = np.tril(np.random.rand(n_, n_))
        idx = np.where(N > noise_prop)
        N[idx] = 0
        # assign values on the support
        [ii, jj] = np.where(N != 0)
        if law == 'gaussian':
            N[np.where(N != 0)] = noise_eps * np.abs(
                np.random.normal(0, 1, len(ii)))
        elif law == 'uniform':
            N[np.where(N != 0)] = noise_eps * np.random.rand(1, len(ii))
        # symmetrize the noise
        N = N + N.T
        # Add noise to similarity matrix
        self.sim_matrix += N
        return self

    def gen_matrix(self, n, type_matrix='LinearBanded', apply_perm=True,
                   perm=None, noise_prop=1, noise_ampl=0, law='uniform'):

        self.n = n
        lambdas = gen_lambdas(type_matrix, n)
        self.sim_matrix = gen_toeplitz_sim(lambdas)
        if apply_perm:
            # generate a permutation if not provided by the user
            # (test against None: the truth value of a numpy array is ambiguous)
            if perm is None:
                perm = np.random.permutation(n)
            self.apply_perm(perm)
            self.true_perm = perm
        else:
            self.true_perm = np.arange(n)
        if noise_ampl > 0:
            normed_fro = np.sqrt(np.mean(self.sim_matrix**2))
            self.add_sparse_noise(noise_prop, noise_ampl*normed_fro,
                                  law=law)
        return self
#
#     def gen_E_coli_matrix(self, apply_perm=False):
#         """
#         generate similarity matrix from <NAME>i ONT reads [ref Loman et al.]
#         TODO :
#         - change the path to data folder if this is a package ?
#         - recompute reads_pos with minimap2 instead of BWA.
#         """
#         # Read data matrix
#         data_dir = './data/'
#         mat_fn = data_dir + 'ecoli_mat.csv'
#         pos_fn = data_dir + 'ecoli_ref_pos.csv'
#         mat_idxs = np.genfromtxt(mat_fn, delimiter=',')
#         reads_pos = np.genfromtxt(pos_fn, delimiter=',')
#         n_reads = reads_pos.shape[0]
#         sim_mat = sp.coo_matrix((mat_idxs[:, 2],
#                                (mat_idxs[:, 0]-1, mat_idxs[:, 1]-1)),
#                               shape=(n_reads, n_reads),
#                               dtype='float64').tocsr()
#         sim_mat = sym_max(sim_mat)
#         # Remove unaligned reads (unknown ground truth position)
#         in_idx = np.argwhere(reads_pos < 7e6)[:, 0]
#         sim_lil = sim_mat.tolil()
#         self.n = len(in_idx)
#         if apply_perm:
#             perm = np.random.permutation(self.n)
#             self.true_perm = perm
#             in_idx = in_idx[perm]
#         else:
#             self.true_perm = np.arange(self.n)
#         sim_lil = sim_lil[in_idx, :][:, in_idx]
#         self.sim_matrix = sim_lil.tocsr()
#
#         return self
[ "scipy.linalg.toeplitz", "numpy.floor", "numpy.zeros", "numpy.shape", "numpy.sort", "numpy.where", "numpy.arange", "numpy.mean", "numpy.random.permutation", "numpy.random.rand" ]
[((444, 455), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (452, 455), True, 'import numpy as np\n'), ((1336, 1353), 'scipy.linalg.toeplitz', 'toeplitz', (['lambdas'], {}), '(lambdas)\n', (1344, 1353), False, 'from scipy.linalg import toeplitz\n'), ((3120, 3144), 'numpy.where', 'np.where', (['(N > noise_prop)'], {}), '(N > noise_prop)\n', (3128, 3144), True, 'import numpy as np\n'), ((3223, 3239), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3231, 3239), True, 'import numpy as np\n'), ((540, 556), 'numpy.floor', 'np.floor', (['(n / 10)'], {}), '(n / 10)\n', (548, 556), True, 'import numpy as np\n'), ((3082, 3104), 'numpy.random.rand', 'np.random.rand', (['n_', 'n_'], {}), '(n_, n_)\n', (3096, 3104), True, 'import numpy as np\n'), ((4155, 4167), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4164, 4167), True, 'import numpy as np\n'), ((596, 610), 'numpy.arange', 'np.arange', (['cov'], {}), '(cov)\n', (605, 610), True, 'import numpy as np\n'), ((2130, 2144), 'numpy.shape', 'np.shape', (['perm'], {}), '(perm)\n', (2138, 2144), True, 'import numpy as np\n'), ((3284, 3300), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3292, 3300), True, 'import numpy as np\n'), ((4019, 4043), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (4040, 4043), True, 'import numpy as np\n'), ((4228, 4257), 'numpy.mean', 'np.mean', (['(self.sim_matrix ** 2)'], {}), '(self.sim_matrix ** 2)\n', (4235, 4257), True, 'import numpy as np\n'), ((719, 731), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (728, 731), True, 'import numpy as np\n'), ((822, 838), 'numpy.floor', 'np.floor', (['(n / 10)'], {}), '(n / 10)\n', (830, 838), True, 'import numpy as np\n'), ((3418, 3434), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3426, 3434), True, 'import numpy as np\n'), ((878, 892), 'numpy.arange', 'np.arange', (['cov'], {}), '(cov)\n', (887, 892), True, 'import numpy as np\n'), ((1090, 1105), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (1098, 1105), True, 'import numpy as np\n'), ((2352, 2365), 'numpy.sort', 'np.sort', (['perm'], {}), '(perm)\n', (2359, 2365), True, 'import numpy as np\n'), ((2369, 2382), 'numpy.arange', 'np.arange', (['n_'], {}), '(n_)\n', (2378, 2382), True, 'import numpy as np\n'), ((1060, 1072), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1069, 1072), True, 'import numpy as np\n')]
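A usage sketch for the class above (assuming `MatrixGenerator` and NumPy are in scope): build a permuted, noisy banded matrix, then undo the permutation with the stored ground truth.

import numpy as np

gen = MatrixGenerator().gen_matrix(
    n=100, type_matrix='LinearBanded', apply_perm=True,
    noise_prop=0.5, noise_ampl=0.1, law='uniform')
inv_perm = np.argsort(gen.true_perm)              # inverse of the applied permutation
recovered = gen.sim_matrix[inv_perm][:, inv_perm]  # back in Toeplitz order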
from __future__ import print_function import torch import numpy as np import os # from torch_scatter import scatter_add def mkdir(path): if not os.path.exists(path): os.makedirs(path) MESH_EXTENSIONS = [ '.obj', ] def is_mesh_file(filename): return any(filename.endswith(extension) for extension in MESH_EXTENSIONS) def pad(input_arr, target_length, val=0, dim=1): shp = input_arr.shape npad = [(0, 0) for _ in range(len(shp))] npad[dim] = (0, target_length - shp[dim]) return np.pad(input_arr, pad_width=npad, mode='constant', constant_values=val) def seg_accuracy(predicted, ssegs, meshes): correct = 0 ssegs = ssegs.squeeze(-1) correct_mat = ssegs.gather(2, predicted.cpu().unsqueeze(dim=2)) for mesh_id, mesh in enumerate(meshes): correct_vec = correct_mat[mesh_id, :mesh.edges_count, 0] edge_areas = torch.from_numpy(mesh.get_edge_areas()) correct += (correct_vec.float() * edge_areas).sum() return correct def intersection_over_union(preds, target, num_classes): preds, target = torch.nn.functional.one_hot(preds, num_classes), torch.nn.functional.one_hot(target, num_classes) iou = torch.zeros(num_classes, dtype=torch.float32) for idx, pred in enumerate(preds): i = (pred & target[idx]).sum(dim=0) u = (pred | target[idx]).sum(dim=0) iou = iou.add(i.cpu().to(torch.float) / u.cpu().to(torch.float)) return iou def mean_iou_calc(pred, target, num_classes): #Removal of padded labels marked with -1 slimpred = [] slimtarget = [] for batch in range(pred.shape[0]): if (target[batch] == -1).any(): slimLabels = target[batch][target[batch]!=-1] slimtarget.append(slimLabels) slimpred.append(pred[batch][:slimLabels.size()[0]]) pred = torch.stack(slimpred,0) target = torch.stack(slimtarget, 0) iou = intersection_over_union(pred, target, num_classes) mean_iou = iou.mean(dim=-1) return mean_iou, iou def print_network(net): """Print the total number of parameters in the network Parameters: network """ print('---------- Network initialized -------------') num_params = 0 for param in net.parameters(): num_params += param.numel() print('[Network] Total number of parameters : %.3f M' % (num_params / 1e6)) print('-----------------------------------------------') def get_heatmap_color(value, minimum=0, maximum=1): minimum, maximum = float(minimum), float(maximum) ratio = 2 * (value - minimum) / (maximum - minimum) b = int(max(0, 255 * (1 - ratio))) r = int(max(0, 255 * (ratio - 1))) g = 255 - b - r return r, g, b def normalize_np_array(np_array): min_value = np.min(np_array) max_value = np.max(np_array) return (np_array - min_value) / (max_value - min_value) def calculate_entropy(np_array): entropy = 0 np_array /= np.sum(np_array) for a in np_array: if a != 0: entropy -= a * np.log(a) entropy /= np.log(np_array.shape[0]) return entropy def pad_with(vector, pad_width, iaxis, kwargs): pad_value = kwargs.get('padder', 10) vector[:pad_width[0]] = pad_value vector[-pad_width[1]:] = pad_value def myindexrowselect(groups, mask_index, device): sparseIndices = groups._indices() newIndices = [] for i, value in enumerate(mask_index): #Get index from relevant indices index = (sparseIndices[0] == value).nonzero() #Get rows by index sparseRow = [sparseIndices[:, value] for value in index] sparseRow = torch.cat(sparseRow,1)[1] singleRowIndices = torch.squeeze(torch.full((1,len(sparseRow)),i, dtype=torch.long),0).to(sparseRow.device) indices = torch.stack((singleRowIndices,sparseRow)) newIndices.append(indices) allNewIndices = torch.cat(newIndices,1) #Create new tensor groups = torch.sparse_coo_tensor(indices=allNewIndices, 
values=torch.ones(allNewIndices.shape[1], dtype=torch.float), size=(len(mask_index), groups.shape[1])) return groups
[ "numpy.pad", "torch.ones", "numpy.sum", "torch.stack", "numpy.log", "os.makedirs", "os.path.exists", "torch.nn.functional.one_hot", "torch.cat", "numpy.min", "numpy.max", "torch.zeros" ]
[((522, 593), 'numpy.pad', 'np.pad', (['input_arr'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': 'val'}), "(input_arr, pad_width=npad, mode='constant', constant_values=val)\n", (528, 593), True, 'import numpy as np\n'), ((1190, 1235), 'torch.zeros', 'torch.zeros', (['num_classes'], {'dtype': 'torch.float32'}), '(num_classes, dtype=torch.float32)\n', (1201, 1235), False, 'import torch\n'), ((1850, 1874), 'torch.stack', 'torch.stack', (['slimpred', '(0)'], {}), '(slimpred, 0)\n', (1861, 1874), False, 'import torch\n'), ((1887, 1913), 'torch.stack', 'torch.stack', (['slimtarget', '(0)'], {}), '(slimtarget, 0)\n', (1898, 1913), False, 'import torch\n'), ((2780, 2796), 'numpy.min', 'np.min', (['np_array'], {}), '(np_array)\n', (2786, 2796), True, 'import numpy as np\n'), ((2813, 2829), 'numpy.max', 'np.max', (['np_array'], {}), '(np_array)\n', (2819, 2829), True, 'import numpy as np\n'), ((2957, 2973), 'numpy.sum', 'np.sum', (['np_array'], {}), '(np_array)\n', (2963, 2973), True, 'import numpy as np\n'), ((3068, 3093), 'numpy.log', 'np.log', (['np_array.shape[0]'], {}), '(np_array.shape[0])\n', (3074, 3093), True, 'import numpy as np\n'), ((151, 171), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (165, 171), False, 'import os\n'), ((181, 198), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (192, 198), False, 'import os\n'), ((1082, 1129), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['preds', 'num_classes'], {}), '(preds, num_classes)\n', (1109, 1129), False, 'import torch\n'), ((1131, 1179), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['target', 'num_classes'], {}), '(target, num_classes)\n', (1158, 1179), False, 'import torch\n'), ((3807, 3849), 'torch.stack', 'torch.stack', (['(singleRowIndices, sparseRow)'], {}), '((singleRowIndices, sparseRow))\n', (3818, 3849), False, 'import torch\n'), ((3909, 3933), 'torch.cat', 'torch.cat', (['newIndices', '(1)'], {}), '(newIndices, 1)\n', (3918, 3933), False, 'import torch\n'), ((3647, 3670), 'torch.cat', 'torch.cat', (['sparseRow', '(1)'], {}), '(sparseRow, 1)\n', (3656, 3670), False, 'import torch\n'), ((4061, 4114), 'torch.ones', 'torch.ones', (['allNewIndices.shape[1]'], {'dtype': 'torch.float'}), '(allNewIndices.shape[1], dtype=torch.float)\n', (4071, 4114), False, 'import torch\n'), ((3043, 3052), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (3049, 3052), True, 'import numpy as np\n')]
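`intersection_over_union` above relies on `one_hot` producing integer masks, so per-class intersections and unions reduce to bitwise `&` / `|` followed by a sum. A minimal worked check (toy labels, not from the repository):

import torch

pred = torch.tensor([[0, 1, 2, 2]])
target = torch.tensor([[0, 1, 1, 2]])
p = torch.nn.functional.one_hot(pred, num_classes=3)
t = torch.nn.functional.one_hot(target, num_classes=3)
inter = (p & t).sum(dim=1)             # per class: [1, 1, 1]
union = (p | t).sum(dim=1)             # per class: [1, 2, 2]
print(inter.float() / union.float())   # -> [[1.0, 0.5, 0.5]]

Note that the source divides per class by the union, so a class absent from both prediction and target yields 0/0 = nan; callers may want to mask such classes out before averaging.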
import logging
import os

import numpy as np
from sklearn.metrics import classification_report
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import CrossEntropyLoss

from data.dataset import COVIDxFolder
from data import transforms
from torch.utils.data import DataLoader
from model import architecture
import util
import config

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def save_model(model, config):
    if isinstance(model, torch.nn.DataParallel):
        # Save without the DataParallel module
        model_dict = model.module.state_dict()
    else:
        model_dict = model.state_dict()

    state = {
        "state_dict": model_dict,
        "global_step": config['global_step'],
        "clf_report": config['clf_report']
    }
    f1_macro = config['clf_report']['macro avg']['f1-score'] * 100
    name = "{}_F1_{:.2f}_step_{}.pth".format(config['name'],
                                             f1_macro,
                                             config['global_step'])
    model_path = os.path.join(config['save_dir'], name)
    torch.save(state, model_path)
    log.info("Saved model to {}".format(model_path))


def validate(data_loader, model, best_score, global_step, cfg):
    model.eval()
    gts, predictions = [], []

    log.info("Validation started...")
    for data in data_loader:
        imgs, labels = data
        imgs = util.to_device(imgs, gpu=cfg.gpu)

        with torch.no_grad():
            logits = model(imgs)
            probs = model.module.probability(logits)
            preds = torch.argmax(probs, dim=1).cpu().numpy()

        labels = labels.cpu().detach().numpy()

        predictions.extend(preds)
        gts.extend(labels)

    predictions = np.array(predictions, dtype=np.int32)
    gts = np.array(gts, dtype=np.int32)
    acc, f1, prec, rec = util.clf_metrics(predictions=predictions,
                                          targets=gts,
                                          average="macro")
    report = classification_report(gts, predictions, output_dict=True)

    log.info("VALIDATION | Accuracy {:.4f} | F1 {:.4f} | Precision {:.4f} | "
             "Recall {:.4f}".format(acc, f1, prec, rec))

    if acc > best_score:
        save_config = {
            'name': config.name,
            'save_dir': config.ckpts_dir,
            'global_step': global_step,
            'clf_report': report
        }
        save_model(model=model, config=save_config)
        best_score = acc
    log.info("Validation end")

    model.train()
    return best_score


def main():
    if config.gpu and not torch.cuda.is_available():
        raise ValueError("GPU not supported or enabled on this system.")
    use_gpu = config.gpu

    log.info("Loading train dataset")
    train_dataset = COVIDxFolder(config.train_imgs, config.train_labels,
                                 transforms.train_transforms(config.width,
                                                             config.height))
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=config.n_threads,
                              pin_memory=use_gpu)
    log.info("Number of training examples {}".format(len(train_dataset)))

    log.info("Loading val dataset")
    val_dataset = COVIDxFolder(config.val_imgs, config.val_labels,
                               transforms.val_transforms(config.width,
                                                         config.height))
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size,
                            shuffle=False,
                            num_workers=config.n_threads,
                            pin_memory=use_gpu)
    log.info("Number of validation examples {}".format(len(val_dataset)))

    if config.weights:
        # load the checkpoint so the log message below is truthful
        state = torch.load(config.weights)
        log.info("Loaded model weights from: {}".format(config.weights))
    else:
        state = None

    state_dict = state["state_dict"] if state else None
    model = architecture.COVIDNext50(n_classes=config.n_classes)
    if state_dict:
        model = util.load_model_weights(model=model, state_dict=state_dict)

    if use_gpu:
        model.cuda()
        model = torch.nn.DataParallel(model)
    optim_layers = filter(lambda p: p.requires_grad, model.parameters())

    # optimizer and lr scheduler
    optimizer = Adam(optim_layers,
                     lr=config.lr,
weight_decay=config.weight_decay) scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=config.lr_reduce_factor, patience=config.lr_reduce_patience, mode='max', min_lr=1e-7) # Load the last global_step from the checkpoint if existing global_step = 0 if state is None else state['global_step'] + 1 class_weights = util.to_device(torch.FloatTensor(config.loss_weights), gpu=use_gpu) loss_fn = CrossEntropyLoss() # Reset the best metric score best_score = -1 for epoch in range(config.epochs): log.info("Started epoch {}/{}".format(epoch + 1, config.epochs)) for data in train_loader: imgs, labels = data imgs = util.to_device(imgs, gpu=use_gpu) labels = util.to_device(labels, gpu=use_gpu) logits = model(imgs) loss = loss_fn(logits, labels) optimizer.zero_grad() loss.backward() optimizer.step() if global_step % config.log_steps == 0 and global_step > 0: probs = model.module.probability(logits) preds = torch.argmax(probs, dim=1).detach().cpu().numpy() labels = labels.cpu().detach().numpy() acc, f1, _, _ = util.clf_metrics(preds, labels) lr = util.get_learning_rate(optimizer) log.info("Step {} | TRAINING batch: Loss {:.4f} | F1 {:.4f} | " "Accuracy {:.4f} | LR {:.2e}".format(global_step, loss.item(), f1, acc, lr)) if global_step % config.eval_steps == 0 and global_step > 0: best_score = validate(val_loader, model, best_score=best_score, global_step=global_step, cfg=config) scheduler.step(best_score) global_step += 1 if __name__ == '__main__': seed = config.random_seed if seed: np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) main()
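The training script above combines a class-weighted cross-entropy with a `ReduceLROnPlateau` scheduler stepped on the validation metric. A compact, runnable sketch of that pattern with toy shapes (all sizes and weights here are illustrative, not the project's config values):

import torch
from torch import nn

model = nn.Linear(10, 3)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='max', factor=0.5, patience=2)
loss_fn = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 1.5]))

logits = model(torch.randn(8, 10))
loss = loss_fn(logits, torch.randint(0, 3, (8,)))
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step(0.87)   # step on a validation score, as main() does with best_score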
[ "numpy.random.seed", "model.architecture.COVIDNext50", "torch.argmax", "sklearn.metrics.classification_report", "torch.no_grad", "util.load_model_weights", "os.path.join", "util.clf_metrics", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.FloatTensor", "torch.manual_seed", "torch.optim.Adam", "torch.cuda.is_available", "util.get_learning_rate", "logging.basicConfig", "util.to_device", "torch.nn.CrossEntropyLoss", "torch.save", "torch.cuda.manual_seed_all", "numpy.array", "data.transforms.val_transforms", "torch.nn.DataParallel", "logging.getLogger", "data.transforms.train_transforms" ]
[((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((430, 469), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (449, 469), False, 'import logging\n'), ((1108, 1146), 'os.path.join', 'os.path.join', (["config['save_dir']", 'name'], {}), "(config['save_dir'], name)\n", (1120, 1146), False, 'import os\n'), ((1151, 1180), 'torch.save', 'torch.save', (['state', 'model_path'], {}), '(state, model_path)\n', (1161, 1180), False, 'import torch\n'), ((1799, 1836), 'numpy.array', 'np.array', (['predictions'], {'dtype': 'np.int32'}), '(predictions, dtype=np.int32)\n', (1807, 1836), True, 'import numpy as np\n'), ((1847, 1876), 'numpy.array', 'np.array', (['gts'], {'dtype': 'np.int32'}), '(gts, dtype=np.int32)\n', (1855, 1876), True, 'import numpy as np\n'), ((1902, 1973), 'util.clf_metrics', 'util.clf_metrics', ([], {'predictions': 'predictions', 'targets': 'gts', 'average': '"""macro"""'}), "(predictions=predictions, targets=gts, average='macro')\n", (1918, 1973), False, 'import util\n'), ((2071, 2128), 'sklearn.metrics.classification_report', 'classification_report', (['gts', 'predictions'], {'output_dict': '(True)'}), '(gts, predictions, output_dict=True)\n', (2092, 2128), False, 'from sklearn.metrics import classification_report\n'), ((3110, 3249), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': 'config.n_threads', 'pin_memory': 'use_gpu'}), '(train_dataset, batch_size=config.batch_size, shuffle=True,\n drop_last=True, num_workers=config.n_threads, pin_memory=use_gpu)\n', (3120, 3249), False, 'from torch.utils.data import DataLoader\n'), ((3735, 3857), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(False)', 'num_workers': 'config.n_threads', 'pin_memory': 'use_gpu'}), '(val_dataset, batch_size=config.batch_size, shuffle=False,\n num_workers=config.n_threads, pin_memory=use_gpu)\n', (3745, 3857), False, 'from torch.utils.data import DataLoader\n'), ((4302, 4354), 'model.architecture.COVIDNext50', 'architecture.COVIDNext50', ([], {'n_classes': 'config.n_classes'}), '(n_classes=config.n_classes)\n', (4326, 4354), False, 'from model import architecture\n'), ((4656, 4722), 'torch.optim.Adam', 'Adam', (['optim_layers'], {'lr': 'config.lr', 'weight_decay': 'config.weight_decay'}), '(optim_layers, lr=config.lr, weight_decay=config.weight_decay)\n', (4660, 4722), False, 'from torch.optim import Adam\n'), ((4781, 4917), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'optimizer': 'optimizer', 'factor': 'config.lr_reduce_factor', 'patience': 'config.lr_reduce_patience', 'mode': '"""max"""', 'min_lr': '(1e-07)'}), "(optimizer=optimizer, factor=config.lr_reduce_factor,\n patience=config.lr_reduce_patience, mode='max', min_lr=1e-07)\n", (4798, 4917), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5319, 5337), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (5335, 5337), False, 'from torch.nn import CrossEntropyLoss\n'), ((1458, 1491), 'util.to_device', 'util.to_device', (['imgs'], {'gpu': 'cfg.gpu'}), '(imgs, gpu=cfg.gpu)\n', (1472, 1491), False, 'import util\n'), ((2972, 3028), 'data.transforms.train_transforms', 'transforms.train_transforms', (['config.width', 'config.height'], {}), '(config.width, config.height)\n', (2999, 3028), 
False, 'from data import transforms\n'), ((3605, 3659), 'data.transforms.val_transforms', 'transforms.val_transforms', (['config.width', 'config.height'], {}), '(config.width, config.height)\n', (3630, 3659), False, 'from data import transforms\n'), ((4390, 4449), 'util.load_model_weights', 'util.load_model_weights', ([], {'model': 'model', 'state_dict': 'state_dict'}), '(model=model, state_dict=state_dict)\n', (4413, 4449), False, 'import util\n'), ((4504, 4532), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4525, 4532), False, 'import torch\n'), ((5217, 5255), 'torch.FloatTensor', 'torch.FloatTensor', (['config.loss_weights'], {}), '(config.loss_weights)\n', (5234, 5255), False, 'import torch\n'), ((7137, 7157), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7151, 7157), True, 'import numpy as np\n'), ((7166, 7189), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7183, 7189), False, 'import torch\n'), ((7201, 7226), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7224, 7226), False, 'import torch\n'), ((1506, 1521), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1519, 1521), False, 'import torch\n'), ((2702, 2727), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2725, 2727), False, 'import torch\n'), ((5636, 5669), 'util.to_device', 'util.to_device', (['imgs'], {'gpu': 'use_gpu'}), '(imgs, gpu=use_gpu)\n', (5650, 5669), False, 'import util\n'), ((5691, 5726), 'util.to_device', 'util.to_device', (['labels'], {'gpu': 'use_gpu'}), '(labels, gpu=use_gpu)\n', (5705, 5726), False, 'import util\n'), ((7240, 7272), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (7266, 7272), False, 'import torch\n'), ((6186, 6217), 'util.clf_metrics', 'util.clf_metrics', (['preds', 'labels'], {}), '(preds, labels)\n', (6202, 6217), False, 'import util\n'), ((6239, 6272), 'util.get_learning_rate', 'util.get_learning_rate', (['optimizer'], {}), '(optimizer)\n', (6261, 6272), False, 'import util\n'), ((1629, 1655), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (1641, 1655), False, 'import torch\n'), ((6049, 6075), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (6061, 6075), False, 'import torch\n')]
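The checkpoint dict written by save_model() above ('state_dict', 'global_step', 'clf_report') implies a matching resume path, which the script itself leaves stubbed out (state = None even when config.weights is set). A minimal sketch of loading such a file; the checkpoint name is hypothetical (it only follows the save_model() naming pattern) and map_location='cpu' assumes a CPU-only machine:

import torch
from model import architecture

ckpt_path = 'ckpts/COVIDNext50_F1_92.10_step_1000.pth'  # hypothetical path/name
state = torch.load(ckpt_path, map_location='cpu')

model = architecture.COVIDNext50(n_classes=3)  # n_classes value is assumed
model.load_state_dict(state['state_dict'])

# Resume bookkeeping exactly as main() does when a state dict is present.
global_step = state['global_step'] + 1
print('resuming at step', global_step)
print('stored macro F1:', state['clf_report']['macro avg']['f1-score'])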
import cv2 import numpy as np from calibration import get_calib_from_file # kitti # name = '000000' # pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/velodyne/'+name+'.bin' # img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/image_2/'+name+'.png' # calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/calib/'+name+'.txt' # waymo-kitti name = '00000-00001' pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/velodyne/'+name+'.bin' img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/image_0/'+name+'.png' calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/calib/'+name+'.txt' def cart_to_homo(mat): mat = np.vstack([mat, np.ones((1, mat.shape[1]))]) return mat def pc_to_pt(pc, V2C, R0, P): def cart2hom(pts_3d): """ Input: nx3 points in Cartesian Oupput: nx4 points in Homogeneous by pending 1 """ n = pts_3d.shape[0] pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1)))) return pts_3d_hom def project_velo_to_ref(pts_3d_velo): pts_3d_velo = cart2hom(pts_3d_velo) # nx4 return np.dot(pts_3d_velo, np.transpose(V2C)) def project_ref_to_rect(pts_3d_ref): """ Input and Output are nx3 points """ return np.transpose(np.dot(R0, np.transpose(pts_3d_ref))) def project_rect_to_image(pts_3d_rect): """ Input: nx3 points in rect camera coord. Output: nx2 points in image2 coord. """ pts_3d_rect = cart2hom(pts_3d_rect) pts_2d = np.dot(pts_3d_rect, np.transpose(P)) # nx3 pts_2d[:, 0] /= pts_2d[:, 2] pts_2d[:, 1] /= pts_2d[:, 2] return pts_2d[:, 0:2] # filter behind ind = pc[:, 0] > 0 # lidar: x is front pc = pc[ind, :] print('pc', pc) ref = project_velo_to_ref(pc) print('ref',ref) rect = project_ref_to_rect(ref) print('rect', rect) depth = rect[:, 2] print(rect.shape, depth.shape) image = project_rect_to_image(rect) return image, depth def main(): calib = get_calib_from_file(calib_pathname) v2c = calib['Tr_velo2cam'] r0 = calib['R0'] px = calib['P2'] # v2c = np.array([ # [7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04, -4.069766000000e-03], # [1.480249000000e-02, 7.280733000000e-04, -9.998902000000e-01, -7.631618000000e-02], # [9.998621000000e-01, 7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01]]) # r0 = np.array([ # [9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03], # [-9.869795000000e-03, 9.999421000000e-01, -4.278459000000e-03], # [7.402527000000e-03, 4.351614000000e-03, 9.999631000000e-01]]) # px = np.array([ # [7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 4.485728000000e+01], # [0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.163791000000e-01], # [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.745884000000e-03]]) pc = np.fromfile(pc_pathname, dtype=np.float32).reshape((-1, 4))[:, :3] # filter all behind image plane keep = [] for i in range(pc.shape[0]): p = pc[i, :] if p[0] > 0: keep.append(p) # pc = np.vstack(keep) # # tmp = np.eye(4) # tmp[:3, :3] = r0 # r0 = tmp # pc = np.transpose(pc) # (n,3) -> (3,n) # pc = cart_to_homo(pc) # (3,n) -> (4,n) # # v2c = cart_to_homo(v2c) # (3,4) -> (4,4) # # print(px.shape, r0.shape, v2c.shape, pc.shape) pt, depth = pc_to_pt(pc, v2c, r0, px) print(pt.shape, depth.shape) # pt = px @ r0 @ v2c @ pc # print(pt.shape) # pt = pt[:2] / pt[2] print(pt) import matplotlib.pyplot as plt cmap = plt.cm.get_cmap("hsv", 256) cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255 # draw img = cv2.imread(img_pathname) for i in range(pt.shape[0]): x = pt[i, 0] y = pt[i, 1] 
        # Colour each projected point by depth (np.int was removed from newer
        # numpy, so plain int is used for the colormap index).
        color = cmap[np.clip(640/depth[i], 0, 255).astype(int), :]
        # if 0 < x < 1920 and 0 < y < 1080:
        #     print('yah')
        # print(int(x), int(y))
        cv2.circle(img, (int(x), int(y)), 1, tuple(color), -1)

    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    while True:
        cv2.imshow('image', img)
        key = cv2.waitKey(1)
        if key == 27:  # exit on Esc
            break
        elif key != -1:
            print('Undefined key:', key)


if __name__ == '__main__':
    main()
[ "cv2.waitKey", "calibration.get_calib_from_file", "numpy.fromfile", "numpy.transpose", "numpy.ones", "numpy.clip", "cv2.imread", "cv2.imshow", "matplotlib.pyplot.cm.get_cmap", "cv2.namedWindow" ]
[((2141, 2176), 'calibration.get_calib_from_file', 'get_calib_from_file', (['calib_pathname'], {}), '(calib_pathname)\n', (2160, 2176), False, 'from calibration import get_calib_from_file\n'), ((3845, 3872), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""hsv"""', '(256)'], {}), "('hsv', 256)\n", (3860, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3983), 'cv2.imread', 'cv2.imread', (['img_pathname'], {}), '(img_pathname)\n', (3969, 3983), False, 'import cv2\n'), ((4301, 4344), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (4316, 4344), False, 'import cv2\n'), ((4369, 4393), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (4379, 4393), False, 'import cv2\n'), ((4408, 4422), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4419, 4422), False, 'import cv2\n'), ((770, 796), 'numpy.ones', 'np.ones', (['(1, mat.shape[1])'], {}), '((1, mat.shape[1]))\n', (777, 796), True, 'import numpy as np\n'), ((1228, 1245), 'numpy.transpose', 'np.transpose', (['V2C'], {}), '(V2C)\n', (1240, 1245), True, 'import numpy as np\n'), ((1641, 1656), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (1653, 1656), True, 'import numpy as np\n'), ((1055, 1070), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1062, 1070), True, 'import numpy as np\n'), ((1376, 1400), 'numpy.transpose', 'np.transpose', (['pts_3d_ref'], {}), '(pts_3d_ref)\n', (1388, 1400), True, 'import numpy as np\n'), ((3109, 3151), 'numpy.fromfile', 'np.fromfile', (['pc_pathname'], {'dtype': 'np.float32'}), '(pc_pathname, dtype=np.float32)\n', (3120, 3151), True, 'import numpy as np\n'), ((4081, 4112), 'numpy.clip', 'np.clip', (['(640 / depth[i])', '(0)', '(255)'], {}), '(640 / depth[i], 0, 255)\n', (4088, 4112), True, 'import numpy as np\n')]
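pc_to_pt() above composes three calibration transforms in homogeneous coordinates: Tr_velo2cam maps LiDAR points into the reference camera frame, R0 rectifies, and P projects to pixels with a final divide by depth. A self-contained sketch of the same chain for a single point; the calibration matrices below are toy values, not real KITTI or Waymo calibration:

import numpy as np

def project_point(p_velo, V2C, R0, P):
    p_ref = V2C @ np.append(p_velo, 1.0)   # LiDAR -> reference cam (3x4 @ 4)
    p_rect = R0 @ p_ref                     # rectification (3x3 @ 3)
    uvw = P @ np.append(p_rect, 1.0)       # projection (3x4 @ 4)
    return uvw[:2] / uvw[2], p_rect[2]      # pixel (u, v) and depth

V2C = np.hstack([np.eye(3), np.zeros((3, 1))])   # toy extrinsics
R0 = np.eye(3)                                    # toy rectification
P = np.array([[325.0, 0.0, 160.0, 0.0],
              [0.0, 325.0, 120.0, 0.0],
              [0.0, 0.0, 1.0, 0.0]])             # toy pinhole matrix

uv, depth = project_point(np.array([0.5, 0.2, 10.0]), V2C, R0, P)
print(uv, depth)  # a point 10 m ahead lands near the image centre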
from __future__ import division, print_function # coding=utf-8 import sys import os import glob import re import numpy as np import tensorflow as tf import pathlib import wget # from tensorflow.compat.v1.compat import ConfigProto # from tensorflow.compat.v1 import InteractiveSession #from tensorflow.python.client.session import InteractiveSession # config = tf.ConfigProto() # config.gpu_options.per_process_gpu_memory_fraction = 0.2 # config.gpu_options.allow_growth = True # session = InteractiveSession(config=config) # Keras from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image # Flask utils from flask import Flask, redirect, url_for, request, render_template from werkzeug.utils import secure_filename # from gevent.pywsgi import WSGIServer # Model saved with Keras model.save() MODEL_PATH = 'model_resnet.hdf5' MODEL_URL = 'https://github.com/DARK-art108/Cotton-Leaf-Disease-Prediction/releases/download/v1.0/model_resnet.hdf5' UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), 'static', 'uploads') # Download model if not present while not pathlib.Path(MODEL_PATH).is_file(): print(f'Model {MODEL_PATH} not found. Downloading...') wget.download(MODEL_URL) # Define a flask app app = Flask(__name__) # Define upload path app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER # Developing in the absence of TensorFlow :P (Python 3.9.0 x64) # def load_model(aa): # class a: # @staticmethod # def predict(*args): # return 1 # return a() # class image: # @staticmethod # def load_img(path, target_size): # return 'a' # @staticmethod # def img_to_array(img): # return 'v' # Load your trained model model = load_model(MODEL_PATH) def model_predict(img_path, model): print(img_path) img = image.load_img(img_path, target_size=(224, 224)) # Preprocessing the image x = image.img_to_array(img) # x = np.true_divide(x, 255) ## Scaling x = x / 255 x = np.expand_dims(x, axis=0) # Be careful how your trained model deals with the input # otherwise, it won't make correct prediction! # x = preprocess_input(x) preds = model.predict(x) preds = np.argmax(preds, axis=1) if preds == 0: preds = "The leaf is a diseased cotton leaf." elif preds == 1: preds = "The leaf is a diseased cotton plant." elif preds == 2: preds = "The leaf is a fresh cotton leaf." else: preds = "The leaf is a fresh cotton plant." return preds @app.route('/', methods=['GET', 'POST']) def index(): # Main page if request.method == 'POST': # Get the file from post request print(request.files, request.form, request.args) f = None if 'image' in request.files: f = request.files['image'] if f: # Save the file to ./uploads file_path = os.path.join( app.config['UPLOAD_FOLDER'], secure_filename(f.filename)) f.save(file_path) # Make prediction preds = model_predict(file_path, model) result = preds return render_template('index.html', result=result, img=secure_filename(f.filename)) return render_template('index.html', result=None, err='Failed to receive file') # First time return render_template('index.html', result=None) if __name__ == '__main__': app.run(port=5001, debug=True)
[ "tensorflow.keras.models.load_model", "numpy.argmax", "os.path.dirname", "flask.Flask", "tensorflow.keras.preprocessing.image.img_to_array", "numpy.expand_dims", "werkzeug.utils.secure_filename", "wget.download", "tensorflow.keras.preprocessing.image.load_img", "pathlib.Path", "flask.render_template" ]
[((1368, 1383), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1373, 1383), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1871, 1893), 'tensorflow.keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1881, 1893), False, 'from tensorflow.keras.models import load_model\n'), ((1117, 1142), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1132, 1142), False, 'import os\n'), ((1312, 1336), 'wget.download', 'wget.download', (['MODEL_URL'], {}), '(MODEL_URL)\n', (1325, 1336), False, 'import wget\n'), ((1967, 2015), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (1981, 2015), False, 'from tensorflow.keras.preprocessing import image\n'), ((2058, 2081), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2076, 2081), False, 'from tensorflow.keras.preprocessing import image\n'), ((2158, 2183), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2172, 2183), True, 'import numpy as np\n'), ((2376, 2400), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2385, 2400), True, 'import numpy as np\n'), ((3539, 3581), 'flask.render_template', 'render_template', (['"""index.html"""'], {'result': 'None'}), "('index.html', result=None)\n", (3554, 3581), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((3436, 3508), 'flask.render_template', 'render_template', (['"""index.html"""'], {'result': 'None', 'err': '"""Failed to receive file"""'}), "('index.html', result=None, err='Failed to receive file')\n", (3451, 3508), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1211, 1235), 'pathlib.Path', 'pathlib.Path', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1223, 1235), False, 'import pathlib\n'), ((3148, 3175), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (3163, 3175), False, 'from werkzeug.utils import secure_filename\n'), ((3391, 3418), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (3406, 3418), False, 'from werkzeug.utils import secure_filename\n')]
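index() above reads the upload from request.files['image'] and renders the prediction back into index.html, so once the app runs on port 5001 the endpoint can be exercised with a plain HTTP client. A sketch using the requests library; the image path is a placeholder:

import requests

with open('test_leaf.jpg', 'rb') as f:   # placeholder local image
    resp = requests.post('http://127.0.0.1:5001/',
                         files={'image': ('test_leaf.jpg', f, 'image/jpeg')})

print(resp.status_code)
# The prediction text is embedded in the rendered HTML.
print('diseased' in resp.text, 'fresh' in resp.text)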
#!/usr/bin/env python # module TEST_ACD import unittest import numpy as np from .test_common import BaseCommon from pylocus.point_set import PointSet from pylocus.algorithms import reconstruct_acd from pylocus.simulation import create_noisy_edm class TestACD(BaseCommon.TestAlgorithms): def setUp(self): BaseCommon.TestAlgorithms.setUp(self) self.create_points() self.n_it = 10 def create_points(self, N=5, d=2): print('TestACD:create_points') self.pts = PointSet(N, d) self.pts.set_points('random') self.pts.init() self.index = 0 def call_method(self, method=''): print('TestACD:call_method') Xhat, costs = reconstruct_acd(self.pts.edm, W=np.ones(self.pts.edm.shape), X0=self.pts.points, print_out=False, sweeps=3) return Xhat def add_noise(self, noise=1e-6): self.pts.edm = create_noisy_edm(self.pts.edm, noise) if __name__ == "__main__": unittest.main()
[ "unittest.main", "pylocus.simulation.create_noisy_edm", "numpy.ones", "pylocus.point_set.PointSet" ]
[((1073, 1088), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1086, 1088), False, 'import unittest\n'), ((507, 521), 'pylocus.point_set.PointSet', 'PointSet', (['N', 'd'], {}), '(N, d)\n', (515, 521), False, 'from pylocus.point_set import PointSet\n'), ((1002, 1039), 'pylocus.simulation.create_noisy_edm', 'create_noisy_edm', (['self.pts.edm', 'noise'], {}), '(self.pts.edm, noise)\n', (1018, 1039), False, 'from pylocus.simulation import create_noisy_edm\n'), ((773, 800), 'numpy.ones', 'np.ones', (['self.pts.edm.shape'], {}), '(self.pts.edm.shape)\n', (780, 800), True, 'import numpy as np\n')]
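Outside unittest, the same objects the test wires together give a quick way to inspect a single reconstruction. A sketch that reuses only calls already present in the test above; the noise level is arbitrary:

import numpy as np
from pylocus.point_set import PointSet
from pylocus.algorithms import reconstruct_acd
from pylocus.simulation import create_noisy_edm

pts = PointSet(5, 2)  # 5 points in 2D, as in create_points()
pts.set_points('random')
pts.init()

noisy_edm = create_noisy_edm(pts.edm, 1e-3)
# With X0 set to the true points, Xhat stays in the same coordinate frame,
# so a direct norm against pts.points is meaningful.
Xhat, costs = reconstruct_acd(noisy_edm, W=np.ones(noisy_edm.shape),
                                X0=pts.points, print_out=False, sweeps=3)
print('reconstruction error:', np.linalg.norm(Xhat - pts.points))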
import os.path as op

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

import config as cfg

sns.set_style('darkgrid')
sns.set_context('notebook')
sns.despine(trim=True)
plt.close('all')
fig, ax = plt.subplots(1, 1, figsize=(8, 6))

# The file holds a pickled dict, so newer numpy needs allow_pickle=True.
scores = np.load(op.join(cfg.path_outputs, 'all_scores_learning_curves.npy'),
                 allow_pickle=True).item()
train_sizes = scores['train_sizes']
train_scores = scores['train_scores']
test_scores = scores['test_scores']

# The scores come from a negated-error scorer (negative MAE), so only the
# means need the sign flipped; standard deviations are already positive.
train_mean = - np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = - np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)

ax.plot(train_sizes, train_mean, 'b--', lw=2, label="Training score")
ax.fill_between(train_sizes, train_mean - train_std,
                train_mean + train_std, alpha=0.1)
ax.plot(train_sizes, test_mean, 'b-', label="CV score")
ax.fill_between(train_sizes, test_mean - test_std,
                test_mean + test_std, alpha=0.1, color="b")
# ax.set_xticks(train_sizes)
ax.set_xlabel("Number of training examples")
ax.set_ylabel("MAE", rotation=0)
# ax.set_title('Learning Curve (SpatialFilter + Riemann)')
ax.legend()
plt.tight_layout()
plt.savefig(op.join(cfg.path_outputs, 'plot_MAE_learning_curves.png'), dpi=300)
[ "seaborn.set_style", "matplotlib.pyplot.tight_layout", "os.path.join", "numpy.std", "matplotlib.pyplot.close", "seaborn.despine", "numpy.mean", "matplotlib.pyplot.subplots", "seaborn.set_context" ]
[((118, 143), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (131, 143), True, 'import seaborn as sns\n'), ((144, 171), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (159, 171), True, 'import seaborn as sns\n'), ((172, 194), 'seaborn.despine', 'sns.despine', ([], {'trim': '(True)'}), '(trim=True)\n', (183, 194), True, 'import seaborn as sns\n'), ((195, 211), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (204, 211), True, 'import matplotlib.pyplot as plt\n'), ((222, 256), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (234, 256), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1184), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1182, 1184), True, 'import matplotlib.pyplot as plt\n'), ((486, 515), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (493, 515), True, 'import numpy as np\n'), ((530, 558), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (536, 558), True, 'import numpy as np\n'), ((573, 601), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (580, 601), True, 'import numpy as np\n'), ((615, 642), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (621, 642), True, 'import numpy as np\n'), ((1197, 1254), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""plot_MAE_learning_curves.png"""'], {}), "(cfg.path_outputs, 'plot_MAE_learning_curves.png')\n", (1204, 1254), True, 'import os.path as op\n'), ((275, 334), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""all_scores_learning_curves.npy"""'], {}), "(cfg.path_outputs, 'all_scores_learning_curves.npy')\n", (282, 334), True, 'import os.path as op\n')]
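The script above plots means and spreads from a dict stored in all_scores_learning_curves.npy. A sketch of producing a compatible file with sklearn's learning_curve and a negative-error scorer; the estimator and dataset here are placeholders:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.model_selection import learning_curve

X, y = make_regression(n_samples=500, noise=10.0, random_state=0)
train_sizes, train_scores, test_scores = learning_curve(
    Ridge(), X, y, cv=5, scoring='neg_mean_absolute_error',
    train_sizes=np.linspace(0.1, 1.0, 8))

# Same keys the plotting script reads back with np.load(...).item().
np.save('all_scores_learning_curves.npy',
        {'train_sizes': train_sizes,
         'train_scores': train_scores,
         'test_scores': test_scores})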
import alepy
import numpy as np
import os.path as osp
from control3 import CTRL_ROOT
# import cv2

world = alepy.AtariWorld(osp.join(CTRL_ROOT,"domain_data/atari_roms/space_invaders.bin"))
for j in xrange(5):
    # Determinism check: stepping the same (state, action) pair must
    # reproduce the first rollout's state, reward, and observation exactly.
    x0 = world.GetInitialState(np.random.randint(0,50))
    u0 = np.array([0],'uint8')
    y,r,o,d = world.Step(x0,u0)
    for i in xrange(3):
        y1,r1,o1,d1 = world.Step(x0,u0)
        assert (y==y1).all() and (r==r1) and (np.array(o)==np.array(o1)).all()

    # Then advance the state a random number of steps with random actions.
    nsteps = np.random.randint(10)
    x = x0
    for t in xrange(nsteps):
        u = np.array([np.random.randint(0,10)],dtype='uint8')
        x,_,_,_ = world.Step(x,u)
[ "numpy.array", "numpy.random.randint", "os.path.join" ]
[((124, 188), 'os.path.join', 'osp.join', (['CTRL_ROOT', '"""domain_data/atari_roms/space_invaders.bin"""'], {}), "(CTRL_ROOT, 'domain_data/atari_roms/space_invaders.bin')\n", (132, 188), True, 'import os.path as osp\n'), ((276, 298), 'numpy.array', 'np.array', (['[0]', '"""uint8"""'], {}), "([0], 'uint8')\n", (284, 298), True, 'import numpy as np\n'), ((242, 266), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (259, 266), True, 'import numpy as np\n'), ((493, 514), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (510, 514), True, 'import numpy as np\n'), ((589, 613), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (606, 613), True, 'import numpy as np\n'), ((441, 452), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (449, 452), True, 'import numpy as np\n'), ((454, 466), 'numpy.array', 'np.array', (['o1'], {}), '(o1)\n', (462, 466), True, 'import numpy as np\n')]
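The snippet above is a determinism smoke test: replaying Step() from a saved state must reproduce the same outputs every time. The pattern generalizes; a sketch assuming only the Step interface used above:

import numpy as np

def assert_step_deterministic(world, x, u, repeats=3):
    # Replay the same (state, action) pair and demand identical outputs.
    y0, r0, o0, d0 = world.Step(x, u)
    for _ in range(repeats):
        y, r, o, d = world.Step(x, u)
        assert (y == y0).all(), 'state mismatch'
        assert r == r0, 'reward mismatch'
        assert (np.array(o) == np.array(o0)).all(), 'observation mismatch'
        assert d == d0, 'done-flag mismatch'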
#!/usr/bin/env python3 import json import sys import numpy as np import cv2 import math import time from collections import namedtuple from cscore import CameraServer from networktables import NetworkTables # Magic Numbers lowerGreen = (50, 120, 130) # Our Robot's Camera higherGreen = (100, 220, 220) minContourArea = 10 angleOffset = 10 rightAngleSize = -14 leftAngleSize = -75.5 screenX = 320 screenY = 240 screenSize = (screenX, screenY) distance_away = 110 realTapeDistance = 0.2 # metres between closest tape points focal_length = 325 # Initialisation configFile = "/boot/frc.json" CameraConfig = namedtuple("CameraConfig", ["name", "path", "config"]) def readCameraConfig(config): """Read single camera configuration.""" return CameraConfig(config["name"], config["path"], config) def readConfig(): """Read configuration file.""" # parse file with open(configFile) as f: j = json.load(f) # cameras cameras = j["cameras"] cameras = [readCameraConfig(camera) for camera in cameras] return cameras # Our code begins here def startCamera(config): """Start running the camera.""" cs = CameraServer.getInstance() camera = cs.startAutomaticCapture(name=config.name, path=config.path) camera.setConfigJson(json.dumps(config.config)) return cs, camera # Process Functions def getDistance(boxes): if boxes is None: return math.nan, math.nan Lpoint = max(boxes[0], key=lambda x: x[0]) Rpoint = min(boxes[1], key=lambda x: x[0]) width = abs(Lpoint[0] - Rpoint[0]) mid = (Rpoint[0] + Lpoint[0]) / 2 distance_from_center = mid - screenX / 2 offset = getOffset(width, distance_from_center) if width > 0: dist = (realTapeDistance * focal_length) / width return dist, offset else: return math.nan, offset def getOffset(width, x): # if width = 20cm then what is x in cm offset = x / (width / (realTapeDistance)) return -offset def createAnnotatedDisplay( frame: np.array, pairs: list, closestToMiddle: tuple, circle: tuple ) -> np.array: frame = cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), thickness=1) for pair in pairs: if (pair[0][1][0] == closestToMiddle[0][0]).all(): colour = (0, 255, 0) #Green frame = cv2.circle( frame, (int(circle[0][0]), int(circle[0][1])), int(circle[1]), colour ) else: colour = (0, 0, 255) #Red for tape in pair: frame = cv2.drawContours( frame, [np.int0(tape[1])], 0, colour, thickness=2 ) return frame def getRetroPos(frame: np.array, annotated: bool, hsv: np.array, mask: np.array) -> (np.array, float, float): """Function for finding retro-reflective tape""" hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV, dst=hsv) # Convert to HSV to make the mask easier mask = cv2.inRange(hsv, lowerGreen, higherGreen, dst=mask) # Create a mask of everything in between the greens _, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # Find the contours if len(contours) <= 1: # Get contours with area above magic number 10 and append its smallest rectangle return frame, math.nan, math.nan rects = [] for cnt in contours: if cv2.contourArea(cnt) > minContourArea: rects.append(cv2.minAreaRect(cnt)) boxed_and_angles = [] for rect in rects: if math.isclose(rect[2], leftAngleSize, abs_tol=angleOffset): boxed_and_angles.append([False, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))]) elif math.isclose(rect[2], rightAngleSize, abs_tol=angleOffset): boxed_and_angles.append([True, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))]) pairs = [] leftRect = None for rect in sorted( boxed_and_angles, key=lambda x: max(x[1][:, 0]) if x[0] else min(x[1][:, 0]) ): # Get rectangle 
pairs if not rect[0]: leftRect = rect elif leftRect and math.isclose(leftRect[2], rect[2], abs_tol=0.3*leftRect[2]): pairs.append((leftRect, rect)) leftRect = None if len(pairs) < 1: return frame, math.nan, math.nan closestToMiddle = list(min( pairs, key=lambda x: abs(np.mean([x[0][1][:,0] + x[1][1][:,0]]) - screenSize[0]) )) closestToMiddle = [closestToMiddle[0][1], closestToMiddle[1][1]] (x, y), radius = cv2.minEnclosingCircle(np.array(closestToMiddle).reshape(-1, 2)) if annotated: frame = createAnnotatedDisplay(frame, pairs, closestToMiddle, ((x, y), radius)) dist, offset = getDistance(closestToMiddle) return ( frame, dist, offset, ) if __name__ == "__main__": if len(sys.argv) >= 2: configFile = sys.argv[1] # read configuration cameraConfigs = readConfig() # start NetworkTables NetworkTables.initialize(server="10.47.74.2") NetworkTables.setUpdateRate(1) nt = NetworkTables.getTable("/vision") ping = nt.getEntry("ping") raspi_pong = nt.getEntry("raspi_pong") rio_pong = nt.getEntry("rio_pong") entry_game_piece = nt.getEntry("game_piece") entry_dist = nt.getEntry("fiducial_x") entry_offset = nt.getEntry("fiducial_y") entry_fiducial_time = nt.getEntry("fiducial_time") entry_camera = nt.getEntry("using_cargo_camera") # start cameras cameras = [] for cameraConfig in cameraConfigs: cameras.append(startCamera(cameraConfig)) cargo_rocket_sink = cameras[0][0].getVideo(camera=cameras[0][1]) hatch_sink = cameras[1][0].getVideo(camera=cameras[1][1]) source = cameras[0][0].putVideo("Driver_Stream", screenX, screenY) frame = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8) image = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8) hsv = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8) mask = np.zeros(shape=(screenSize[1], screenSize[0]), dtype=np.uint8) img = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8) old_ping_time = 0 while True: ping_time = ping.getNumber(0) if abs(ping_time - old_ping_time) > 0.00000001: raspi_pong.setNumber(time.monotonic()) rio_pong.setNumber(ping_time) old_ping_time = ping_time game_piece = entry_game_piece.getBoolean(0) fiducial_time = time.monotonic() sink = hatch_sink if game_piece == 0 else cargo_rocket_sink entry_camera.setBoolean(False if not game_piece else True) frame_time, frame = sink.grabFrameNoTimeout(image=frame) if frame_time == 0: print(sink.getError(), file=sys.stderr) source.notifyError(sink.getError()) outtake = False percent = math.nan else: image, dist, offset = getRetroPos(frame, True, hsv, mask) source.putFrame(image) if not math.isnan(dist): if game_piece == 1: dist *= -1 offset *= -1 entry_dist.setNumber(dist) entry_offset.setNumber(offset) entry_fiducial_time.setNumber(fiducial_time) NetworkTables.flush()
[ "networktables.NetworkTables.getTable", "networktables.NetworkTables.flush", "json.dumps", "cv2.boxPoints", "numpy.mean", "cv2.minAreaRect", "cv2.inRange", "cv2.line", "cv2.contourArea", "cv2.cvtColor", "networktables.NetworkTables.setUpdateRate", "math.isnan", "numpy.int0", "networktables.NetworkTables.initialize", "json.load", "cscore.CameraServer.getInstance", "numpy.zeros", "time.monotonic", "numpy.array", "collections.namedtuple", "math.isclose", "cv2.findContours" ]
[((609, 663), 'collections.namedtuple', 'namedtuple', (['"""CameraConfig"""', "['name', 'path', 'config']"], {}), "('CameraConfig', ['name', 'path', 'config'])\n", (619, 663), False, 'from collections import namedtuple\n'), ((1154, 1180), 'cscore.CameraServer.getInstance', 'CameraServer.getInstance', ([], {}), '()\n', (1178, 1180), False, 'from cscore import CameraServer\n'), ((2108, 2171), 'cv2.line', 'cv2.line', (['frame', '(160, 0)', '(160, 240)', '(255, 0, 0)'], {'thickness': '(1)'}), '(frame, (160, 0), (160, 240), (255, 0, 0), thickness=1)\n', (2116, 2171), False, 'import cv2\n'), ((2815, 2862), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {'dst': 'hsv'}), '(frame, cv2.COLOR_BGR2HSV, dst=hsv)\n', (2827, 2862), False, 'import cv2\n'), ((2919, 2970), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lowerGreen', 'higherGreen'], {'dst': 'mask'}), '(hsv, lowerGreen, higherGreen, dst=mask)\n', (2930, 2970), False, 'import cv2\n'), ((3049, 3111), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3065, 3111), False, 'import cv2\n'), ((4985, 5030), 'networktables.NetworkTables.initialize', 'NetworkTables.initialize', ([], {'server': '"""10.47.74.2"""'}), "(server='10.47.74.2')\n", (5009, 5030), False, 'from networktables import NetworkTables\n'), ((5036, 5066), 'networktables.NetworkTables.setUpdateRate', 'NetworkTables.setUpdateRate', (['(1)'], {}), '(1)\n', (5063, 5066), False, 'from networktables import NetworkTables\n'), ((5076, 5109), 'networktables.NetworkTables.getTable', 'NetworkTables.getTable', (['"""/vision"""'], {}), "('/vision')\n", (5098, 5109), False, 'from networktables import NetworkTables\n'), ((5812, 5877), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5820, 5877), True, 'import numpy as np\n'), ((5890, 5955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5898, 5955), True, 'import numpy as np\n'), ((5966, 6031), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5974, 6031), True, 'import numpy as np\n'), ((6043, 6105), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0])', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0]), dtype=np.uint8)\n', (6051, 6105), True, 'import numpy as np\n'), ((6116, 6181), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (6124, 6181), True, 'import numpy as np\n'), ((921, 933), 'json.load', 'json.load', (['f'], {}), '(f)\n', (930, 933), False, 'import json\n'), ((1280, 1305), 'json.dumps', 'json.dumps', (['config.config'], {}), '(config.config)\n', (1290, 1305), False, 'import json\n'), ((3492, 3549), 'math.isclose', 'math.isclose', (['rect[2]', 'leftAngleSize'], {'abs_tol': 'angleOffset'}), '(rect[2], leftAngleSize, abs_tol=angleOffset)\n', (3504, 3549), False, 'import math\n'), ((6522, 6538), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6536, 6538), False, 'import time\n'), ((7309, 7330), 'networktables.NetworkTables.flush', 'NetworkTables.flush', ([], {}), '()\n', (7328, 7330), False, 'from networktables import 
NetworkTables\n'), ((3346, 3366), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3361, 3366), False, 'import cv2\n'), ((3678, 3736), 'math.isclose', 'math.isclose', (['rect[2]', 'rightAngleSize'], {'abs_tol': 'angleOffset'}), '(rect[2], rightAngleSize, abs_tol=angleOffset)\n', (3690, 3736), False, 'import math\n'), ((7056, 7072), 'math.isnan', 'math.isnan', (['dist'], {}), '(dist)\n', (7066, 7072), False, 'import math\n'), ((3410, 3430), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (3425, 3430), False, 'import cv2\n'), ((4104, 4165), 'math.isclose', 'math.isclose', (['leftRect[2]', 'rect[2]'], {'abs_tol': '(0.3 * leftRect[2])'}), '(leftRect[2], rect[2], abs_tol=0.3 * leftRect[2])\n', (4116, 4165), False, 'import math\n'), ((4544, 4569), 'numpy.array', 'np.array', (['closestToMiddle'], {}), '(closestToMiddle)\n', (4552, 4569), True, 'import numpy as np\n'), ((6348, 6364), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6362, 6364), False, 'import time\n'), ((2566, 2582), 'numpy.int0', 'np.int0', (['tape[1]'], {}), '(tape[1])\n', (2573, 2582), True, 'import numpy as np\n'), ((3604, 3623), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3617, 3623), False, 'import cv2\n'), ((3642, 3661), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3655, 3661), False, 'import cv2\n'), ((3790, 3809), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3803, 3809), False, 'import cv2\n'), ((3828, 3847), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3841, 3847), False, 'import cv2\n'), ((4367, 4407), 'numpy.mean', 'np.mean', (['[x[0][1][:, 0] + x[1][1][:, 0]]'], {}), '([x[0][1][:, 0] + x[1][1][:, 0]])\n', (4374, 4407), True, 'import numpy as np\n')]
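getDistance() and getOffset() above are a pinhole-camera calculation: the two tapes are a known realTapeDistance apart, so the pixel gap between them gives distance = realTapeDistance * focal_length / pixel_width, and the same metres-per-pixel scale converts the pair's pixel offset from screen centre into metres. A worked numeric sketch with the script's own constants; the pixel measurements are made up:

real_tape_distance = 0.2   # metres between the closest tape points
focal_length = 325         # pixels, from the magic numbers above
screen_x = 320

pixel_width = 50                 # assumed pixel gap between the two boxes
pixel_mid = screen_x / 2 + 40    # assumed pair centre, 40 px right of middle

distance = real_tape_distance * focal_length / pixel_width               # 1.3 m
offset = -(pixel_mid - screen_x / 2) * real_tape_distance / pixel_width  # -0.16 m
print(distance, offset)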
#!/usr/bin/python import os.path import os import glob import subprocess import numpy as np import numpy.lib.recfunctions as rfn from astropy.io import fits from astropy.stats import bayesian_blocks import argparse from collections import defaultdict def checkDatFile(datFileName): if not os.path.isfile(datFileName): print(datFileName + ' is not a file!\n') return False return True def extractErrors(errorStr): errors = errorStr.replace('(', '').replace(')', '').split(' - ') return float(errors[0]), float(errors[1]) def calibrate_ncp_prior(flux=None, fluxerr=None, time=None, timebin=None, p_0=[0.05], n_sims=1000, min_prior=0.2, max_prior=4, n_steps=20, outPrefix=None): # path='./gammas/', exp='VERITAS', source=''): # Calibration of ncp_prior: # input: # flux, fluxerr, time, timebin : Lightcurve in format numpy.ndarray or pandas.Series # p_0 : FPR input array # n_sims : float # min_prior : float/int # max_prior : float/int # n_stepts : number of steps in [min_prior, max_prior] sourceNow = outPrefix.split('/')[0] falsecount = np.zeros(n_steps) ncp_priors = np.linspace(min_prior, max_prior, n_steps) result = {} best = {} # distance between points not relevant but should be ordered x = np.arange(len(flux)) average = np.average(flux, weights=fluxerr) # simulating lightcurves for n_sims times and applying algorithem # in n_steps steps between min_prior and max_prior. Afterwards # false positive rate is calculated if a block was detected. for k in range(n_sims): if k % 10 == 0: print(sourceNow, 'current simulation: {}'.format(k)) # simulate the flux values datapoints = np.random.normal(average, fluxerr, len(fluxerr)) # aply bayesian block and count fpr for l, ncp_prior in enumerate(ncp_priors): gamma = 10**(-ncp_prior) bb = bayesian_blocks(x, datapoints, fluxerr, fitness='measures', gamma=gamma) if len(bb) > 2: falsecount[l] += 1 fp_rate = falsecount/n_sims # Final result of FPR in dependency of ncp_prior result = np.core.records.fromarrays([ncp_priors, fp_rate], names='ncp, fp') # Calculation of best results for the values in p_0 for p0 in p_0: best[str(p0)] = result[(np.abs(result.fp - p0)).argmin()] # Saving result and best to txt file with open(outPrefix + '_result.txt', 'wb') as fOut: np.savetxt(fOut, result) # with open(outPrefix + '_results_best.txt', 'wb') as fOut: # np.savetxt(fOut, [best]) return(result, best) def readSwiftLC(swiftFileName, rebin, veritasObs): swiftFile = open(swiftFileName, 'r') date, dateErrUp, dateErrDn = list(), list(), list() rate, rateErrUp, rateErrDn = list(), list(), list() mode = list() for line in swiftFile: if '!' in line: if 'WT data' in line: modeNow = 'WT' continue if 'PC data' in line: modeNow = 'PC' continue if 'Upper limit' in line: break if '!' 
not in line and len(line) > 1 and 'NO' not in line and 'READ' not in line: date.append(float(line.split()[0].strip())) dateErrUp.append(abs(float(line.split()[1].strip()))) dateErrDn.append(abs(float(line.split()[2].strip()))) rate.append(float(line.split()[3].strip())) rateErrUp.append(abs(float(line.split()[4].strip()))) rateErrDn.append(abs(float(line.split()[5].strip()))) mode.append(modeNow) swiftData = np.c_[date, dateErrDn, dateErrUp, rate, rateErrDn, rateErrUp, mode] headersType = {'names': ('Date', 'Date error down', 'Date error up', 'Rate', 'Rate error down', 'Rate error up', 'mode'), 'formats': ('f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'U40')} swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType) if rebin == 'monthly' or rebin == 'weekly' or rebin == 'yearly': if rebin == 'yearly': # Take only contemporaneous observations swiftMask = list() for swiftObsNow in swiftData['Date']: keepSwift = False for veritasObsNow in veritasObs: if abs(swiftObsNow - veritasObsNow) < 1: keepSwift = True swiftMask.append(keepSwift) swiftData = swiftData[swiftMask] nDays = 28 if rebin == 'yearly': nDays = 365 if rebin == 'weekly': nDays = 7 mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi mjd_max = 58465 # This is ~today nBins = int((mjd_max - mjd_min)/nDays) timeBins = np.linspace(mjd_min, mjd_max, nBins, False) date, dateErrDn, dateErrUp = list(), list(), list() rate, rateErrDn, rateErrUp = list(), list(), list() mode = list() for i_bin, edgeDn in enumerate(timeBins): edgeUp = 1e6 if i_bin < len(timeBins) - 1: edgeUp = timeBins[i_bin+1] # TODO - should we divide into the different modes? tempSwiftData = swiftData[(edgeDn <= swiftData['Date']) & (swiftData['Date'] < edgeUp)] if len(tempSwiftData) > 0: date.append(np.average(tempSwiftData['Date'])) dateErrDn.append(date[-1] - np.min(tempSwiftData['Date'])) dateErrUp.append(np.max(tempSwiftData['Date'] - date[-1])) totalError = tempSwiftData['Rate error down'] + tempSwiftData['Rate error up'] rate.append(np.average(tempSwiftData['Rate'], weights=1./totalError)) rateErrDn.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error down'], 2)))) rateErrUp.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error up'], 2)))) mode.append('Combined') swiftData = np.c_[date, dateErrDn, dateErrUp, rate, rateErrDn, rateErrUp, mode] swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType) return swiftData def rebinFermi(fermiLC, veritasObs): # First convert to numpy array to make it easier fermiLC = np.c_[fermiLC['tmax_mjd'], fermiLC['tmin_mjd'], fermiLC['flux'], fermiLC['flux_err']] headersType = {'names': ('tmax_mjd', 'tmin_mjd', 'flux', 'flux_err'), 'formats': ('f8', 'f8', 'f8', 'f8')} fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType) # Take only contemporaneous observations (in this case, within a month) # fermiBlocks = bayesian_blocks(fermiLC['tmax_mjd'], fermiLC['flux'], fermiLC['flux_err'] fermiMask = list() for fermiDataPoint in fermiLC['tmax_mjd']: keepFermi = False for veritasObsNow in veritasObs: if abs(fermiDataPoint - veritasObsNow) < 28: keepFermi = True fermiMask.append(keepFermi) fermiLC = fermiLC[fermiMask] nDays = 365 mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi mjd_max = 58465 # This is ~today nBins = int((mjd_max - mjd_min)/nDays) timeBins = np.linspace(mjd_min, mjd_max, nBins, False) rebinnedFermi = defaultdict(list) for i_bin, edgeDn in enumerate(timeBins): edgeUp = 1e6 if i_bin < len(timeBins) - 1: edgeUp = 
timeBins[i_bin+1] tempFermiData = fermiLC[(edgeDn <= fermiLC['tmax_mjd']) & (fermiLC['tmax_mjd'] < edgeUp)] if len(tempFermiData) > 0: rebinnedFermi['tmax_mjd'].append(np.average(tempFermiData['tmax_mjd'])) rebinnedFermi['tmin_mjd'].append(np.average(tempFermiData['tmin_mjd'])) rebinnedFermi['flux'].append(np.average(tempFermiData['flux'], weights=1./tempFermiData['flux_err'])) rebinnedFermi['flux_err'].append(np.sqrt(np.sum(np.power(tempFermiData['flux_err'], 2)))) fermiLC = np.c_[rebinnedFermi['tmax_mjd'], rebinnedFermi['tmin_mjd'], rebinnedFermi['flux'], rebinnedFermi['flux_err']] fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType) return fermiLC def readCorrTable(corrTableFile): headersType = {'names': ('Left edges', 'Right edges', 'Correction factor', 'CorrFactorError', 'CorrFactorErrorCons'), 'formats': ('f8', 'f8', 'f8', 'f8', 'f8')} return np.loadtxt(corrTableFile, dtype=headersType) def correctFluxesFromCrabLC(origLC, corrTable): corrLC = np.copy(origLC) for i_point, dateNow in enumerate(corrLC['DateMJD']): corrBin = np.argmax(dateNow < corrTable['Right edges']) if corrTable['Correction factor'][corrBin] != 1: corrLC['Flux'][i_point] = (corrLC['Flux'][i_point] / corrTable['Correction factor'][corrBin]) corrLC['Flux Error'][i_point] = np.sqrt(np.power(corrLC['Flux Error'][i_point], 2) + np.power(corrTable['CorrFactorError'][corrBin] * corrLC['Flux'][i_point], 2)) return corrLC def correctFluxes(origLC, corrTable): corrLC = correctFluxesFromCrabLC(origLC, corrTable) # We increased the threshold, so no need to add a systematic uncertainty anymore return corrLC def determinePriors(veritasDatFileName, fermiFile, swiftFullFileName, corrTable, veritasObsFile, sourceNow, binning): for fileNow in [veritasDatFileName, fermiFile, swiftFullFileName]: if not checkDatFile(fileNow): return veritasDatFile = open(veritasDatFileName, 'r') headersType = {'names': ('DateMJD', 'Date Error', 'Flux', 'Flux Error'), 'formats': ('f8', 'f8', 'f8', 'f8')} veritasData = np.loadtxt(veritasDatFile, dtype=headersType) veritasFluxes = veritasData[veritasData['Flux Error'] > 0] veritasFluxes = correctFluxes(veritasFluxes, corrTable) nsims = 15000 n_steps = 40 experiment = 'veritas' outPrefix = '{}/{}_{}_{}'.format(sourceNow, experiment, binning, str(nsims)) result, best = calibrate_ncp_prior(flux=veritasFluxes['Flux'], fluxerr=veritasFluxes['Flux Error'], time=veritasFluxes['DateMJD'], timebin=veritasFluxes['Date Error'], p_0=[0.01, 0.05], n_sims=nsims, min_prior=0.2, max_prior=4, n_steps=n_steps, outPrefix=outPrefix) gamma = 10**(- best[str(0.01)].ncp) print(sourceNow, 'VERITAS', 'gamma - ', gamma) if binning == 'yearly' or binning == 'monthly': fermiDatFile = open(fermiFile, 'rb') fermiLC = np.load(fermiDatFile, encoding='latin1').flat[0] if binning == 'yearly': headersType = {'names': ('run', 'date', 'flux', 'fluxError', 'significance', 'ze'), 'formats': ('f8', 'f8', 'f8', 'f8', 'f8', 'f8')} veritasObs = np.loadtxt(veritasObsFile, dtype=headersType) fermiLC = rebinFermi(fermiLC, veritasObs['date']) experiment = 'fermi' outPrefix = '{}/{}_{}_{}'.format(sourceNow, experiment, binning, str(nsims)) result, best = calibrate_ncp_prior(flux=fermiLC['flux'], fluxerr=fermiLC['flux_err'], time=fermiLC['tmax_mjd'], timebin=fermiLC['tmax_mjd'] - fermiLC['tmin_mjd'], p_0=[0.01, 0.05], n_sims=nsims, min_prior=0.2, max_prior=4, n_steps=n_steps, outPrefix=outPrefix) gamma = 10**(- best[str(0.01)].ncp) print(sourceNow, 'Fermi', 'gamma - ', gamma) swiftBinnings = [binning] if binning == 'yearly': # run 
also the daily for Swift in this case
        swiftBinnings = ['daily', 'yearly']

    for swiftBinNow in swiftBinnings:
        if swiftBinNow == 'yearly':
            veritasObsDates = veritasObs['date']
        else:
            veritasObsDates = list()
        # Use the function argument here; the original referenced the
        # module-level 'swiftFile' global, which breaks any non-script use.
        swiftData = readSwiftLC(swiftFullFileName, swiftBinNow, veritasObsDates)
        experiment = 'swift'
        outPrefix = '{}/{}_{}_{}'.format(sourceNow, experiment, swiftBinNow, str(nsims))
        swiftRateErrorAverage = (swiftData['Rate error down'] +
                                 swiftData['Rate error up'])/2.
        result, best = calibrate_ncp_prior(flux=swiftData['Rate'],
                                            fluxerr=swiftRateErrorAverage,
                                            time=swiftData['Date'],
                                            timebin=(swiftData['Date error down'] +
                                                     swiftData['Date error up']),
                                            p_0=[0.01, 0.05],
                                            n_sims=nsims,
                                            min_prior=0.2,
                                            max_prior=4,
                                            n_steps=n_steps,
                                            outPrefix=outPrefix)
        gamma = 10**(- best[str(0.01)].ncp)
        print(sourceNow, 'Swift', swiftBinNow, 'gamma - ', gamma)

    return


if __name__ == '__main__':

    np.random.seed(1234)
    parser = argparse.ArgumentParser(description=('Calculate optimal '
                                          'priors for Bayesian blocks.'))
    parser.add_argument('source')
    parser.add_argument('binning')
    args = parser.parse_args()

    sources = {'1ES0033': '1ES 0033+595',
               '1ES0502': '1ES 0502+675',
               '1ES1011': '1ES 1011+496',
               '1ES1218': '1ES 1218+304',
               '1ES0229': '1ES 0229+200',
               'RGBJ0710': 'RGB J0710+591',
               'PG1553': 'PG 1553+113',
               'PKS1424': 'PKS 1424+240'
               }
    if args.source not in sources:
        print('Source', args.source, 'not known')
        sys.exit(1)  # without this, the lookups below raise a KeyError

    hdulist = fits.open(('/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/'
                'lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit'))
    sourceCatalog = hdulist[1].data

    workDir = os.getcwd() + '/'
    fermiPrefix = '/lustre/fs19/group/cta/users/ogueta/fermi/variabilityStudy/'
    veritasPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/makeLC/'
    swiftPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/swift/onlineTool/'
    corrTableFile = ('/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/'
                     'crabStability/plotLC/correctionFactors.txt')
    veritasObsPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/spectra/'

    for i_src, sourceTeV in enumerate(sourceCatalog['ASSOC_TEV']):
        if sources[args.source] in sourceTeV:
            fermiLC = sourceCatalog['Source_Name'][i_src].replace(' ', '_').lower()
            fermiLC += '_lightcurve.npy'

    fermiBinning = args.binning
    if fermiBinning != 'monthly':
        fermiBinning = 'monthly'
    fermiFile = os.path.join(fermiPrefix, '{}LightCurves'.format(fermiBinning),
                             args.source, args.source, fermiLC)

    veritasDirectory = os.path.join(veritasPrefix, args.source)
    veritasLC = glob.glob(os.path.join(veritasDirectory,
                                       '{}*fullEnergyRange*.txt'.format(args.binning)))[0]
    veritasFile = os.path.join(veritasDirectory, veritasLC)

    corrTable = readCorrTable(corrTableFile)

    veritasObsFile = os.path.join(os.path.join(veritasObsPrefix, args.source),
                                  'fluxPerRun.txt')

    swiftFile = os.path.join(swiftPrefix, args.source, 'dailyBins',
                             '{}_lightcurve.qdp'.format(args.source))

    try:
        subprocess.check_call(['mkdir', '-p', args.source])
    except subprocess.CalledProcessError as e:
        print('Could not create output directory')
        sys.exit(1)

    determinePriors(veritasFile, fermiFile, swiftFile, corrTable, veritasObsFile,
                    args.source, args.binning)
[ "numpy.load", "numpy.random.seed", "argparse.ArgumentParser", "astropy.stats.bayesian_blocks", "numpy.argmax", "numpy.abs", "numpy.core.records.fromarrays", "collections.defaultdict", "os.path.isfile", "os.path.join", "subprocess.check_call", "numpy.copy", "numpy.power", "numpy.savetxt", "numpy.max", "numpy.loadtxt", "numpy.linspace", "numpy.average", "numpy.min", "astropy.io.fits.open", "os.getcwd", "numpy.zeros" ]
[((1234, 1251), 'numpy.zeros', 'np.zeros', (['n_steps'], {}), '(n_steps)\n', (1242, 1251), True, 'import numpy as np\n'), ((1269, 1311), 'numpy.linspace', 'np.linspace', (['min_prior', 'max_prior', 'n_steps'], {}), '(min_prior, max_prior, n_steps)\n', (1280, 1311), True, 'import numpy as np\n'), ((1451, 1484), 'numpy.average', 'np.average', (['flux'], {'weights': 'fluxerr'}), '(flux, weights=fluxerr)\n', (1461, 1484), True, 'import numpy as np\n'), ((2297, 2363), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['[ncp_priors, fp_rate]'], {'names': '"""ncp, fp"""'}), "([ncp_priors, fp_rate], names='ncp, fp')\n", (2323, 2363), True, 'import numpy as np\n'), ((7634, 7677), 'numpy.linspace', 'np.linspace', (['mjd_min', 'mjd_max', 'nBins', '(False)'], {}), '(mjd_min, mjd_max, nBins, False)\n', (7645, 7677), True, 'import numpy as np\n'), ((7699, 7716), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7710, 7716), False, 'from collections import defaultdict\n'), ((9038, 9082), 'numpy.loadtxt', 'np.loadtxt', (['corrTableFile'], {'dtype': 'headersType'}), '(corrTableFile, dtype=headersType)\n', (9048, 9082), True, 'import numpy as np\n'), ((9147, 9162), 'numpy.copy', 'np.copy', (['origLC'], {}), '(origLC)\n', (9154, 9162), True, 'import numpy as np\n'), ((10529, 10574), 'numpy.loadtxt', 'np.loadtxt', (['veritasDatFile'], {'dtype': 'headersType'}), '(veritasDatFile, dtype=headersType)\n', (10539, 10574), True, 'import numpy as np\n'), ((14482, 14502), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (14496, 14502), True, 'import numpy as np\n'), ((14517, 14606), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate optimal priors for Bayesian blocks."""'}), "(description=\n 'Calculate optimal priors for Bayesian blocks.')\n", (14540, 14606), False, 'import argparse\n'), ((15213, 15366), 'astropy.io.fits.open', 'fits.open', (['"""/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit"""'], {}), "(\n '/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit'\n )\n", (15222, 15366), False, 'from astropy.io import fits\n'), ((16462, 16502), 'os.path.join', 'os.path.join', (['veritasPrefix', 'args.source'], {}), '(veritasPrefix, args.source)\n', (16474, 16502), False, 'import os\n'), ((16656, 16697), 'os.path.join', 'os.path.join', (['veritasDirectory', 'veritasLC'], {}), '(veritasDirectory, veritasLC)\n', (16668, 16697), False, 'import os\n'), ((296, 323), 'os.path.isfile', 'os.path.isfile', (['datFileName'], {}), '(datFileName)\n', (310, 323), False, 'import os\n'), ((2612, 2636), 'numpy.savetxt', 'np.savetxt', (['fOut', 'result'], {}), '(fOut, result)\n', (2622, 2636), True, 'import numpy as np\n'), ((5141, 5184), 'numpy.linspace', 'np.linspace', (['mjd_min', 'mjd_max', 'nBins', '(False)'], {}), '(mjd_min, mjd_max, nBins, False)\n', (5152, 5184), True, 'import numpy as np\n'), ((9240, 9285), 'numpy.argmax', 'np.argmax', (["(dateNow < corrTable['Right edges'])"], {}), "(dateNow < corrTable['Right edges'])\n", (9249, 9285), True, 'import numpy as np\n'), ((15438, 15449), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15447, 15449), False, 'import os\n'), ((16777, 16820), 'os.path.join', 'os.path.join', (['veritasObsPrefix', 'args.source'], {}), '(veritasObsPrefix, args.source)\n', (16789, 16820), False, 'import os\n'), ((16997, 17048), 'subprocess.check_call', 
'subprocess.check_call', (["['mkdir', '-p', args.source]"], {}), "(['mkdir', '-p', args.source])\n", (17018, 17048), False, 'import subprocess\n'), ((2061, 2133), 'astropy.stats.bayesian_blocks', 'bayesian_blocks', (['x', 'datapoints', 'fluxerr'], {'fitness': '"""measures"""', 'gamma': 'gamma'}), "(x, datapoints, fluxerr, fitness='measures', gamma=gamma)\n", (2076, 2133), False, 'from astropy.stats import bayesian_blocks\n'), ((12022, 12067), 'numpy.loadtxt', 'np.loadtxt', (['veritasObsFile'], {'dtype': 'headersType'}), '(veritasObsFile, dtype=headersType)\n', (12032, 12067), True, 'import numpy as np\n'), ((8041, 8078), 'numpy.average', 'np.average', (["tempFermiData['tmax_mjd']"], {}), "(tempFermiData['tmax_mjd'])\n", (8051, 8078), True, 'import numpy as np\n'), ((8125, 8162), 'numpy.average', 'np.average', (["tempFermiData['tmin_mjd']"], {}), "(tempFermiData['tmin_mjd'])\n", (8135, 8162), True, 'import numpy as np\n'), ((8205, 8279), 'numpy.average', 'np.average', (["tempFermiData['flux']"], {'weights': "(1.0 / tempFermiData['flux_err'])"}), "(tempFermiData['flux'], weights=1.0 / tempFermiData['flux_err'])\n", (8215, 8279), True, 'import numpy as np\n'), ((11667, 11707), 'numpy.load', 'np.load', (['fermiDatFile'], {'encoding': '"""latin1"""'}), "(fermiDatFile, encoding='latin1')\n", (11674, 11707), True, 'import numpy as np\n'), ((2472, 2494), 'numpy.abs', 'np.abs', (['(result.fp - p0)'], {}), '(result.fp - p0)\n', (2478, 2494), True, 'import numpy as np\n'), ((5721, 5754), 'numpy.average', 'np.average', (["tempSwiftData['Date']"], {}), "(tempSwiftData['Date'])\n", (5731, 5754), True, 'import numpy as np\n'), ((5864, 5904), 'numpy.max', 'np.max', (["(tempSwiftData['Date'] - date[-1])"], {}), "(tempSwiftData['Date'] - date[-1])\n", (5870, 5904), True, 'import numpy as np\n'), ((6029, 6088), 'numpy.average', 'np.average', (["tempSwiftData['Rate']"], {'weights': '(1.0 / totalError)'}), "(tempSwiftData['Rate'], weights=1.0 / totalError)\n", (6039, 6088), True, 'import numpy as np\n'), ((9540, 9582), 'numpy.power', 'np.power', (["corrLC['Flux Error'][i_point]", '(2)'], {}), "(corrLC['Flux Error'][i_point], 2)\n", (9548, 9582), True, 'import numpy as np\n'), ((9637, 9713), 'numpy.power', 'np.power', (["(corrTable['CorrFactorError'][corrBin] * corrLC['Flux'][i_point])", '(2)'], {}), "(corrTable['CorrFactorError'][corrBin] * corrLC['Flux'][i_point], 2)\n", (9645, 9713), True, 'import numpy as np\n'), ((5800, 5829), 'numpy.min', 'np.min', (["tempSwiftData['Date']"], {}), "(tempSwiftData['Date'])\n", (5806, 5829), True, 'import numpy as np\n'), ((8390, 8428), 'numpy.power', 'np.power', (["tempFermiData['flux_err']", '(2)'], {}), "(tempFermiData['flux_err'], 2)\n", (8398, 8428), True, 'import numpy as np\n'), ((6135, 6180), 'numpy.power', 'np.power', (["tempSwiftData['Rate error down']", '(2)'], {}), "(tempSwiftData['Rate error down'], 2)\n", (6143, 6180), True, 'import numpy as np\n'), ((6232, 6275), 'numpy.power', 'np.power', (["tempSwiftData['Rate error up']", '(2)'], {}), "(tempSwiftData['Rate error up'], 2)\n", (6240, 6275), True, 'import numpy as np\n')]
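calibrate_ncp_prior() above searches for the ncp value whose simulated false-positive rate matches p_0; the script then converts it to astropy's prior via gamma = 10**(-ncp) and would run the real light curve through the same call used in the simulation loop. A sketch of that final step; the light curve and the calibrated ncp value are placeholders:

import numpy as np
from astropy.stats import bayesian_blocks

time = np.arange(50, dtype=float)        # placeholder light curve
flux = np.random.normal(1.0, 0.1, 50)
flux_err = np.full(50, 0.1)

ncp_best = 2.5                            # would come from calibrate_ncp_prior
gamma = 10 ** (-ncp_best)

edges = bayesian_blocks(time, flux, flux_err, fitness='measures', gamma=gamma)
print(len(edges) > 2)  # True means at least one change point was found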
import sys import numpy as np from mayavi import mlab from mayavi.scripts import mayavi2 from traits.api import HasTraits, Button, Instance from traitsui.api import View, Item from ._plot3d import plot3d_embeddings def plot3d_gmphd(dataset, embeddings, grid, gm_s=None, gm_list=None, observation=None, title=None, contours=4, log_plot=True): """3D plot of CASAS sensor embedding with GM-PHD sampled by grid. Multi-target PHD represented either by scalar ``gm_s`` or Gaussian Mixture ``gm_list`` is plotted as 3D contour graph with mayavi. Current observations and sensor embedding are plotted as spheres as well. Args: dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset. embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding of shape (num_sensors, 3) where num_sensors corresponds to the length of ``dataset.sensor_list``. grid (:obj:`numpy.ndarray`): 3D mesh generated by :func:`numpy.mgrid` or :func:`numpy.meshgrid`. gm_s (:obj:`numpy.ndarray`): Multi-target PHD scalar sampled at each point defined by the 3D mesh grid ``grid``. gm_list (:obj:`list`): List of :obj:`~pymrt.tracking.utils.GaussianComponent` representing the multi-target PHD at the moment. If ``gm_s`` is None, this list is used to generate the PHD scalar for plotting. observation (:obj:`list`): List of observations to be plotted. Each observation is a :obj:`numpy.ndarray` of shape (n, 1). It has to be the embedding vector of one of the sensor in the dataset. title (:obj:`string`): Plot title. contours (:obj:`int`): Number of contour surfaces to draw. log_plot (:obj:`bool`): Plot ``gm_s`` in log scale. """ if gm_s is None: if gm_list is None: raise ValueError("Must provide 3D sampled GM scalar gm_s or a " "Gaussian Mixture list") else: print('Sampling PHD in 3D space') from ...tracking.utils import gm_calculate gm_s = gm_calculate(gm_list=gm_list, grid=grid) if title is None: title = 'PHD' print('Start Plotting with Mayavi') figure = mlab.figure(dataset.get_name() + ' ' + title) if log_plot: contour_s = np.log(gm_s + np.finfo(np.float).tiny) else: contour_s = gm_s # Plot Contour Surf first contour = mlab.contour3d( grid[0], grid[1], grid[2], contour_s, contours=contours, transparent=True, opacity=0.5 ) mlab.colorbar(contour, title='PHD', orientation='vertical') _, points = plot3d_embeddings(dataset, embeddings, figure=figure) if observation is not None: obs_array = np.block(observation).T obs_points = mlab.points3d( obs_array[:, 0], obs_array[:, 1], obs_array[:, 2], scale_factor=0.03, color=(0, 0, 1) ) mlab.show() def plot3d_gmphd_track(dataset, embeddings, grid, gm_s_list=None, gm_list_list=None, observation_list=None, title=None, contours=4, log_plot=True): """ 3D plot of CASAS sensor embedding with GM-PHD sampled by grid. Multi-target PHD represented either by scalar ``gm_s`` or Gaussian Mixture ``gm_list`` is plotted as 3D contour graph with mayavi. Current observations and sensor embedding are plotted as spheres as well. It wraps the whole sequence in a mayavi application, user can go back and forth in time and visually see how the PHD changes in time. Args: dataset (:obj:`~pymrt.casas.CASASDataset`): CASAS smart home dataset. embeddings (:obj:`numpy.ndarray`): 3D sensor vector embedding of shape (num_sensors, 3) where num_sensors corresponds to the length of ``dataset.sensor_list``. grid (:obj:`numpy.ndarray`): 3D mesh generated by :func:`numpy.mgrid` or :func:`numpy.meshgrid`. gm_s_list (:obj:`list`): List of PHD scalars at each time step. 
gm_list_list (:obj:`list`): List of Gaussian Mixtures at each time step. If ``gm_s_list`` is None, it is used along with ``grid`` to generate the PHD scalar at each time step. observation_list (:obj:`list`): List of observations at each time step. title (:obj:`string`): Plot title. contours (:obj:`int`): Number of contour surfaces to draw. log_plot (:obj:`bool`): Plot ``gm_s`` in log scale. """ if gm_s_list is None: if gm_list_list is None: raise ValueError("Must provide 3D sampled GM scalar gm_s or a " "Gaussian Mixture list") else: print('Sampling PHD in 3D space') from ...tracking.utils import gm_calculate gm_s_list = [] i = 0 for gm_list in gm_list_list: sys.stdout.write('calculate gm_scalar for step %d' % i) gm_s_list.append(gm_calculate( gm_list=gm_list, grid=grid )) i += 1 if title is None: title = 'PHD' print('Start Plotting with Mayavi') class Controller(HasTraits): next_frame = Button('Next Frame') previous_frame = Button('Previous Frame') view = View( Item(name='next_frame'), Item(name='previous_frame') ) current_frame = 0 play_state = False def _next_frame_changed(self, value): """Goto next frame""" if self.current_frame + 1 < len(gm_s_list): self.current_frame += 1 self.update_frame() def _previous_frame_changed(self, value): """Goto previous frame""" if self.current_frame - 1 >= 0: self.current_frame -= 1 self.update_frame() def update_frame(self): print('Frame %d' % self.current_frame) if log_plot: contour_s = np.log( gm_s_list[self.current_frame] + np.finfo(np.float).tiny ) else: contour_s = gm_s_list[self.current_frame] self.phd_contour.mlab_source.set( scalars=contour_s ) self.color_vector[:] = 0. if observation_list is not None: obs_array = observation_list[self.current_frame] obs_index = [ np.where( np.all(embeddings == sensor_vec.flatten(), axis=1) )[0][0] for sensor_vec in obs_array ] self.color_vector[obs_index] = 1. self.sensor_points.mlab_source.dataset.point_data.scalars = \ self.color_vector mlab.draw() @mayavi2.standalone def main_view(): """Example showing how to view a 3D numpy array in mayavi2. """ figure = mlab.figure(dataset.get_name() + ' ' + title) if log_plot: contour_s = np.log(gm_s_list[0] + np.finfo(np.float).tiny) else: contour_s = gm_s_list[0] # Plot Contour Surf first contour = mlab.contour3d( grid[0], grid[1], grid[2], contour_s, contours=contours, transparent=True, opacity=0.5 ) mlab.colorbar(contour, title='PHD', orientation='vertical') _, points = plot3d_embeddings(dataset, embeddings, figure=figure) points.glyph.scale_mode = 'scale_by_vector' points.mlab_source.dataset.point_data.vectors = np.tile( np.ones(embeddings.shape[0]), (3, 1)) color_vector = np.zeros(embeddings.shape[0]) points.mlab_source.dataset.point_data.scalars = color_vector if observation_list is not None: obs_array = observation_list[0] obs_index = [ np.where( np.all(embeddings == sensor_vec.flatten(), axis=1) )[0][0] for sensor_vec in obs_array ] color_vector[obs_index] = 1. computation = Controller( sensor_points=points, phd_contour=contour, color_vector=color_vector, figure=figure ) computation.edit_traits() main_view()
[ "sys.stdout.write", "mayavi.mlab.colorbar", "traits.api.Button", "mayavi.mlab.show", "mayavi.mlab.draw", "numpy.zeros", "numpy.ones", "mayavi.mlab.points3d", "numpy.finfo", "traitsui.api.Item", "mayavi.mlab.contour3d", "numpy.block" ]
[((2505, 2611), 'mayavi.mlab.contour3d', 'mlab.contour3d', (['grid[0]', 'grid[1]', 'grid[2]', 'contour_s'], {'contours': 'contours', 'transparent': '(True)', 'opacity': '(0.5)'}), '(grid[0], grid[1], grid[2], contour_s, contours=contours,\n transparent=True, opacity=0.5)\n', (2519, 2611), False, 'from mayavi import mlab\n'), ((2675, 2734), 'mayavi.mlab.colorbar', 'mlab.colorbar', (['contour'], {'title': '"""PHD"""', 'orientation': '"""vertical"""'}), "(contour, title='PHD', orientation='vertical')\n", (2688, 2734), False, 'from mayavi import mlab\n'), ((3044, 3055), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (3053, 3055), False, 'from mayavi import mlab\n'), ((2904, 3008), 'mayavi.mlab.points3d', 'mlab.points3d', (['obs_array[:, 0]', 'obs_array[:, 1]', 'obs_array[:, 2]'], {'scale_factor': '(0.03)', 'color': '(0, 0, 1)'}), '(obs_array[:, 0], obs_array[:, 1], obs_array[:, 2],\n scale_factor=0.03, color=(0, 0, 1))\n', (2917, 3008), False, 'from mayavi import mlab\n'), ((5357, 5377), 'traits.api.Button', 'Button', (['"""Next Frame"""'], {}), "('Next Frame')\n", (5363, 5377), False, 'from traits.api import HasTraits, Button, Instance\n'), ((5403, 5427), 'traits.api.Button', 'Button', (['"""Previous Frame"""'], {}), "('Previous Frame')\n", (5409, 5427), False, 'from traits.api import HasTraits, Button, Instance\n'), ((7374, 7480), 'mayavi.mlab.contour3d', 'mlab.contour3d', (['grid[0]', 'grid[1]', 'grid[2]', 'contour_s'], {'contours': 'contours', 'transparent': '(True)', 'opacity': '(0.5)'}), '(grid[0], grid[1], grid[2], contour_s, contours=contours,\n transparent=True, opacity=0.5)\n', (7388, 7480), False, 'from mayavi import mlab\n'), ((7580, 7639), 'mayavi.mlab.colorbar', 'mlab.colorbar', (['contour'], {'title': '"""PHD"""', 'orientation': '"""vertical"""'}), "(contour, title='PHD', orientation='vertical')\n", (7593, 7639), False, 'from mayavi import mlab\n'), ((7906, 7935), 'numpy.zeros', 'np.zeros', (['embeddings.shape[0]'], {}), '(embeddings.shape[0])\n', (7914, 7935), True, 'import numpy as np\n'), ((2859, 2880), 'numpy.block', 'np.block', (['observation'], {}), '(observation)\n', (2867, 2880), True, 'import numpy as np\n'), ((5461, 5484), 'traitsui.api.Item', 'Item', ([], {'name': '"""next_frame"""'}), "(name='next_frame')\n", (5465, 5484), False, 'from traitsui.api import View, Item\n'), ((5498, 5525), 'traitsui.api.Item', 'Item', ([], {'name': '"""previous_frame"""'}), "(name='previous_frame')\n", (5502, 5525), False, 'from traitsui.api import View, Item\n'), ((6976, 6987), 'mayavi.mlab.draw', 'mlab.draw', ([], {}), '()\n', (6985, 6987), False, 'from mayavi import mlab\n'), ((7845, 7873), 'numpy.ones', 'np.ones', (['embeddings.shape[0]'], {}), '(embeddings.shape[0])\n', (7852, 7873), True, 'import numpy as np\n'), ((5025, 5080), 'sys.stdout.write', 'sys.stdout.write', (["('calculate gm_scalar for step %d' % i)"], {}), "('calculate gm_scalar for step %d' % i)\n", (5041, 5080), False, 'import sys\n'), ((2401, 2419), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (2409, 2419), True, 'import numpy as np\n'), ((7245, 7263), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (7253, 7263), True, 'import numpy as np\n'), ((6209, 6227), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (6217, 6227), True, 'import numpy as np\n')]
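As a hedged illustration of the contour3d/colorbar pattern that plot3d_gmphd uses above, here is a self-contained snippet on a synthetic scalar field; the Gaussian blob stands in for a grid-sampled PHD, and mayavi must be installed.

import numpy as np
from mayavi import mlab

# Synthetic 3D scalar field standing in for the sampled multi-target PHD.
grid = np.mgrid[-1:1:40j, -1:1:40j, -1:1:40j]
gm_s = np.exp(-(grid[0]**2 + grid[1]**2 + grid[2]**2) / 0.1)

# Same contour3d + colorbar pattern as plot3d_gmphd, including the log rescaling.
contour = mlab.contour3d(grid[0], grid[1], grid[2],
                        np.log(gm_s + np.finfo(float).tiny),
                        contours=4, transparent=True, opacity=0.5)
mlab.colorbar(contour, title='PHD', orientation='vertical')
mlab.show()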
import numpy as np


def gauss2D(x, y, fwhmx, fwhmy, x0=0, y0=0, offset=0, order=1, int_FWHM=True):
    """
    Define a (super-)Gaussian 2D beam.
    Identical to laser.misc.gauss2D.

    Parameters
    ----------
    x: float 2D np.array
        Horizontal axis of the Gaussian

    y: float 2D np.array
        Vertical axis of the Gaussian

    fwhmx: float
        Horizontal Full Width at Half Maximum

    fwhmy: float
        Vertical Full Width at Half Maximum

    x0: float, optional
        Horizontal center position of the Gaussian

    y0: float, optional
        Vertical center position of the Gaussian

    offset: float, optional
        Amplitude offset of the Gaussian

    order: int, optional
        order of the super-Gaussian function. Defined as:
        exp( - ( x**2 + y**2 )**order )

    int_FWHM: boolean, optional
        If True, the FWHM is the FWHM of the square of the Gaussian
        (intensity). If False, it is the FWHM of the Gaussian directly
        (electric field).
    """
    coeff = 1.0
    if int_FWHM:
        coeff = 0.5
    return np.exp(-np.log(2) * coeff * ((2 * (x - x0) / fwhmx)**2
                                          + (2 * (y - y0) / fwhmy)**2)**order) + offset


def gauss1D(x, fwhm, x0=0, offset=0, order=1, int_FWHM=True):
    """
    Define a (super-)Gaussian 1D beam.
    Identical to laser.misc.gauss2D.

    Parameters
    ----------
    x: float 1D np.array
        Axis of the Gaussian

    fwhm: float
        Full Width at Half Maximum

    x0: float, optional
        Center position of the Gaussian

    offset: float, optional
        Amplitude offset of the Gaussian

    order: int, optional
        order of the super-Gaussian function. Defined as:
        exp( - ( x**2 )**order )

    int_FWHM: boolean, optional
        If True, the FWHM is the FWHM of the square of the Gaussian
        (intensity). If False, it is the FWHM of the Gaussian directly
        (electric field).
    """
    coeff = 1.0
    if int_FWHM:
        coeff = 0.5
    return np.exp(-np.log(2) * coeff * ((2 * (x - x0) / fwhm)**2)**order) + offset


def cart2pol(x, y):
    """Convert cartesian to polar coordinates"""
    return np.abs(x + 1j * y), np.angle(x + 1j * y)


def pol2cart(r, theta):
    """Convert polar to cartesian coordinates"""
    return np.real(r * np.exp(1j * theta)), np.imag(r * np.exp(1j * theta))


def array_trim(ar):
    """Trim zeros of 2D map"""
    ar_trim = ar.copy()
    ar_trim = ar_trim[:, ar_trim.any(axis=0)]  # trim columns
    ar_trim = ar_trim[ar_trim.any(axis=1), :]  # trim rows
    return ar_trim


def vect(N):
    """Returns a centered array between -0.5 and 0.5"""
    return np.linspace(0, N, num=N) / N - 0.5


def norm(a):
    """Normalise an array by its maximum value"""
    return a / np.max(np.abs(a))


def text_progress_bar(iteration, num_iteration):
    """Displays a progress bar with the print function"""
    return print('|' * (iteration + 1)
                 + '.' * (num_iteration - iteration - 1)
                 + ' %.1f %%' % ((iteration + 1) / num_iteration * 100),
                 end='\r')
[ "numpy.angle", "numpy.abs", "numpy.log", "numpy.linspace" ]
[((2122, 2142), 'numpy.abs', 'np.abs', (['(x + 1.0j * y)'], {}), '(x + 1.0j * y)\n', (2128, 2142), True, 'import numpy as np\n'), ((2142, 2164), 'numpy.angle', 'np.angle', (['(x + 1.0j * y)'], {}), '(x + 1.0j * y)\n', (2150, 2164), True, 'import numpy as np\n'), ((2606, 2630), 'numpy.linspace', 'np.linspace', (['(0)', 'N'], {'num': 'N'}), '(0, N, num=N)\n', (2617, 2630), True, 'import numpy as np\n'), ((2729, 2738), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2735, 2738), True, 'import numpy as np\n'), ((1080, 1089), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1086, 1089), True, 'import numpy as np\n'), ((1976, 1985), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1982, 1985), True, 'import numpy as np\n')]
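A small usage sketch for the gauss2D helper defined above (run it in the same module or session so gauss2D is in scope); the FWHM values are arbitrary examples.

import numpy as np

x, y = np.meshgrid(np.linspace(-5, 5, 101), np.linspace(-5, 5, 101))
beam = gauss2D(x, y, fwhmx=2.0, fwhmy=3.0, order=2)

# With int_FWHM=True the *intensity* (beam**2) falls to one half at the FWHM.
print(beam.max())              # 1.0 at the centre
print((beam**2 >= 0.5).sum())  # number of samples inside the intensity FWHM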
#!/usr/bin/env python from typing import Optional import datetime import logging import pathlib import cv2 import numpy as np import yacs.config from gaze_estimation.gaze_estimator.common import (Face, FacePartsName, Visualizer) from gaze_estimation.utils import load_config from gaze_estimation import GazeEstimationMethod, GazeEstimator logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) import pdb import pickle import time import imutils import sys import os import draw_utils from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup from screen_conf import * # FLAGS PARAMETERS #------------------------------------------------ fpath = 'recs/' rgb_fp = 'det/' # AVERAGING OVER GAZE VALUES TOGGLE #------------------------------------------------ GAZE_AVG_FLAG = 0 num_frames = 3 # num of frames to average over #------------------------------------------------ # AVERAGING OVER LANDMARKS TOGGLE AVG_LANDMARKS = 0 num_avg_frames = 3 # num of frames to average over # GLOBAL VARIABLES #------------------------------------------------ img = np.zeros((adj_H, W_px,3)) mid_point = (0,0) rng_pos = (np.random.randint(0, W_px),np.random.randint(0, H_px)) focus = 0 avg_pos = [] #------------------------------------------------ DEBUG = 0 #'EYE' # 'EYE' DEBUG INDIVIDUAL VALUES if DEBUG: try: print('Creating dirs') os.mkdir(fpath) os.mkdirs(fpath+rgb_fp) except: print('dirs already exist') #------------------------------------------------ class Demo: QUIT_KEYS = {27, ord('q')} def __init__(self, config: yacs.config.CfgNode): self.config = config self.gaze_estimator = GazeEstimator(config, AVG_LANDMARKS=AVG_LANDMARKS, num_frames=num_avg_frames) self.visualizer = Visualizer(self.gaze_estimator.camera) self.cap = self._create_capture() self.output_dir = self._create_output_dir() # Turn writer on and off. if SAVE_VIDEO: self.writer = self._create_video_writer() else: self.writer = 0 self.stop = False self.show_bbox = self.config.demo.show_bbox self.show_head_pose = self.config.demo.show_head_pose self.show_landmarks = self.config.demo.show_landmarks self.show_normalized_image = NORM_EYEZ # self.config.demo.show_normalized_image self.show_template_model = self.config.demo.show_template_model # FRAME COUNTER self.i = 0 self.pts = [] self.cur_pos = [] self.true_pos = [] self.dist = [] self.left_eye_cent = [] self.right_eye_cent = [] self.right_eye_gaze = [] self.left_eye_gaze = [] self.face_gaze = [] self.face_cent = [] def run(self) -> None: while True: if DEMO: pts = draw_utils.display_canv(CANV_MODE=CANV_MODE, cur_pos=mid_point) #cur_pos=cur_pos self.pts.append(pts) self.true_pos.append(pts[0]) self.cur_pos.append(pts[1]) if self.config.demo.display_on_screen: self._wait_key() if self.stop: break ok, frame = self.cap.read() if not ok: break if CUST_VIDEO: frame = imutils.resize(frame, width=self.gaze_estimator.camera.width, height=self.gaze_estimator.camera.height) calib_time = time.time() # FIRST WE UNDISTORT THE IMAGE! 
undistorted = cv2.undistort( frame, self.gaze_estimator.camera.camera_matrix, self.gaze_estimator.camera.dist_coefficients) if RUNTIME: print('Image calibration: ', time.time()-calib_time, ' seconds.') self.visualizer.set_image(frame.copy()) dlib_time = time.time() faces = self.gaze_estimator.detect_faces(undistorted) if RUNTIME: print('DLIB faces: ', time.time() - dlib_time, ' seconds.') for face in faces: self.gaze_estimator.estimate_gaze(undistorted, face) self._draw_face_bbox(face) self._draw_head_pose(face) self._draw_landmarks(face) self._draw_face_template_model(face) self._draw_gaze_vector(face) self._display_normalized_image(face) if self.config.demo.use_camera: self.visualizer.image = self.visualizer.image[:, ::-1] if self.writer: self.writer.write(self.visualizer.image) #self.write_eyes.write(self.visualizer.image) if self.config.demo.display_on_screen: self.visualizer.image = cv2.resize(self.visualizer.image, (0, 0), fy=IMG_SCALE, fx=IMG_SCALE) cv2.imshow('frame', self.visualizer.image) # MOVE TO TOP LEFT CORNER cv2.moveWindow("frame", 0,0) if DEBUG: cv2.imwrite(fpath+rgb_fp+'rgb_'+str(self.i).zfill(5)+'.png', self.visualizer.image) # INCREMENT COUNTER self.i += 1 self.cap.release() if self.writer: self.writer.release() def _create_capture(self) -> cv2.VideoCapture: if self.config.demo.use_camera: # use recording or the custom video if CUST_VIDEO: cap = cv2.VideoCapture(vid_file) else: cap = cv2.VideoCapture(0) elif self.config.demo.video_path: cap = cv2.VideoCapture(self.config.demo.video_path) else: raise ValueError # pdb.set_trace() cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.gaze_estimator.camera.width) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.gaze_estimator.camera.height) return cap def _create_output_dir(self) -> Optional[pathlib.Path]: if not self.config.demo.output_dir: return output_dir = pathlib.Path(self.config.demo.output_dir) output_dir.mkdir(exist_ok=True, parents=True) return output_dir @staticmethod def _create_timestamp() -> str: dt = datetime.datetime.now() return dt.strftime('%Y%m%d_%H%M%S') def _create_video_writer(self) -> Optional[cv2.VideoWriter]: if not self.output_dir: return None ext = self.config.demo.output_file_extension if ext == 'mp4': fourcc = cv2.VideoWriter_fourcc(*'H264') elif ext == 'avi': fourcc = cv2.VideoWriter_fourcc(*'PIM1') else: raise ValueError output_path = self.output_dir / f'{self._create_timestamp()}.{ext}' writer = cv2.VideoWriter(output_path.as_posix(), fourcc, FPS, (VID_W, VID_H)) if writer is None: raise RuntimeError return writer def _wait_key(self) -> None: key = cv2.waitKey(self.config.demo.wait_time) & 0xff if key in self.QUIT_KEYS: self.stop = True elif key == ord('b'): self.show_bbox = not self.show_bbox elif key == ord('l'): self.show_landmarks = not self.show_landmarks elif key == ord('h'): self.show_head_pose = not self.show_head_pose elif key == ord('n'): self.show_normalized_image = not self.show_normalized_image elif key == ord('t'): self.show_template_model = not self.show_template_model def _draw_face_bbox(self, face: Face) -> None: if not self.show_bbox: return self.visualizer.draw_bbox(face.bbox) def _draw_head_pose(self, face: Face) -> None: if not self.show_head_pose: return # Draw the axes of the model coordinate system length = self.config.demo.head_pose_axis_length self.visualizer.draw_model_axes(face, length, lw=2) euler_angles = face.head_pose_rot.as_euler('XYZ', degrees=True) pitch, yaw, roll = face.change_coordinate_system(euler_angles) logger.info(f'[head] pitch: {pitch:.2f}, yaw: {yaw:.2f}, ' f'roll: 
{roll:.2f}, distance: {face.distance:.2f}') self.dist.append(face.distance) def _draw_landmarks(self, face: Face) -> None: if not self.show_landmarks: return self.visualizer.draw_points(face.landmarks, color=(0, 255, 255), size=1) def _draw_face_template_model(self, face: Face) -> None: if not self.show_template_model: return self.visualizer.draw_3d_points(face.model3d, color=(255, 0, 525), size=1) def _display_normalized_image(self, face: Face) -> None: if not self.config.demo.display_on_screen: return if not self.show_normalized_image: return if self.config.mode == GazeEstimationMethod.MPIIGaze.name: reye = face.reye.normalized_image leye = face.leye.normalized_image normalized = np.hstack([reye, leye]) elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name: normalized = face.normalized_image else: raise ValueError if self.config.demo.use_camera: normalized = normalized[:, ::-1] normalized = cv2.resize(normalized, (0, 0), fy=5, fx=5) if PRINT_VALS: H, W = normalized.shape left_edge = W - 50 left_edge_H = 20 cv2.putText(normalized, str(self.i), #'cur frame = ' (left_edge, left_edge_H), cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1) save_str = 'norm_eyes_fix/img_'+str(self.i).zfill(5)+'.png' if NORM_EYEZ: cv2.imwrite(save_str, normalized[:,300:]) cv2.imshow('normalized', normalized) def avg_frames(self): if 0: r_avg_cent = [np.array([x[0] for x in self.right_eye_cent[-num_frames:]]).mean(), np.array([x[1] for x in self.right_eye_cent[-num_frames:]]).mean(), np.array([x[2] for x in self.right_eye_cent[-num_frames:]]).mean()] l_avg_cent = [np.array([x[0] for x in self.left_eye_cent[-num_frames:]]).mean(), np.array([x[1] for x in self.left_eye_cent[-num_frames:]]).mean(), np.array([x[2] for x in self.left_eye_cent[-num_frames:]]).mean()] else: r_avg_cent = self.right_eye_cent[-1] l_avg_cent = self.left_eye_cent[-1] r_avg_gaze = [np.array([x[0] for x in self.right_eye_gaze[-num_frames:]]).mean(), np.array([x[1] for x in self.right_eye_gaze[-num_frames:]]).mean(), np.array([x[2] for x in self.right_eye_gaze[-num_frames:]]).mean()] l_avg_gaze = [np.array([x[0] for x in self.left_eye_gaze[-num_frames:]]).mean(), np.array([x[1] for x in self.left_eye_gaze[-num_frames:]]).mean(), np.array([x[2] for x in self.left_eye_gaze[-num_frames:]]).mean()] right_eye_XY = point_to_screen(r_avg_cent, r_avg_gaze) left_eye_XY = point_to_screen(l_avg_cent, l_avg_gaze) mid_x = np.mean([right_eye_XY[0], left_eye_XY[0]]) mid_y = np.mean([right_eye_XY[1], left_eye_XY[1]]) if PRINT_VALS: self.draw_vals(r_avg_gaze, r_avg_cent, l_avg_gaze,l_avg_cent) return mid_x, mid_y def draw_vals(self, r_gaze, r_cent, l_gaze, l_cent): H, W, _ = self.visualizer.image.shape left_edge = W - 350 left_edge_H = 40 flip_img = cv2.flip(self.visualizer.image, 1) r_gaze = round_tup(r_gaze) r_cent = round_tup(r_cent) l_gaze = round_tup(l_gaze) l_cent = round_tup(l_cent) print('frame no ', self.i) print('right_gaze, ', r_gaze) print('left_gaze , ', l_gaze) print('right_cent, ', r_cent) print('left_cent , ', l_cent) cv2.putText(flip_img, 'cur frame = '+ str(self.i), (left_edge, left_edge_H-20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1) cv2.putText(flip_img, 'R_Gaze = '+str(r_gaze), (left_edge, left_edge_H), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1) cv2.putText(flip_img, 'R_Cent = '+str(r_cent), (left_edge, left_edge_H+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1) cv2.putText(flip_img, 'L_Gaze = '+str(l_gaze), (left_edge, left_edge_H+40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1) cv2.putText(flip_img, 'L_Cent = '+str(l_cent), (left_edge, 
left_edge_H+60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, BLACK, 1) if GAZE_AVG_FLAG: avg_str = 'ON' + ' frames = ' + str(num_frames) else: avg_str = 'OFF' cv2.putText(flip_img, 'AVG = ' + str(avg_str), (left_edge, left_edge_H+85), cv2.FONT_HERSHEY_SIMPLEX, 0.8, RED, 1) self.visualizer.image = cv2.flip(flip_img, 1) def _draw_gaze_vector(self, face: Face) -> None: length = self.config.demo.gaze_visualization_length print('*'*50) right_eye_XY = (0,0) left_eye_XY = (0,0) r_gaze_ = (0,0,0) r_cent_ = (0,0,0) l_gaze_ = (0,0,0) l_cent_ = (0,0,0) if self.config.mode == GazeEstimationMethod.MPIIGaze.name: for key in [FacePartsName.REYE, FacePartsName.LEYE]: eye = getattr(face, key.name.lower()) self.visualizer.draw_3d_line( eye.center, eye.center + length * eye.gaze_vector) if key.name.lower() == 'reye': self.right_eye_cent.append(eye.center) self.right_eye_gaze.append(eye.gaze_vector) r_gaze_ = tuple(eye.gaze_vector) r_cent_ = tuple(eye.center) right_eye_XY = point_to_screen(eye.center, eye.gaze_vector) else: self.left_eye_cent.append(eye.center) self.left_eye_gaze.append(eye.gaze_vector) left_eye_XY = point_to_screen(eye.center, eye.gaze_vector) l_gaze_ = tuple(eye.gaze_vector) l_cent_ = tuple(eye.center) print('{} gaze = '.format(key.name.lower()), eye.gaze_vector) print('{} center = '.format(key.name.lower()), eye.center) pitch, yaw = np.rad2deg(eye.vector_to_angle(eye.gaze_vector)) logger.info( f'[{key.name.lower()}] pitch: {pitch:.2f}, yaw: {yaw:.2f}') elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name: self.visualizer.draw_3d_line( face.center, face.center + length * face.gaze_vector) self.face_cent.append(face.center) self.face_gaze.append(face.gaze_vector) pitch, yaw = np.rad2deg(face.vector_to_angle(face.gaze_vector)) logger.info(f'[face] pitch: {pitch:.2f}, yaw: {yaw:.2f}') else: raise ValueError global mid_point if self.config.mode == GazeEstimationMethod.MPIIGaze.name: # ----------------------------------------------- if GAZE_AVG_FLAG: if len(self.right_eye_cent) >= num_frames: mid_x, mid_y = self.avg_frames() else: if PRINT_VALS: self.draw_vals(r_gaze_, r_cent_, l_gaze_,l_cent_) else: mid_x = np.mean([right_eye_XY[0], left_eye_XY[0]]) mid_y = np.mean([right_eye_XY[1], left_eye_XY[1]]) if PRINT_VALS: self.draw_vals(r_gaze_, r_cent_, l_gaze_,l_cent_) elif self.config.mode == GazeEstimationMethod.MPIIFaceGaze.name: XY = point_to_screen(face.center, face.gaze_vector) mid_x = XY[0] mid_y = XY[1] else: raise ValueError mid_point = (int(mid_x), int(mid_y)) def main(): ''' # EYE MODEL python demo.py --config configs/demo_mpiigaze_resnet.yaml # FACE MODEL python demo.py --config configs/demo_mpiifacegaze_resnet_simple_14.yaml ''' global DEMO, CANV_MODE, IMG_SCALE, NORM_EYEZ, SAVE_VIDEO global RUNTIME, CUST_VIDEO, vid_file, PRINT_VALS start_time = time.time() config, custom = load_config() # pdb.set_trace() DEMO = custom['demo'] # Save normalized eyes NORM_EYEZ = custom['eyes'] # FLAG TO SAVE MOVE, DEFAULT = FALSE SAVE_VIDEO = custom['save_vid'] # PRINT RUNTIME RUNTIME = custom['runtime'] #0 # PRINTS VALS ON THE WEBCAM IMG PRINT_VALS = custom['printvals'] #0 # CUSTOM VIDEO: CUST_VIDEO = custom['cust_vid'] if CUST_VIDEO != None: vid_file = CUST_VIDEO CANV_MODE = custom['mode'] if CANV_MODE == 'STABILITY' or CANV_MODE == 'UPDOWN' \ or CANV_MODE == 'LEFTRIGHT' or CANV_MODE == 'SEQ': print('Current mode is {}'.format(CANV_MODE)) else: print('Breaking since current mode is {}'.format(CANV_MODE)) print('Set correct CANV_MODE --mode: ') print('*STABILITY* *UPDOWN* *LEFTRIGHT* *SEQ*') sys.exit(1) if 
DEMO:
        IMG_SCALE = custom['imgscale']
        CANV_MODE = custom['mode'] #'RNG'

    demo = Demo(config)
    demo.run()

    n_frames = len(demo.pts)
    tot_time = time.time()-start_time
    print('nr of frames: ', n_frames)
    print('All finished: ', tot_time, ' seconds.')
    print('FPS: ', round(n_frames/tot_time, 2))

    # This part only gets executed in case there is input to the model
    if CUST_VIDEO:
        # COMPUTE ACCURACY METRICS HERE
        save_path = 'testResults/'
        try:
            os.mkdir(save_path)
        except:
            print('folder already exists {}'.format(save_path))

        str_name = vid_file.split('/')[1].split('.')[0] + '_LM_' + str(AVG_LANDMARKS) + '_GAZE_' + str(GAZE_AVG_FLAG)
        str_name = str(demo.gaze_estimator.camera.width) + 'x' + str(demo.gaze_estimator.camera.height) + '_' + str_name
        str_name = config.mode + str_name

        # drop frames where no gaze estimate was produced (cur_pos == (0, 0))
        indices = [i for i, item in enumerate(demo.cur_pos) if sum(item) == 0]
        for idx in reversed(indices):
            demo.true_pos.pop(idx)
            demo.cur_pos.pop(idx)

        # DUMP THE GAZE AND CENTER VALUES
        if config.mode == 'MPIIGaze':
            dump_dict(str_name, items=[demo.left_eye_cent, demo.left_eye_gaze, demo.right_eye_cent,
                                        demo.right_eye_gaze, demo.true_pos, demo.dist],
                      item_name=['lcent', 'lgaze', 'rcent', 'rgaze', 'tpos', 'fdist'])
        elif config.mode == 'MPIIFaceGaze':
            dump_dict(str_name, items=[demo.face_cent, demo.face_gaze, demo.true_pos, demo.dist],
                      item_name=['fcent', 'fgaze', 'tpos', 'fdist'])

        print('EXIT BEFORE METRICS & PLOTS')
        _, MAE, CEP, CE95 = calc_metrics((demo.true_pos, demo.cur_pos))
        print('MAE = ', MAE)
        print('CEP = ', CEP)
        print('CEP95 = ', CE95)

        # draw results
        draw_utils.plot_pts((demo.true_pos, demo.cur_pos), str_name, MAE, save_path)


if __name__ == '__main__':
    main()
[ "os.mkdir", "helper_fn.calc_metrics", "cv2.VideoWriter_fourcc", "draw_utils.display_canv", "pathlib.Path", "numpy.random.randint", "numpy.mean", "imutils.resize", "cv2.imshow", "gaze_estimation.gaze_estimator.common.Visualizer", "cv2.undistort", "gaze_estimation.utils.load_config", "helper_fn.point_to_screen", "cv2.imwrite", "os.mkdirs", "helper_fn.round_tup", "gaze_estimation.GazeEstimator", "datetime.datetime.now", "cv2.resize", "cv2.waitKey", "numpy.hstack", "cv2.flip", "helper_fn.dump_dict", "sys.exit", "logging.basicConfig", "numpy.zeros", "time.time", "cv2.VideoCapture", "numpy.array", "draw_utils.plot_pts", "cv2.moveWindow", "logging.getLogger" ]
[((395, 434), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (414, 434), False, 'import logging\n'), ((444, 471), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (461, 471), False, 'import logging\n'), ((1148, 1174), 'numpy.zeros', 'np.zeros', (['(adj_H, W_px, 3)'], {}), '((adj_H, W_px, 3))\n', (1156, 1174), True, 'import numpy as np\n'), ((1203, 1229), 'numpy.random.randint', 'np.random.randint', (['(0)', 'W_px'], {}), '(0, W_px)\n', (1220, 1229), True, 'import numpy as np\n'), ((1230, 1256), 'numpy.random.randint', 'np.random.randint', (['(0)', 'H_px'], {}), '(0, H_px)\n', (1247, 1256), True, 'import numpy as np\n'), ((17064, 17075), 'time.time', 'time.time', ([], {}), '()\n', (17073, 17075), False, 'import time\n'), ((17097, 17110), 'gaze_estimation.utils.load_config', 'load_config', ([], {}), '()\n', (17108, 17110), False, 'from gaze_estimation.utils import load_config\n'), ((1439, 1454), 'os.mkdir', 'os.mkdir', (['fpath'], {}), '(fpath)\n', (1447, 1454), False, 'import os\n'), ((1463, 1488), 'os.mkdirs', 'os.mkdirs', (['(fpath + rgb_fp)'], {}), '(fpath + rgb_fp)\n', (1472, 1488), False, 'import os\n'), ((1742, 1819), 'gaze_estimation.GazeEstimator', 'GazeEstimator', (['config'], {'AVG_LANDMARKS': 'AVG_LANDMARKS', 'num_frames': 'num_avg_frames'}), '(config, AVG_LANDMARKS=AVG_LANDMARKS, num_frames=num_avg_frames)\n', (1755, 1819), False, 'from gaze_estimation import GazeEstimationMethod, GazeEstimator\n'), ((1846, 1884), 'gaze_estimation.gaze_estimator.common.Visualizer', 'Visualizer', (['self.gaze_estimator.camera'], {}), '(self.gaze_estimator.camera)\n', (1856, 1884), False, 'from gaze_estimation.gaze_estimator.common import Face, FacePartsName, Visualizer\n'), ((6110, 6151), 'pathlib.Path', 'pathlib.Path', (['self.config.demo.output_dir'], {}), '(self.config.demo.output_dir)\n', (6122, 6151), False, 'import pathlib\n'), ((6300, 6323), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6321, 6323), False, 'import datetime\n'), ((9627, 9669), 'cv2.resize', 'cv2.resize', (['normalized', '(0, 0)'], {'fy': '(5)', 'fx': '(5)'}), '(normalized, (0, 0), fy=5, fx=5)\n', (9637, 9669), False, 'import cv2\n'), ((10159, 10195), 'cv2.imshow', 'cv2.imshow', (['"""normalized"""', 'normalized'], {}), "('normalized', normalized)\n", (10169, 10195), False, 'import cv2\n'), ((11462, 11501), 'helper_fn.point_to_screen', 'point_to_screen', (['r_avg_cent', 'r_avg_gaze'], {}), '(r_avg_cent, r_avg_gaze)\n', (11477, 11501), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((11524, 11563), 'helper_fn.point_to_screen', 'point_to_screen', (['l_avg_cent', 'l_avg_gaze'], {}), '(l_avg_cent, l_avg_gaze)\n', (11539, 11563), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((11580, 11622), 'numpy.mean', 'np.mean', (['[right_eye_XY[0], left_eye_XY[0]]'], {}), '([right_eye_XY[0], left_eye_XY[0]])\n', (11587, 11622), True, 'import numpy as np\n'), ((11639, 11681), 'numpy.mean', 'np.mean', (['[right_eye_XY[1], left_eye_XY[1]]'], {}), '([right_eye_XY[1], left_eye_XY[1]])\n', (11646, 11681), True, 'import numpy as np\n'), ((11984, 12018), 'cv2.flip', 'cv2.flip', (['self.visualizer.image', '(1)'], {}), '(self.visualizer.image, 1)\n', (11992, 12018), False, 'import cv2\n'), ((12036, 12053), 'helper_fn.round_tup', 'round_tup', (['r_gaze'], {}), '(r_gaze)\n', (12045, 12053), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, 
round_tup\n'), ((12071, 12088), 'helper_fn.round_tup', 'round_tup', (['r_cent'], {}), '(r_cent)\n', (12080, 12088), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((12106, 12123), 'helper_fn.round_tup', 'round_tup', (['l_gaze'], {}), '(l_gaze)\n', (12115, 12123), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((12141, 12158), 'helper_fn.round_tup', 'round_tup', (['l_cent'], {}), '(l_cent)\n', (12150, 12158), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((13633, 13654), 'cv2.flip', 'cv2.flip', (['flip_img', '(1)'], {}), '(flip_img, 1)\n', (13641, 13654), False, 'import cv2\n'), ((18153, 18164), 'time.time', 'time.time', ([], {}), '()\n', (18162, 18164), False, 'import time\n'), ((19659, 19702), 'helper_fn.calc_metrics', 'calc_metrics', (['(demo.true_pos, demo.cur_pos)'], {}), '((demo.true_pos, demo.cur_pos))\n', (19671, 19702), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((19824, 19900), 'draw_utils.plot_pts', 'draw_utils.plot_pts', (['(demo.true_pos, demo.cur_pos)', 'str_name', 'MAE', 'save_path'], {}), '((demo.true_pos, demo.cur_pos), str_name, MAE, save_path)\n', (19843, 19900), False, 'import draw_utils\n'), ((3533, 3544), 'time.time', 'time.time', ([], {}), '()\n', (3542, 3544), False, 'import time\n'), ((3615, 3728), 'cv2.undistort', 'cv2.undistort', (['frame', 'self.gaze_estimator.camera.camera_matrix', 'self.gaze_estimator.camera.dist_coefficients'], {}), '(frame, self.gaze_estimator.camera.camera_matrix, self.\n gaze_estimator.camera.dist_coefficients)\n', (3628, 3728), False, 'import cv2\n'), ((3941, 3952), 'time.time', 'time.time', ([], {}), '()\n', (3950, 3952), False, 'import time\n'), ((6589, 6620), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'H264'"], {}), "(*'H264')\n", (6611, 6620), False, 'import cv2\n'), ((7101, 7140), 'cv2.waitKey', 'cv2.waitKey', (['self.config.demo.wait_time'], {}), '(self.config.demo.wait_time)\n', (7112, 7140), False, 'import cv2\n'), ((9334, 9357), 'numpy.hstack', 'np.hstack', (['[reye, leye]'], {}), '([reye, leye])\n', (9343, 9357), True, 'import numpy as np\n'), ((17964, 17975), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17972, 17975), False, 'import sys\n'), ((18503, 18522), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (18511, 18522), False, 'import os\n'), ((19160, 19369), 'helper_fn.dump_dict', 'dump_dict', (['str_name'], {'items': '[demo.left_eye_cent, demo.left_eye_gaze, demo.right_eye_cent, demo.\n right_eye_gaze, demo.true_pos, demo.dist]', 'item_name': "['lcent', 'lgaze', 'rcent', 'rgaze', 'tpos', 'fdist']"}), "(str_name, items=[demo.left_eye_cent, demo.left_eye_gaze, demo.\n right_eye_cent, demo.right_eye_gaze, demo.true_pos, demo.dist],\n item_name=['lcent', 'lgaze', 'rcent', 'rgaze', 'tpos', 'fdist'])\n", (19169, 19369), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((2918, 2981), 'draw_utils.display_canv', 'draw_utils.display_canv', ([], {'CANV_MODE': 'CANV_MODE', 'cur_pos': 'mid_point'}), '(CANV_MODE=CANV_MODE, cur_pos=mid_point)\n', (2941, 2981), False, 'import draw_utils\n'), ((3403, 3511), 'imutils.resize', 'imutils.resize', (['frame'], {'width': 'self.gaze_estimator.camera.width', 'height': 'self.gaze_estimator.camera.height'}), '(frame, width=self.gaze_estimator.camera.width, height=self.\n gaze_estimator.camera.height)\n', (3417, 3511), False, 'import imutils\n'), ((4854, 4923), 'cv2.resize', 
'cv2.resize', (['self.visualizer.image', '(0, 0)'], {'fy': 'IMG_SCALE', 'fx': 'IMG_SCALE'}), '(self.visualizer.image, (0, 0), fy=IMG_SCALE, fx=IMG_SCALE)\n', (4864, 4923), False, 'import cv2\n'), ((4940, 4982), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'self.visualizer.image'], {}), "('frame', self.visualizer.image)\n", (4950, 4982), False, 'import cv2\n'), ((5041, 5070), 'cv2.moveWindow', 'cv2.moveWindow', (['"""frame"""', '(0)', '(0)'], {}), "('frame', 0, 0)\n", (5055, 5070), False, 'import cv2\n'), ((5530, 5556), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vid_file'], {}), '(vid_file)\n', (5546, 5556), False, 'import cv2\n'), ((5597, 5616), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5613, 5616), False, 'import cv2\n'), ((5677, 5722), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.config.demo.video_path'], {}), '(self.config.demo.video_path)\n', (5693, 5722), False, 'import cv2\n'), ((6669, 6700), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'PIM1'"], {}), "(*'PIM1')\n", (6691, 6700), False, 'import cv2\n'), ((10108, 10150), 'cv2.imwrite', 'cv2.imwrite', (['save_str', 'normalized[:, 300:]'], {}), '(save_str, normalized[:, 300:])\n', (10119, 10150), False, 'import cv2\n'), ((16231, 16273), 'numpy.mean', 'np.mean', (['[right_eye_XY[0], left_eye_XY[0]]'], {}), '([right_eye_XY[0], left_eye_XY[0]])\n', (16238, 16273), True, 'import numpy as np\n'), ((16298, 16340), 'numpy.mean', 'np.mean', (['[right_eye_XY[1], left_eye_XY[1]]'], {}), '([right_eye_XY[1], left_eye_XY[1]])\n', (16305, 16340), True, 'import numpy as np\n'), ((16532, 16578), 'helper_fn.point_to_screen', 'point_to_screen', (['face.center', 'face.gaze_vector'], {}), '(face.center, face.gaze_vector)\n', (16547, 16578), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((19435, 19571), 'helper_fn.dump_dict', 'dump_dict', (['str_name'], {'items': '[demo.face_cent, demo.face_gaze, demo.true_pos, demo.dist]', 'item_name': "['fcent', 'fgaze', 'tpos', 'fdist']"}), "(str_name, items=[demo.face_cent, demo.face_gaze, demo.true_pos,\n demo.dist], item_name=['fcent', 'fgaze', 'tpos', 'fdist'])\n", (19444, 19571), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((10923, 10982), 'numpy.array', 'np.array', (['[x[0] for x in self.right_eye_gaze[-num_frames:]]'], {}), '([x[0] for x in self.right_eye_gaze[-num_frames:]])\n', (10931, 10982), True, 'import numpy as np\n'), ((11011, 11070), 'numpy.array', 'np.array', (['[x[1] for x in self.right_eye_gaze[-num_frames:]]'], {}), '([x[1] for x in self.right_eye_gaze[-num_frames:]])\n', (11019, 11070), True, 'import numpy as np\n'), ((11099, 11158), 'numpy.array', 'np.array', (['[x[2] for x in self.right_eye_gaze[-num_frames:]]'], {}), '([x[2] for x in self.right_eye_gaze[-num_frames:]])\n', (11107, 11158), True, 'import numpy as np\n'), ((11189, 11247), 'numpy.array', 'np.array', (['[x[0] for x in self.left_eye_gaze[-num_frames:]]'], {}), '([x[0] for x in self.left_eye_gaze[-num_frames:]])\n', (11197, 11247), True, 'import numpy as np\n'), ((11276, 11334), 'numpy.array', 'np.array', (['[x[1] for x in self.left_eye_gaze[-num_frames:]]'], {}), '([x[1] for x in self.left_eye_gaze[-num_frames:]])\n', (11284, 11334), True, 'import numpy as np\n'), ((11363, 11421), 'numpy.array', 'np.array', (['[x[2] for x in self.left_eye_gaze[-num_frames:]]'], {}), '([x[2] for x in self.left_eye_gaze[-num_frames:]])\n', (11371, 11421), True, 'import numpy as np\n'), ((14579, 14623), 'helper_fn.point_to_screen', 
'point_to_screen', (['eye.center', 'eye.gaze_vector'], {}), '(eye.center, eye.gaze_vector)\n', (14594, 14623), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((14801, 14845), 'helper_fn.point_to_screen', 'point_to_screen', (['eye.center', 'eye.gaze_vector'], {}), '(eye.center, eye.gaze_vector)\n', (14816, 14845), False, 'from helper_fn import point_to_screen, dump_dict, calc_metrics, round_tup\n'), ((3826, 3837), 'time.time', 'time.time', ([], {}), '()\n', (3835, 3837), False, 'import time\n'), ((4081, 4092), 'time.time', 'time.time', ([], {}), '()\n', (4090, 4092), False, 'import time\n'), ((10263, 10322), 'numpy.array', 'np.array', (['[x[0] for x in self.right_eye_cent[-num_frames:]]'], {}), '([x[0] for x in self.right_eye_cent[-num_frames:]])\n', (10271, 10322), True, 'import numpy as np\n'), ((10355, 10414), 'numpy.array', 'np.array', (['[x[1] for x in self.right_eye_cent[-num_frames:]]'], {}), '([x[1] for x in self.right_eye_cent[-num_frames:]])\n', (10363, 10414), True, 'import numpy as np\n'), ((10447, 10506), 'numpy.array', 'np.array', (['[x[2] for x in self.right_eye_cent[-num_frames:]]'], {}), '([x[2] for x in self.right_eye_cent[-num_frames:]])\n', (10455, 10506), True, 'import numpy as np\n'), ((10541, 10599), 'numpy.array', 'np.array', (['[x[0] for x in self.left_eye_cent[-num_frames:]]'], {}), '([x[0] for x in self.left_eye_cent[-num_frames:]])\n', (10549, 10599), True, 'import numpy as np\n'), ((10632, 10690), 'numpy.array', 'np.array', (['[x[1] for x in self.left_eye_cent[-num_frames:]]'], {}), '([x[1] for x in self.left_eye_cent[-num_frames:]])\n', (10640, 10690), True, 'import numpy as np\n'), ((10723, 10781), 'numpy.array', 'np.array', (['[x[2] for x in self.left_eye_cent[-num_frames:]]'], {}), '([x[2] for x in self.left_eye_cent[-num_frames:]])\n', (10731, 10781), True, 'import numpy as np\n')]
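A minimal, self-contained sketch of the smoothing idea behind Demo.avg_frames() above: average the last num_frames gaze vectors component-wise before projecting to the screen. The vectors below are made up, and point_to_screen itself lives in helper_fn and is not reproduced here.

import numpy as np

num_frames = 3
gaze_history = [np.array([0.10, -0.20, -0.97]),
                np.array([0.12, -0.18, -0.98]),
                np.array([0.11, -0.21, -0.96])]  # synthetic gaze vectors

avg_gaze = np.mean(gaze_history[-num_frames:], axis=0)
print(avg_gaze)  # one smoothed direction that would be fed to point_to_screen()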
import numpy as np

from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float, Array


class KSfunction(object):
    """Helper class that can be used inside other components to aggregate
    constraint vectors with a KS function."""

    def compute(self, g, rho=50):
        """Gets the value of the KS function for the given array of
        constraints."""

        self.rho = rho
        self.g_max = np.max(g)
        self.g_diff = g - self.g_max
        self.exponents = np.exp(rho * self.g_diff)
        self.summation = np.sum(self.exponents)
        self.KS = self.g_max + 1.0/rho * np.log(self.summation)

        return self.KS

    def derivatives(self):
        """returns a row vector of [dKS_dg, dKS_drho]"""
        dsum_dg = self.rho*self.exponents
        dKS_dsum = 1.0/self.rho/self.summation
        self.dKS_dg = dKS_dsum * dsum_dg

        dsum_drho = np.sum(self.g_diff*self.exponents)
        self.dKS_drho = dKS_dsum * dsum_drho

        return self.dKS_dg, self.dKS_drho


class KSComp(Component):
    """Aggregates a number of functions to a single value via the
    Kreisselmeier-Steinhauser Function."""

    rho = Float(.1, iotype="in", desc="Hyperparameter for the KS function")

    KS = Float(0, iotype="out", desc="Value of the aggregate KS function")

    def __init__(self, n=2):
        super(KSComp, self).__init__()

        self.n = n

        self.add('g', Array(np.zeros((n,)), size=(n, 1), dtype=Float, iotype="in",
                             desc="Array of function values to be aggregated"))

        self._ks = KSfunction()

    def execute(self):
        self.KS = self._ks.compute(self.g, self.rho)

    def linearize(self):
        """Linearize around the last executed point"""

        # use g_max, exponents, summation from last executed point
        self.J = np.hstack(self._ks.derivatives())

    def provideDer(self):
        ins = ('g', 'rho')
        outs = ('KS',)
        return ins, outs, self.J
[ "numpy.sum", "numpy.log", "openmdao.lib.datatypes.api.Float", "numpy.max", "numpy.exp" ]
[((1169, 1235), 'openmdao.lib.datatypes.api.Float', 'Float', (['(0.1)'], {'iotype': '"""in"""', 'desc': '"""Hyperparameter for the KS function"""'}), "(0.1, iotype='in', desc='Hyperparameter for the KS function')\n", (1174, 1235), False, 'from openmdao.lib.datatypes.api import Float, Array\n'), ((1277, 1342), 'openmdao.lib.datatypes.api.Float', 'Float', (['(0)'], {'iotype': '"""out"""', 'desc': '"""Value of the aggregate KS function"""'}), "(0, iotype='out', desc='Value of the aggregate KS function')\n", (1282, 1342), False, 'from openmdao.lib.datatypes.api import Float, Array\n'), ((427, 436), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (433, 436), True, 'import numpy as np\n'), ((497, 522), 'numpy.exp', 'np.exp', (['(rho * self.g_diff)'], {}), '(rho * self.g_diff)\n', (503, 522), True, 'import numpy as np\n'), ((548, 570), 'numpy.sum', 'np.sum', (['self.exponents'], {}), '(self.exponents)\n', (554, 570), True, 'import numpy as np\n'), ((895, 931), 'numpy.sum', 'np.sum', (['(self.g_diff * self.exponents)'], {}), '(self.g_diff * self.exponents)\n', (901, 931), True, 'import numpy as np\n'), ((612, 634), 'numpy.log', 'np.log', (['self.summation'], {}), '(self.summation)\n', (618, 634), True, 'import numpy as np\n')]
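A quick numeric check of the KS aggregate implemented above: it is a smooth upper bound on max(g) that tightens as rho grows.

import numpy as np

g = np.array([-0.5, 0.1, 0.3])
for rho in (10, 50, 200):
    g_max = np.max(g)
    ks = g_max + np.log(np.sum(np.exp(rho * (g - g_max)))) / rho
    print(rho, ks)  # approaches max(g) = 0.3 from above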
# -*- coding: utf-8 -*- """ @author: <NAME> """ from __future__ import division import finite_difference as fd import numpy as np def rtm1d(v,seis,dt,dz): nt = len(seis) nx = len(v) a = fd.alpha(v,dt,dz) ul, u, up = np.zeros((3,nx)) data = np.zeros((nt,nx)) g = np.zeros(u.shape) g[0] = 1 ul += g*seis[nt-1] u += g*seis[nt-2] for i in xrange(nt-3,-1,-1): src = g*seis[i] ul[0]=2*u[0]-up[0]+a[0]**2*(u[1]-2*u[0]) + src[0] ul[1:nx-1]=2*u[1:nx-1]-up[1:nx-1]+a[1:nx-1]**2*(u[2:nx]-2*u[1:nx-1]+ \ u[0:nx-2]) + src[1:nx-1] ul = fd.abc1D(u, ul, a, src) up = np.copy(u) u = np.copy(ul) data[i] = np.copy(u) return data def rtm2D(v,shotgt,dt,dx,dz): # rtm 2D with different algorithm nz,nx = v.shape nt = shotgt[:,0].size ul, u, up = np.zeros((3,nz,nx)) up[0,:], u[0,:], ul[0,:] = shotgt[nt-3:nt,:] a = fd.alpha(v,dt,dx)**2 a2 = 2-4*a data = np.zeros((nt, nz, nx)) e = (np.exp(-((0.015*(20-np.arange(1,21)))**2) ))**10 c = 2 for i in xrange(nt-2,-1,-1): c+=1 b = min(c,nz) for iz in xrange(b): ul[iz,0:20] = e*ul[iz,0:20] u[iz,0:20] = e*u[iz,0:20] ul[iz,nx-20:] = e[::-1]*ul[iz,nx-20:] u[iz,nx-20:] = e[::-1]*u[iz,nx-20:] if b >= (nz-20): for iz in xrange(nz-20,nz): ul[iz] = e[nz-iz-1]*ul[iz] u[iz] = e[nz-iz-1]*u[iz] if b == nz: d = nz-2 else: d = b up[0:b,1:nx-1] = up[0:b,1:nx-1]-ul[0:b,1:nx-1] u[1:d,1:nx-1] = a2[1:d,1:nx-1]*ul[1:d,1:nx-1]+u[1:d,1:nx-1]+a[1:d,2:nx]*ul[1:d,2:nx]\ +a[1:d,0:nx-2]*ul[1:d,0:nx-2]+a[2:d+1,1:nx-1]*ul[2:d+1,1:nx-1]+\ +a[0:d-1,1:nx-1]*ul[0:d-1,1:nx-1] u[0,1:nx-1] = a2[0,1:nx-1]*ul[0,1:nx-1]+u[0,1:nx-1]+a[0,2:nx]*ul[0,2:nx]\ +a[0,0:nx-2]*ul[0,0:nx-2]+a[1,1:nx-1]*ul[1,1:nx-1] if b == nz: u[nz-1,1:nx-1] = a2[nz-1,1:nx-1]*ul[nz-1,1:nx-1]+u[nz-1,1:nx-1]\ +a[nz-1,2:nx]*ul[nz-1,2:nx]+a[nz-1,0:nx-2]*ul[nz-1,0:nx-2]\ +a[nz-2,1:nx-1]*ul[nz-1,1:nx-1] u[nz-1,0] = a2[nz-1,0]*ul[nz-1,0]+u[nz-1,0]+a[nz-1,1]*ul[nz-1,1]\ +a[nz-2,0]*ul[nz-2,0] u[1:d,0] = a2[1:d,0]*ul[1:d,0]+u[1:d,0]+a[1:d,1]*ul[1:d,1]+a[2:d+1,0]\ *ul[2:d+1,0]+a[0:d-1,0]*ul[0:d-1,0] u[1:d,nx-1] = a2[1:d,nx-1]*ul[1:d,nx-1]+u[1:d,nx-1]+a[1:d,nx-2]*ul[1:d,nx-2]\ +a[2:d+1,nx-1]*ul[2:d+1,nx-1]+a[0:d-1,nx-1]*ul[0:d-1,nx-1] u[0,0] = a2[0,0]*ul[0,0]+u[0,0]+a[0,1]*ul[0,1]+a[1,0]*ul[1,0] u[0,nx-1] = a2[0,nx-1]*ul[0,nx-1]+u[0,nx-1]+a[0,nx-1]*ul[0,nx-1]+a[1,nx-1]*ul[1,nx-1] ul = np.copy(u) u = np.copy(up) if i > 1: up[1:nz-1] = 0; up[0] = shotgt[i-3,:] data[i] = ul return data
[ "numpy.copy", "finite_difference.abc1D", "numpy.zeros", "numpy.arange", "finite_difference.alpha" ]
[((202, 221), 'finite_difference.alpha', 'fd.alpha', (['v', 'dt', 'dz'], {}), '(v, dt, dz)\n', (210, 221), True, 'import finite_difference as fd\n'), ((236, 253), 'numpy.zeros', 'np.zeros', (['(3, nx)'], {}), '((3, nx))\n', (244, 253), True, 'import numpy as np\n'), ((264, 282), 'numpy.zeros', 'np.zeros', (['(nt, nx)'], {}), '((nt, nx))\n', (272, 282), True, 'import numpy as np\n'), ((290, 307), 'numpy.zeros', 'np.zeros', (['u.shape'], {}), '(u.shape)\n', (298, 307), True, 'import numpy as np\n'), ((868, 889), 'numpy.zeros', 'np.zeros', (['(3, nz, nx)'], {}), '((3, nz, nx))\n', (876, 889), True, 'import numpy as np\n'), ((992, 1014), 'numpy.zeros', 'np.zeros', (['(nt, nz, nx)'], {}), '((nt, nz, nx))\n', (1000, 1014), True, 'import numpy as np\n'), ((618, 641), 'finite_difference.abc1D', 'fd.abc1D', (['u', 'ul', 'a', 'src'], {}), '(u, ul, a, src)\n', (626, 641), True, 'import finite_difference as fd\n'), ((655, 665), 'numpy.copy', 'np.copy', (['u'], {}), '(u)\n', (662, 665), True, 'import numpy as np\n'), ((678, 689), 'numpy.copy', 'np.copy', (['ul'], {}), '(ul)\n', (685, 689), True, 'import numpy as np\n'), ((708, 718), 'numpy.copy', 'np.copy', (['u'], {}), '(u)\n', (715, 718), True, 'import numpy as np\n'), ((945, 964), 'finite_difference.alpha', 'fd.alpha', (['v', 'dt', 'dx'], {}), '(v, dt, dx)\n', (953, 964), True, 'import finite_difference as fd\n'), ((2958, 2968), 'numpy.copy', 'np.copy', (['u'], {}), '(u)\n', (2965, 2968), True, 'import numpy as np\n'), ((2981, 2992), 'numpy.copy', 'np.copy', (['up'], {}), '(up)\n', (2988, 2992), True, 'import numpy as np\n'), ((1044, 1060), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (1053, 1060), True, 'import numpy as np\n')]
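An illustrative, self-contained version of the interior update used by rtm1d above. Treating fd.alpha() as the Courant number a = v*dt/dz is an assumption (it is consistent with the stencil, but the finite_difference module itself is not shown here).

import numpy as np

nx = 200
v, dt, dz = 2000.0, 1e-3, 4.0         # velocity, time step, grid step
a = (v * dt / dz) * np.ones(nx)       # assumed Courant number; a <= 1 for stability

up = np.zeros(nx)                     # wavefield at t - dt
u = np.zeros(nx); u[nx // 2] = 1.0    # wavefield at t, with a point impulse
ul = np.zeros(nx)                     # wavefield at t + dt

# One interior step of the same second-order stencil used in rtm1d.
ul[1:nx-1] = 2*u[1:nx-1] - up[1:nx-1] + a[1:nx-1]**2 * (
    u[2:nx] - 2*u[1:nx-1] + u[0:nx-2])
print(ul[nx//2 - 2:nx//2 + 3])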
from scipy.io import loadmat import tables import numpy as np import matplotlib.pyplot as plt import pandas as pd import os, os.path import time import scipy.signal from scipy import signal from lmfit import minimize, Parameters import scipy.optimize as optimization import operator class temperature_preprocessing_extract_phase_amplitude(): def __init__(self,exp_setup,line_info,time_stamp): self.exp_setup = exp_setup # exp_setup = {'px':25/10**6,'f_heating':1,'gap':20} self.line_info = line_info # line_info = {'N_line_groups':N_line_groups,'N_horizontal_lines':N_horizontal_lines,'N_files':N_files} self.time_stamp = time_stamp def butter_highpass(self,cutoff, fs, order=5): nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = signal.butter(order, normal_cutoff, btype='high', analog=False) return b, a def butter_highpass_filter(self,data, cutoff, fs, order=4): b, a = self.butter_highpass(cutoff, fs, order=order) y = signal.filtfilt(b, a, data) return y def filter_signal(self,df_rec,f0): cutoff = f0*0.5 fs = (df_rec.shape[0])/(max(df_rec['reltime'])-min(df_rec['reltime'])) # Plot the frequency response for a few different orders. time = df_rec['reltime'] N = df_rec.shape[1]-1 df_filtered = pd.DataFrame(data = {'reltime':np.array(df_rec['reltime'])}) for i in range(N): temp = (self.butter_highpass_filter(df_rec[i],cutoff,fs)) df_filtered[i] = np.array(temp) return df_filtered def sin_func(self,x,amplitude,phase,bias,f_heating): return amplitude*np.sin(2*np.pi*f_heating*x + phase)+bias def residual(self,params, x, data, eps_data): amplitude = params['amplitude'] phase = params['phase'] bias = params['bias'] freq = params['frequency'] model = amplitude*np.sin(2*np.pi*freq*x + phase)+bias return (data-model) / eps_data def extract_phase_amplitude_sinusoidal_function(self,index,df_temperature): px = self.exp_setup['px'] f_heating = self.exp_setup['f_heating'] gap = self.exp_setup['gap'] fitting_params_initial = {'amplitude':0.2,'phase':0.1,'bias':0.1} n_col = df_temperature.shape[1] tmin = df_temperature['reltime'][0] time = df_temperature['reltime']-tmin # A1 = df_temperature.iloc[:,index[0]+3] # A2 = df_temperature.iloc[:,index[1]+3] A1 = df_temperature[index[0]] A2 = df_temperature[index[1]] A1-= A1.mean() A2-= A2.mean() x0 = np.array([1,0,0]) # amplitude,phase,bias sigma = np.ones(len(time)) params1 = Parameters() params1.add('amplitude', value=fitting_params_initial['amplitude']) params1.add('phase', value=fitting_params_initial['phase']) params1.add('bias', value=fitting_params_initial['bias']) params1.add('frequency', value=f_heating,vary=False) res1 = minimize(self.residual, params1, args=(time, A1, sigma)) params2 = Parameters() params2.add('amplitude', value=fitting_params_initial['amplitude']) params2.add('phase', value=fitting_params_initial['phase']) params2.add('bias', value=fitting_params_initial['bias']) params2.add('frequency', value=f_heating,vary=False) res2 = minimize(self.residual, params2, args=(time, A2, sigma)) amp1 = np.abs(res1.params['amplitude'].value) amp2 = np.abs(res2.params['amplitude'].value) p1 = res1.params['phase'].value p2 = res2.params['phase'].value amp_ratio = min(np.abs(amp1/amp2),np.abs(amp2/amp1)) phase_diff = np.abs(p1-p2) if phase_diff>2*np.pi: phase_diff = phase_diff - 2*np.pi if phase_diff>np.pi/2: phase_diff = np.pi - phase_diff T_total = np.max(time)-np.min(time) df = 1/T_total L = abs(index[0]-index[1])*px*gap w = 2*np.pi*f_heating return L, phase_diff,amp_ratio def extract_phase_amplitude_Fourier_transform(self,index,df_temperature): px = 
self.exp_setup['px'] f_heating = self.exp_setup['f_heating'] gap = self.exp_setup['gap'] n_col = df_temperature.shape[1] tmin = df_temperature['reltime'][0] time = df_temperature['reltime']-tmin fft_X1 = np.fft.fft(df_temperature.iloc[:,index[0]+3]) fft_X2 = np.fft.fft(df_temperature.iloc[:,index[1]+3]) T_total = np.max(time)-np.min(time) df = 1/T_total N_0 = int(f_heating/df) magnitude_X1 = np.abs(fft_X1) magnitude_X2 = np.abs(fft_X2) phase_X1 = np.angle(fft_X1) phase_X2 = np.angle(fft_X2) N1, Amp1 = max(enumerate(magnitude_X1[N_0-5:N_0+5]), key=operator.itemgetter(1)) N2, Amp2 = max(enumerate(magnitude_X2[N_0-5:N_0+5]), key=operator.itemgetter(1)) Nf = N_0+N1-5 amp_ratio = magnitude_X1[Nf]/magnitude_X2[Nf] phase_diff = phase_X1[Nf]-phase_X2[Nf] if phase_diff<0: phase_diff = phase_diff+np.pi*2 L = abs(index[0]-index[1])*px*gap return L, phase_diff,amp_ratio def fit_amp_phase_one_batch(self,df_temperature,method): px = self.exp_setup['px'] f_heating = self.exp_setup['f_heating'] gap = self.exp_setup['gap'] N_lines = df_temperature.shape[1]-1 x_list = np.zeros(N_lines-1) phase_diff_list = np.zeros(N_lines-1) amp_ratio_list = np.zeros(N_lines-1) for i in range(N_lines): if i>0: index = [0,i] if method == 'fft': x_list[i-1],phase_diff_list[i-1], amp_ratio_list[i-1] = self.extract_phase_amplitude_Fourier_transform(index,df_temperature) else: x_list[i-1],phase_diff_list[i-1], amp_ratio_list[i-1] = self.extract_phase_amplitude_sinusoidal_function(index,df_temperature) return x_list,phase_diff_list,amp_ratio_list def extract_temperature_from_IR(self,X0,Y0,rec_name,N_avg): # this function takes the average of N pixels in Y0 direction, typically N = 100 gap = self.exp_setup['gap'] N_line_groups = self.line_info['N_line_groups'] N_horizontal_lines = self.line_info['N_horizontal_lines'] N_files = self.line_info['N_files'] T = np.zeros((N_line_groups,N_horizontal_lines,N_files)) for k in range(N_files): temp = pd.read_csv(self.line_info['data_path']+rec_name+str(k)+'.csv') for j in range(N_line_groups): for i in range(N_horizontal_lines): T[j,i,k] = temp.iloc[Y0-int(N_avg/2):Y0+int(N_avg/2),X0-j-gap*i].mean() # for T, first dim is line group, 2nd dimension is # of lines, 3rd dim is number of files return T def batch_process_horizontal_lines(self,T,method): #T averaged temperature for N_lines and N_line_groups and N_frames x_list_all = [] phase_diff_list_all = [] amp_ratio_list_all = [] N_horizontal_lines = self.line_info['N_horizontal_lines'] N_line_groups = self.line_info['N_line_groups'] px = self.exp_setup['px'] f_heating = self.exp_setup['f_heating'] gap = self.exp_setup['gap'] time_stamp = self.time_stamp for j in range(N_line_groups): horinzontal_temp = T[j,:,:].T df = pd.DataFrame(horinzontal_temp) df['reltime'] = time_stamp['reltime'] df_filtered = self.filter_signal(df,f_heating) x_list,phase_diff_list,amp_ratio_list = self.fit_amp_phase_one_batch(df_filtered,method) x_list_all = x_list_all+list(x_list) phase_diff_list_all = phase_diff_list_all+list(phase_diff_list) amp_ratio_list_all = amp_ratio_list_all+list(amp_ratio_list) df_result_IR = pd.DataFrame(data = {'x':x_list_all,'amp_ratio':amp_ratio_list_all,'phase_diff':phase_diff_list_all}) return df_result_IR
[ "pandas.DataFrame", "numpy.abs", "scipy.signal.filtfilt", "numpy.fft.fft", "numpy.angle", "numpy.zeros", "numpy.max", "lmfit.minimize", "numpy.array", "numpy.min", "numpy.sin", "operator.itemgetter", "scipy.signal.butter", "lmfit.Parameters" ]
[((819, 882), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoff'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='high', analog=False)\n", (832, 882), False, 'from scipy import signal\n'), ((1041, 1068), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1056, 1068), False, 'from scipy import signal\n'), ((2673, 2692), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2681, 2692), True, 'import numpy as np\n'), ((2769, 2781), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (2779, 2781), False, 'from lmfit import minimize, Parameters\n'), ((3069, 3125), 'lmfit.minimize', 'minimize', (['self.residual', 'params1'], {'args': '(time, A1, sigma)'}), '(self.residual, params1, args=(time, A1, sigma))\n', (3077, 3125), False, 'from lmfit import minimize, Parameters\n'), ((3145, 3157), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (3155, 3157), False, 'from lmfit import minimize, Parameters\n'), ((3444, 3500), 'lmfit.minimize', 'minimize', (['self.residual', 'params2'], {'args': '(time, A2, sigma)'}), '(self.residual, params2, args=(time, A2, sigma))\n', (3452, 3500), False, 'from lmfit import minimize, Parameters\n'), ((3517, 3555), 'numpy.abs', 'np.abs', (["res1.params['amplitude'].value"], {}), "(res1.params['amplitude'].value)\n", (3523, 3555), True, 'import numpy as np\n'), ((3571, 3609), 'numpy.abs', 'np.abs', (["res2.params['amplitude'].value"], {}), "(res2.params['amplitude'].value)\n", (3577, 3609), True, 'import numpy as np\n'), ((3784, 3799), 'numpy.abs', 'np.abs', (['(p1 - p2)'], {}), '(p1 - p2)\n', (3790, 3799), True, 'import numpy as np\n'), ((4509, 4557), 'numpy.fft.fft', 'np.fft.fft', (['df_temperature.iloc[:, index[0] + 3]'], {}), '(df_temperature.iloc[:, index[0] + 3])\n', (4519, 4557), True, 'import numpy as np\n'), ((4572, 4620), 'numpy.fft.fft', 'np.fft.fft', (['df_temperature.iloc[:, index[1] + 3]'], {}), '(df_temperature.iloc[:, index[1] + 3])\n', (4582, 4620), True, 'import numpy as np\n'), ((4743, 4757), 'numpy.abs', 'np.abs', (['fft_X1'], {}), '(fft_X1)\n', (4749, 4757), True, 'import numpy as np\n'), ((4781, 4795), 'numpy.abs', 'np.abs', (['fft_X2'], {}), '(fft_X2)\n', (4787, 4795), True, 'import numpy as np\n'), ((4816, 4832), 'numpy.angle', 'np.angle', (['fft_X1'], {}), '(fft_X1)\n', (4824, 4832), True, 'import numpy as np\n'), ((4852, 4868), 'numpy.angle', 'np.angle', (['fft_X2'], {}), '(fft_X2)\n', (4860, 4868), True, 'import numpy as np\n'), ((5598, 5619), 'numpy.zeros', 'np.zeros', (['(N_lines - 1)'], {}), '(N_lines - 1)\n', (5606, 5619), True, 'import numpy as np\n'), ((5644, 5665), 'numpy.zeros', 'np.zeros', (['(N_lines - 1)'], {}), '(N_lines - 1)\n', (5652, 5665), True, 'import numpy as np\n'), ((5689, 5710), 'numpy.zeros', 'np.zeros', (['(N_lines - 1)'], {}), '(N_lines - 1)\n', (5697, 5710), True, 'import numpy as np\n'), ((6570, 6624), 'numpy.zeros', 'np.zeros', (['(N_line_groups, N_horizontal_lines, N_files)'], {}), '((N_line_groups, N_horizontal_lines, N_files))\n', (6578, 6624), True, 'import numpy as np\n'), ((8132, 8240), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x': x_list_all, 'amp_ratio': amp_ratio_list_all, 'phase_diff':\n phase_diff_list_all}"}), "(data={'x': x_list_all, 'amp_ratio': amp_ratio_list_all,\n 'phase_diff': phase_diff_list_all})\n", (8144, 8240), True, 'import pandas as pd\n'), ((1570, 1584), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (1578, 1584), True, 'import numpy as np\n'), ((3717, 3736), 'numpy.abs', 
'np.abs', (['(amp1 / amp2)'], {}), '(amp1 / amp2)\n', (3723, 3736), True, 'import numpy as np\n'), ((3735, 3754), 'numpy.abs', 'np.abs', (['(amp2 / amp1)'], {}), '(amp2 / amp1)\n', (3741, 3754), True, 'import numpy as np\n'), ((3970, 3982), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (3976, 3982), True, 'import numpy as np\n'), ((3983, 3995), 'numpy.min', 'np.min', (['time'], {}), '(time)\n', (3989, 3995), True, 'import numpy as np\n'), ((4637, 4649), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (4643, 4649), True, 'import numpy as np\n'), ((4650, 4662), 'numpy.min', 'np.min', (['time'], {}), '(time)\n', (4656, 4662), True, 'import numpy as np\n'), ((7669, 7699), 'pandas.DataFrame', 'pd.DataFrame', (['horinzontal_temp'], {}), '(horinzontal_temp)\n', (7681, 7699), True, 'import pandas as pd\n'), ((1699, 1740), 'numpy.sin', 'np.sin', (['(2 * np.pi * f_heating * x + phase)'], {}), '(2 * np.pi * f_heating * x + phase)\n', (1705, 1740), True, 'import numpy as np\n'), ((1955, 1991), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq * x + phase)'], {}), '(2 * np.pi * freq * x + phase)\n', (1961, 1991), True, 'import numpy as np\n'), ((4939, 4961), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4958, 4961), False, 'import operator\n'), ((5028, 5050), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5047, 5050), False, 'import operator\n'), ((1413, 1440), 'numpy.array', 'np.array', (["df_rec['reltime']"], {}), "(df_rec['reltime'])\n", (1421, 1440), True, 'import numpy as np\n')]
# Copyright (c) 2020 Foundry. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## from __future__ import print_function import sys import os import time import scipy.misc import numpy as np import cv2 import tensorflow as tf tf.compat.v1.disable_eager_execution() # For TF 2.x compatibility from models.baseModel import BaseModel from models.common.model_builder import baseline_model from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear import message_pb2 class Model(BaseModel): """Load your trained model and do inference in Nuke""" def __init__(self): super(Model, self).__init__() self.name = 'Regression Template TF' self.n_levels = 3 self.scale = 0.5 dir_path = os.path.dirname(os.path.realpath(__file__)) self.checkpoints_dir = os.path.join(dir_path, 'checkpoints') self.patch_size = 50 self.output_param_number = 1 # Initialise checkpoint name to the latest checkpoint ckpt_names = get_ckpt_list(self.checkpoints_dir) if not ckpt_names: # empty list self.checkpoint_name = '' else: latest_ckpt = tf.compat.v1.train.latest_checkpoint(self.checkpoints_dir) if latest_ckpt is not None: self.checkpoint_name = latest_ckpt.split('/')[-1] else: self.checkpoint_name = ckpt_names[-1] self.prev_ckpt_name = self.checkpoint_name # Silence TF log when creating tf.Session() os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Define options self.gamma_to_predict = 1.0 self.predict = False self.options = ('checkpoint_name', 'gamma_to_predict',) self.buttons = ('predict',) # Define inputs/outputs self.inputs = {'input': 3} self.outputs = {'output': 3} def load(self, model): # Check if empty or invalid checkpoint name if self.checkpoint_name=='': ckpt_names = get_ckpt_list(self.checkpoints_dir) if not ckpt_names: raise ValueError("No checkpoints found in {}".format(self.checkpoints_dir)) else: raise ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})" .format(self.checkpoints_dir, ckpt_names[-1])) print_("Loading trained model checkpoint...\n", 'm') # Load from given checkpoint file name self.saver.restore(self.sess, os.path.join(self.checkpoints_dir, self.checkpoint_name)) print_("...Checkpoint {} loaded\n".format(self.checkpoint_name), 'm') def inference(self, image_list): """Do an inference on the model with a set of inputs. # Arguments: image_list: The input image list Return the result of the inference. """ image = image_list[0] image = linear_to_srgb(image).copy() if not hasattr(self, 'sess'): # Initialise tensorflow graph tf.compat.v1.reset_default_graph() config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth=True self.sess=tf.compat.v1.Session(config=config) # Input is stacked histograms of original and gamma-graded images. 
input_shape = [1, 2, 100] # Initialise input placeholder size self.input = tf.compat.v1.placeholder(tf.float32, shape=input_shape) self.model = baseline_model( input_shape=input_shape[1:], output_param_number=self.output_param_number) self.infer_op = self.model(self.input) # Load latest model checkpoint self.saver = tf.compat.v1.train.Saver() self.load(self.model) self.prev_ckpt_name = self.checkpoint_name # If checkpoint name has changed, load new checkpoint if self.prev_ckpt_name != self.checkpoint_name or self.checkpoint_name == '': self.load(self.model) # If checkpoint correctly loaded, update previous checkpoint name self.prev_ckpt_name = self.checkpoint_name # Preprocess image same way we preprocessed it for training # Here for gamma correction compute histograms def histogram(x, value_range=[0.0, 1.0], nbins=100): """Return histogram of tensor x""" h, w, c = x.shape hist = tf.histogram_fixed_width(x, value_range, nbins=nbins) hist = tf.divide(hist, h * w * c) return hist with tf.compat.v1.Session() as sess: # Convert to grayscale img_gray = tf.image.rgb_to_grayscale(image) img_gray = tf.image.resize(img_gray, [self.patch_size, self.patch_size]) # Apply gamma correction img_gray_grade = tf.math.pow(img_gray, self.gamma_to_predict) img_grade = tf.math.pow(image, self.gamma_to_predict) # Compute histograms img_hist = histogram(img_gray) img_grade_hist = histogram(img_gray_grade) hists_op = tf.stack([img_hist, img_grade_hist], axis=0) hists, img_grade = sess.run([hists_op, img_grade]) res_img = srgb_to_linear(img_grade) hists_batch = np.expand_dims(hists, 0) start = time.time() # Run model inference inference = self.sess.run(self.infer_op, feed_dict={self.input: hists_batch}) duration = time.time() - start print('Inference duration: {:4.3f}s'.format(duration)) res = inference[-1] print("Predicted gamma: {}".format(res)) # If predict button is pressed in Nuke if self.predict: script_msg = message_pb2.FieldValuePairAttrib() script_msg.name = "PythonScript" # Create a Python script message to run in Nuke python_script = self.nuke_script(res) script_msg_val = script_msg.values.add() script_msg_str = script_msg_val.string_attributes.add() script_msg_str.values.extend([python_script]) return [res_img, script_msg] return [res_img] def nuke_script(self, res): """Return the Python script function to create a pop up window in Nuke.""" popup_msg = "Predicted gamma: {}".format(res) script = "nuke.message('{}')\n".format(popup_msg) return script
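
# Hedged standalone sketch (not part of the original class): a NumPy
# equivalent of the nested `histogram` helper used in inference, handy for
# checking the preprocessing outside a TF session. Note np.histogram drops
# out-of-range values, while tf.histogram_fixed_width clamps them into the
# edge bins, so the two agree only for data inside value_range.
def np_histogram(x, value_range=(0.0, 1.0), nbins=100):
    """Return the normalized histogram of array x, approximately matching
    tf.histogram_fixed_width(x, value_range, nbins) / x.size."""
    hist, _ = np.histogram(x, bins=nbins, range=value_range)
    return hist.astype(np.float32) / x.size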
[ "tensorflow.image.rgb_to_grayscale", "models.common.util.print_", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.histogram_fixed_width", "tensorflow.divide", "os.path.join", "message_pb2.FieldValuePairAttrib", "tensorflow.compat.v1.placeholder", "tensorflow.stack", "tensorflow.compat.v1.Session", "models.common.model_builder.baseline_model", "tensorflow.math.pow", "tensorflow.compat.v1.train.latest_checkpoint", "models.common.util.srgb_to_linear", "os.path.realpath", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.ConfigProto", "models.common.util.linear_to_srgb", "models.common.util.get_ckpt_list", "numpy.expand_dims", "time.time", "tensorflow.compat.v1.reset_default_graph", "tensorflow.image.resize" ]
[((802, 840), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (838, 840), True, 'import tensorflow as tf\n'), ((1405, 1442), 'os.path.join', 'os.path.join', (['dir_path', '"""checkpoints"""'], {}), "(dir_path, 'checkpoints')\n", (1417, 1442), False, 'import os\n'), ((1593, 1628), 'models.common.util.get_ckpt_list', 'get_ckpt_list', (['self.checkpoints_dir'], {}), '(self.checkpoints_dir)\n', (1606, 1628), False, 'from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear\n'), ((2935, 2987), 'models.common.util.print_', 'print_', (['"""Loading trained model checkpoint...\n"""', '"""m"""'], {}), "('Loading trained model checkpoint...\\n', 'm')\n", (2941, 2987), False, 'from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear\n'), ((5872, 5896), 'numpy.expand_dims', 'np.expand_dims', (['hists', '(0)'], {}), '(hists, 0)\n', (5886, 5896), True, 'import numpy as np\n'), ((5913, 5924), 'time.time', 'time.time', ([], {}), '()\n', (5922, 5924), False, 'import time\n'), ((1346, 1372), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1362, 1372), False, 'import os\n'), ((1747, 1805), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.compat.v1.train.latest_checkpoint', (['self.checkpoints_dir'], {}), '(self.checkpoints_dir)\n', (1783, 1805), True, 'import tensorflow as tf\n'), ((2582, 2617), 'models.common.util.get_ckpt_list', 'get_ckpt_list', (['self.checkpoints_dir'], {}), '(self.checkpoints_dir)\n', (2595, 2617), False, 'from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear\n'), ((3073, 3129), 'os.path.join', 'os.path.join', (['self.checkpoints_dir', 'self.checkpoint_name'], {}), '(self.checkpoints_dir, self.checkpoint_name)\n', (3085, 3129), False, 'import os\n'), ((3601, 3635), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (3633, 3635), True, 'import tensorflow as tf\n'), ((3657, 3683), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (3681, 3683), True, 'import tensorflow as tf\n'), ((3755, 3790), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3775, 3790), True, 'import tensorflow as tf\n'), ((3981, 4036), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': 'input_shape'}), '(tf.float32, shape=input_shape)\n', (4005, 4036), True, 'import tensorflow as tf\n'), ((4062, 4156), 'models.common.model_builder.baseline_model', 'baseline_model', ([], {'input_shape': 'input_shape[1:]', 'output_param_number': 'self.output_param_number'}), '(input_shape=input_shape[1:], output_param_number=self.\n output_param_number)\n', (4076, 4156), False, 'from models.common.model_builder import baseline_model\n'), ((4304, 4330), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {}), '()\n', (4328, 4330), True, 'import tensorflow as tf\n'), ((5017, 5070), 'tensorflow.histogram_fixed_width', 'tf.histogram_fixed_width', (['x', 'value_range'], {'nbins': 'nbins'}), '(x, value_range, nbins=nbins)\n', (5041, 5070), True, 'import tensorflow as tf\n'), ((5090, 5116), 'tensorflow.divide', 'tf.divide', (['hist', '(h * w * c)'], {}), '(hist, h * w * c)\n', (5099, 5116), True, 'import tensorflow as tf\n'), ((5154, 5176), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (5174, 5176), True, 'import tensorflow as tf\n'), ((5244, 5276), 
'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), '(image)\n', (5269, 5276), True, 'import tensorflow as tf\n'), ((5300, 5361), 'tensorflow.image.resize', 'tf.image.resize', (['img_gray', '[self.patch_size, self.patch_size]'], {}), '(img_gray, [self.patch_size, self.patch_size])\n', (5315, 5361), True, 'import tensorflow as tf\n'), ((5428, 5472), 'tensorflow.math.pow', 'tf.math.pow', (['img_gray', 'self.gamma_to_predict'], {}), '(img_gray, self.gamma_to_predict)\n', (5439, 5472), True, 'import tensorflow as tf\n'), ((5497, 5538), 'tensorflow.math.pow', 'tf.math.pow', (['image', 'self.gamma_to_predict'], {}), '(image, self.gamma_to_predict)\n', (5508, 5538), True, 'import tensorflow as tf\n'), ((5693, 5737), 'tensorflow.stack', 'tf.stack', (['[img_hist, img_grade_hist]'], {'axis': '(0)'}), '([img_hist, img_grade_hist], axis=0)\n', (5701, 5737), True, 'import tensorflow as tf\n'), ((5823, 5848), 'models.common.util.srgb_to_linear', 'srgb_to_linear', (['img_grade'], {}), '(img_grade)\n', (5837, 5848), False, 'from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear\n'), ((6060, 6071), 'time.time', 'time.time', ([], {}), '()\n', (6069, 6071), False, 'import time\n'), ((6318, 6352), 'message_pb2.FieldValuePairAttrib', 'message_pb2.FieldValuePairAttrib', ([], {}), '()\n', (6350, 6352), False, 'import message_pb2\n'), ((3479, 3500), 'models.common.util.linear_to_srgb', 'linear_to_srgb', (['image'], {}), '(image)\n', (3493, 3500), False, 'from models.common.util import print_, get_ckpt_list, linear_to_srgb, srgb_to_linear\n')]
import json
import numpy as np
import os
import skimage.io


def save_np_arrays(images, img_names, save_path):
    for img, img_name in zip(images, img_names):
        np.save(f'{save_path}/{img_name}', img)


def load_np_arrays(path, num=None):
    images = []
    # Sort so arrays come back in a deterministic order.
    img_names = sorted(os.listdir(path))
    if num is None:
        num = len(img_names)
    for idx in range(num):
        img_name = img_names[idx]
        img = np.load(f'{path}/{img_name}')
        images.append(img)
    return np.array(images)


def load_images(path, img_names, num_images=None):
    images = []
    if num_images is None:
        num_images = len(img_names)
    for idx in range(num_images):
        img_name = img_names[idx]
        img_path = f'{path}/{img_name}'
        img = skimage.io.imread(img_path) / 255.
        images.append(img)
    return images


def load_images_and_density_maps(path, num_images):
    img_names = sorted(os.listdir(f'{path}/images'))[:num_images]
    density_map_names = sorted(os.listdir(f'{path}/gt_density_maps'))[:num_images]
    images = []
    density_maps = []
    for img_name, density_map_name in zip(img_names, density_map_names):
        img = skimage.io.imread(f'{path}/images/{img_name}') / 255.
        density_map = np.load(f'{path}/gt_density_maps/{density_map_name}')
        images.append(img)
        density_maps.append(density_map)
    return images, density_maps


def save_gt_counts(counts, img_names, save_path):
    for img_name, count in zip(img_names, counts):
        txt_name = f'{img_name.split(".")[0]}.txt'
        txt_path = f'{save_path}/{txt_name}'
        with open(txt_path, 'w') as fo:
            fo.write(str(int(count)))


def load_gt_counts(counts_path):
    txt_names = sorted(os.listdir(counts_path))
    # np.int was removed from NumPy; the builtin int is the intended dtype.
    counts = np.empty(len(txt_names), dtype=int)
    for i, txt_name in enumerate(txt_names):
        txt_path = f'{counts_path}/{txt_name}'
        with open(txt_path, 'r') as fi:
            counts[i] = int(fi.read().split()[0])
    return counts


def read_json(filename):
    with open(filename, 'r') as fi:
        data = json.load(fi)
    return data


def write_json(data, filename):
    dirname = os.path.dirname(filename)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    with open(filename, 'w') as fo:
        json.dump(data, fo)
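

if __name__ == "__main__":
    # Hedged round-trip sketch (not part of the original module): save two
    # arrays and load them back. The directory name is illustrative.
    demo = [np.zeros((2, 2)), np.ones((2, 2))]
    os.makedirs("demo_arrays", exist_ok=True)
    save_np_arrays(demo, ["a.npy", "b.npy"], "demo_arrays")
    restored = load_np_arrays("demo_arrays")
    print(restored.shape)  # expected (2, 2, 2)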
[ "json.dump", "numpy.load", "numpy.save", "json.load", "os.makedirs", "os.path.isdir", "os.path.dirname", "numpy.array", "os.listdir", "skimage.io.imread" ]
[((513, 529), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (521, 529), True, 'import numpy as np\n'), ((2242, 2267), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2257, 2267), False, 'import os\n'), ((164, 203), 'numpy.save', 'np.save', (['f"""{save_path}/{img_name}"""', 'img'], {}), "(f'{save_path}/{img_name}', img)\n", (171, 203), True, 'import numpy as np\n'), ((288, 304), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (298, 304), False, 'import os\n'), ((440, 469), 'numpy.load', 'np.load', (['f"""{path}/{img_name}"""'], {}), "(f'{path}/{img_name}')\n", (447, 469), True, 'import numpy as np\n'), ((1274, 1327), 'numpy.load', 'np.load', (['f"""{path}/gt_density_maps/{density_map_name}"""'], {}), "(f'{path}/gt_density_maps/{density_map_name}')\n", (1281, 1327), True, 'import numpy as np\n'), ((1784, 1807), 'os.listdir', 'os.listdir', (['counts_path'], {}), '(counts_path)\n', (1794, 1807), False, 'import os\n'), ((2165, 2178), 'json.load', 'json.load', (['fi'], {}), '(fi)\n', (2174, 2178), False, 'import json\n'), ((2279, 2301), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (2292, 2301), False, 'import os\n'), ((2311, 2331), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (2322, 2331), False, 'import os\n'), ((2385, 2404), 'json.dump', 'json.dump', (['data', 'fo'], {}), '(data, fo)\n', (2394, 2404), False, 'import json\n'), ((784, 811), 'skimage.io.imread', 'skimage.io.imread', (['img_path'], {}), '(img_path)\n', (801, 811), False, 'import skimage\n'), ((945, 973), 'os.listdir', 'os.listdir', (['f"""{path}/images"""'], {}), "(f'{path}/images')\n", (955, 973), False, 'import os\n'), ((1019, 1056), 'os.listdir', 'os.listdir', (['f"""{path}/gt_density_maps"""'], {}), "(f'{path}/gt_density_maps')\n", (1029, 1056), False, 'import os\n'), ((1198, 1244), 'skimage.io.imread', 'skimage.io.imread', (['f"""{path}/images/{img_name}"""'], {}), "(f'{path}/images/{img_name}')\n", (1215, 1244), False, 'import skimage\n')]
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import time import numpy as np import torch import torch.utils.data from optimization.training import evaluate, plot_samples from utils.load_data import load_dataset from os.path import join parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows') parser.add_argument('-d', '--dataset', type=str, default='cifar10', choices=['cifar10', 'imagenet32', 'imagenet64', 'svhn'], metavar='DATASET', help='Dataset choice.') parser.add_argument('-bs', '--batch_size', type=int, default=1000, metavar='BATCH_SIZE', help='input batch size for training (default: 100)') parser.add_argument('--snap_dir', type=str, default='') def main(): args = parser.parse_args() args.cuda = torch.cuda.is_available() args.break_epoch = False snap_dir = args.snap_dir = join('snapshots', args.snap_dir) + '/' train_loader, val_loader, test_loader, args = load_dataset(args) final_model = torch.load(snap_dir + 'a.model', map_location='cpu') if args.cuda: final_model = final_model.cuda() # Just for timing at the moment. with torch.no_grad(): final_model.eval() timing_results = [] for i in range(100): torch.cuda.synchronize() start = time.time() x_sample = final_model.sample(n_samples=100) torch.cuda.synchronize() duration = time.time() - start timing_results.append(duration) print('Timings: ', timing_results) print('Mean time:', np.mean(timing_results)) plot_samples(final_model, args, epoch=9999, bpd=0.0) if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") final_model = torch.nn.DataParallel(final_model, dim=0) test_bpd = evaluate(test_loader, final_model, args) with open(snap_dir + 'log.txt', 'a') as ff: msg = 'FINAL \ttest negative elbo bpd {:.4f}'.format( test_bpd) print(msg) print(msg, file=ff) test_bpd = evaluate(test_loader, final_model, args, iw_samples=1000) with open(snap_dir + 'log.txt', 'a') as ff: msg = 'FINAL \ttest negative log_px bpd {:.4f}'.format( test_bpd) print(msg) print(msg, file=ff) if __name__ == '__main__': main()
[ "torch.cuda.synchronize", "optimization.training.evaluate", "argparse.ArgumentParser", "torch.load", "torch.cuda.device_count", "time.time", "numpy.mean", "torch.cuda.is_available", "optimization.training.plot_samples", "torch.nn.DataParallel", "torch.no_grad", "os.path.join", "utils.load_data.load_dataset" ]
[((282, 355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Discrete Normalizing flows"""'}), "(description='PyTorch Discrete Normalizing flows')\n", (305, 355), False, 'import argparse\n'), ((869, 894), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (892, 894), False, 'import torch\n'), ((1047, 1065), 'utils.load_data.load_dataset', 'load_dataset', (['args'], {}), '(args)\n', (1059, 1065), False, 'from utils.load_data import load_dataset\n'), ((1085, 1137), 'torch.load', 'torch.load', (["(snap_dir + 'a.model')"], {'map_location': '"""cpu"""'}), "(snap_dir + 'a.model', map_location='cpu')\n", (1095, 1137), False, 'import torch\n'), ((1937, 1977), 'optimization.training.evaluate', 'evaluate', (['test_loader', 'final_model', 'args'], {}), '(test_loader, final_model, args)\n', (1945, 1977), False, 'from optimization.training import evaluate, plot_samples\n'), ((2178, 2235), 'optimization.training.evaluate', 'evaluate', (['test_loader', 'final_model', 'args'], {'iw_samples': '(1000)'}), '(test_loader, final_model, args, iw_samples=1000)\n', (2186, 2235), False, 'from optimization.training import evaluate, plot_samples\n'), ((957, 989), 'os.path.join', 'join', (['"""snapshots"""', 'args.snap_dir'], {}), "('snapshots', args.snap_dir)\n", (961, 989), False, 'from os.path import join\n'), ((1244, 1259), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1257, 1259), False, 'import torch\n'), ((1703, 1755), 'optimization.training.plot_samples', 'plot_samples', (['final_model', 'args'], {'epoch': '(9999)', 'bpd': '(0.0)'}), '(final_model, args, epoch=9999, bpd=0.0)\n', (1715, 1755), False, 'from optimization.training import evaluate, plot_samples\n'), ((1764, 1789), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1787, 1789), False, 'import torch\n'), ((1880, 1921), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['final_model'], {'dim': '(0)'}), '(final_model, dim=0)\n', (1901, 1921), False, 'import torch\n'), ((1359, 1383), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1381, 1383), False, 'import torch\n'), ((1404, 1415), 'time.time', 'time.time', ([], {}), '()\n', (1413, 1415), False, 'import time\n'), ((1485, 1509), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1507, 1509), False, 'import torch\n'), ((1669, 1692), 'numpy.mean', 'np.mean', (['timing_results'], {}), '(timing_results)\n', (1676, 1692), True, 'import numpy as np\n'), ((1822, 1847), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1845, 1847), False, 'import torch\n'), ((1533, 1544), 'time.time', 'time.time', ([], {}), '()\n', (1542, 1544), False, 'import time\n')]
import numpy as np

from manimlib import *


class Quaternion:
    def __init__(self, x=None, y=0, z=0, w=1):
        """Quaternion style [x, y, z, w]"""
        if issubclass(type(x), (np.ndarray, list, tuple)):
            self._x = x[0]
            self._y = x[1]
            self._z = x[2]
            self._w = x[3]
        else:
            if x is None:
                x = 0
            self._x = x
            self._y = y
            self._z = z
            self._w = w

        self._vec = np.array([self._x, self._y, self._z])
        self._q = np.array([*self._vec, self._w])

    def _set_q(self):
        self._vec = np.array([self._x, self._y, self._z])
        self._q = np.array([*self._vec, self._w])

    def to_array(self):
        return self._q

    def normalise(self):
        # Normalise the full 4-component quaternion, not just its vector part.
        L = np.linalg.norm(self._q)
        self._x /= L
        self._y /= L
        self._z /= L
        self._w /= L
        self._set_q()

    def slerp(self):
        """TODO"""
        pass

    def multi(self, *quats):
        q = self
        for qi in quats:
            q = Quaternion.multiply_quat_2(q, qi)
        # Update the scalar components too, so the w property and _set_q
        # stay consistent with the product.
        self._x, self._y, self._z = q._vec
        self._w = q._w
        self._set_q()
        return q

    @staticmethod
    def multiply_quat(q1, q2):
        """reference http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/code/index.htm"""
        x = q1.x * q2.w + q1.y * q2.z - q1.z * q2.y + q1.w * q2.x
        y = -q1.x * q2.z + q1.y * q2.w + q1.z * q2.x + q1.w * q2.y
        z = q1.x * q2.y - q1.y * q2.x + q1.z * q2.w + q1.w * q2.z
        w = -q1.x * q2.x - q1.y * q2.y - q1.z * q2.z + q1.w * q2.w
        new_q = object.__new__(Quaternion)
        new_q.__init__(x, y, z, w)
        return new_q

    @staticmethod
    def multiply_quat_2(q1, q2):
        """Graßmann Product"""
        v1 = q1._vec
        v2 = q2._vec
        w1 = q1._w
        w2 = q2._w

        vec = w1 * v2 + w2 * v1 + np.cross(v1, v2)
        w = w1 * w2 - v1.dot(v2)
        new_q = object.__new__(Quaternion)
        new_q.__init__([*vec, w])
        return new_q

    def __new__(cls, *args, **kwargs):
        return object.__new__(cls)

    def copy(self):
        obj = object.__new__(Quaternion)
        obj.__init__(*self._q)
        return obj

    def set_x(self, value):
        self._x = value
        self._set_q()

    def set_y(self, value):
        self._y = value
        self._set_q()

    def set_z(self, value):
        self._z = value
        self._set_q()

    def set_w(self, value):
        self._w = value
        self._set_q()

    def set_from_euler(self):
        """TODO"""
        pass

    def set_from_axis_angle(self, axis: np.ndarray, angle):
        axis = normalize(np.array(axis))
        half_angle = angle / 2
        s = np.sin(half_angle)

        self._x = axis[0] * s
        self._y = axis[1] * s
        self._z = axis[2] * s
        self._w = np.cos(half_angle)
        self._set_q()
        return self

    def conjugate(self, in_place=True):
        if in_place:
            # Negate the scalar components: _set_q rebuilds _vec from
            # _x, _y, _z, so negating _vec alone would be undone.
            self._x, self._y, self._z = -self._x, -self._y, -self._z
            self._set_q()
            return self
        else:
            q = self.copy()
            return q.conjugate(in_place=True)

    def invert(self):
        # The inverse equals the conjugate only for unit quaternions.
        return self.conjugate()

    def dot(self, v):
        return self._q.dot(v)

    def __str__(self):
        return self._q.__str__()

    @property
    def x(self):
        return self._vec[0]

    @property
    def y(self):
        return self._vec[1]

    @property
    def z(self):
        return self._vec[2]

    @property
    def w(self):
        return self._w


if __name__ == "__main__":
    axis = np.array([1, 1, 1])
    q1 = Quaternion().set_from_axis_angle(axis, 20 * DEGREES)
    q2 = Quaternion().set_from_axis_angle(axis, 30 * DEGREES)
    print(Quaternion.multiply_quat(q1, q2))
    print(Quaternion.multiply_quat_2(q1, q2))
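
    # Hedged usage sketch (not in the original demo): rotate the point
    # (1, 0, 0) by 90 degrees about z via q * p * q^-1, embedding the point
    # as a pure quaternion (w = 0). Assumes q is a unit quaternion, which
    # set_from_axis_angle guarantees.
    q = Quaternion().set_from_axis_angle([0, 0, 1], 90 * DEGREES)
    p = Quaternion(1, 0, 0, 0)
    rotated = Quaternion.multiply_quat_2(
        Quaternion.multiply_quat_2(q, p), q.conjugate(in_place=False))
    print(rotated)  # vector part should be close to (0, 1, 0)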
[ "numpy.cross", "numpy.sin", "numpy.linalg.norm", "numpy.array", "numpy.cos" ]
[((3676, 3695), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3684, 3695), True, 'import numpy as np\n'), ((499, 536), 'numpy.array', 'np.array', (['[self._x, self._y, self._z]'], {}), '([self._x, self._y, self._z])\n', (507, 536), True, 'import numpy as np\n'), ((555, 586), 'numpy.array', 'np.array', (['[*self._vec, self._w]'], {}), '([*self._vec, self._w])\n', (563, 586), True, 'import numpy as np\n'), ((630, 667), 'numpy.array', 'np.array', (['[self._x, self._y, self._z]'], {}), '([self._x, self._y, self._z])\n', (638, 667), True, 'import numpy as np\n'), ((686, 717), 'numpy.array', 'np.array', (['[*self._vec, self._w]'], {}), '([*self._vec, self._w])\n', (694, 717), True, 'import numpy as np\n'), ((804, 829), 'numpy.linalg.norm', 'np.linalg.norm', (['self._vec'], {}), '(self._vec)\n', (818, 829), True, 'import numpy as np\n'), ((2795, 2813), 'numpy.sin', 'np.sin', (['half_angle'], {}), '(half_angle)\n', (2801, 2813), True, 'import numpy as np\n'), ((2923, 2941), 'numpy.cos', 'np.cos', (['half_angle'], {}), '(half_angle)\n', (2929, 2941), True, 'import numpy as np\n'), ((1951, 1967), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (1959, 1967), True, 'import numpy as np\n'), ((2736, 2750), 'numpy.array', 'np.array', (['axis'], {}), '(axis)\n', (2744, 2750), True, 'import numpy as np\n')]
import cv2
import numpy as np
from PIL import Image
from PIL import ImageEnhance
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'

plate = None


def main(img):
    img = cv2.imread(img, cv2.IMREAD_COLOR)
    img = cv2.resize(img, (600, 400))

    threshold = 180  # to be determined
    _, img_binarized = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
    pil_img = Image.fromarray(img_binarized)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 13, 15, 15)

    edged = cv2.Canny(gray, 30, 200)
    thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
    screenCnt = None
    gaussian_blur_license_plate = cv2.GaussianBlur(img, (5, 5), 0)

    for c in contours:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break

    # Bail out early instead of crashing on a None contour below.
    if screenCnt is None:
        print("No contour detected")
        return

    cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)

    mask = np.zeros(gray.shape, np.uint8)
    new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
    new_image = cv2.bitwise_and(img, img, mask=mask)

    (x, y) = np.where(mask == 255)
    (topx, topy) = (np.min(x), np.min(y))
    (bottomx, bottomy) = (np.max(x), np.max(y))
    Cropped = gray[topx:bottomx+1, topy:bottomy+1]

    text = pytesseract.image_to_string(Cropped, lang='eng')
    print("programming_fever's License Plate Recognition\n")
    print("Detected license plate Number is:", text)

    img = cv2.resize(img, (500, 300))
    Cropped = cv2.resize(Cropped, (400, 200))
    im = Image.fromarray(Cropped)
    im.save('test.png')

    image = Image.open('test.png')
    enh_bri = ImageEnhance.Brightness(image)
    brightness = 1.0
    image_brightened = enh_bri.enhance(brightness)
    imwhole = np.array(image_brightened)

    cv2.imshow('car', img)
    cv2.imshow('Cropped', imwhole)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
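

if __name__ == "__main__":
    # Hedged usage sketch (not in the original module): run the pipeline on a
    # sample image. "car.jpg" is an illustrative path, not a bundled file.
    main("car.jpg")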
[ "cv2.GaussianBlur", "PIL.ImageEnhance.Brightness", "cv2.bitwise_and", "cv2.approxPolyDP", "cv2.arcLength", "cv2.adaptiveThreshold", "cv2.bilateralFilter", "cv2.imshow", "cv2.cvtColor", "numpy.max", "cv2.drawContours", "cv2.destroyAllWindows", "cv2.resize", "cv2.Canny", "cv2.waitKey", "numpy.min", "cv2.threshold", "numpy.zeros", "pytesseract.image_to_string", "PIL.Image.open", "cv2.imread", "numpy.where", "numpy.array", "PIL.Image.fromarray", "cv2.findContours" ]
[((428, 461), 'cv2.imread', 'cv2.imread', (['img', 'cv2.IMREAD_COLOR'], {}), '(img, cv2.IMREAD_COLOR)\n', (438, 461), False, 'import cv2\n'), ((472, 499), 'cv2.resize', 'cv2.resize', (['img', '(600, 400)'], {}), '(img, (600, 400))\n', (482, 499), False, 'import cv2\n'), ((510, 537), 'cv2.resize', 'cv2.resize', (['img', '(600, 400)'], {}), '(img, (600, 400))\n', (520, 537), False, 'import cv2\n'), ((600, 653), 'cv2.threshold', 'cv2.threshold', (['img', 'threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(img, threshold, 255, cv2.THRESH_BINARY)\n', (613, 653), False, 'import cv2\n'), ((668, 698), 'PIL.Image.fromarray', 'Image.fromarray', (['img_binarized'], {}), '(img_binarized)\n', (683, 698), False, 'from PIL import Image\n'), ((712, 749), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (724, 749), False, 'import cv2\n'), ((761, 798), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['gray', '(13)', '(15)', '(15)'], {}), '(gray, 13, 15, 15)\n', (780, 798), False, 'import cv2\n'), ((812, 836), 'cv2.Canny', 'cv2.Canny', (['gray', '(30)', '(200)'], {}), '(gray, 30, 200)\n', (821, 836), False, 'import cv2\n'), ((850, 895), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', '(1)', '(1)', '(11)', '(2)'], {}), '(gray, 255, 1, 1, 11, 2)\n', (871, 895), False, 'import cv2\n'), ((1009, 1073), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (1025, 1073), False, 'import cv2\n'), ((1254, 1286), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (1270, 1286), False, 'import cv2\n'), ((1711, 1741), 'numpy.zeros', 'np.zeros', (['gray.shape', 'np.uint8'], {}), '(gray.shape, np.uint8)\n', (1719, 1741), True, 'import numpy as np\n'), ((1757, 1804), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[screenCnt]', '(0)', '(255)', '(-1)'], {}), '(mask, [screenCnt], 0, 255, -1)\n', (1773, 1804), False, 'import cv2\n'), ((1818, 1854), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1833, 1854), False, 'import cv2\n'), ((1867, 1888), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (1875, 1888), True, 'import numpy as np\n'), ((2042, 2090), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['Cropped'], {'lang': '"""eng"""'}), "(Cropped, lang='eng')\n", (2069, 2090), False, 'import pytesseract\n'), ((2214, 2241), 'cv2.resize', 'cv2.resize', (['img', '(500, 300)'], {}), '(img, (500, 300))\n', (2224, 2241), False, 'import cv2\n'), ((2255, 2286), 'cv2.resize', 'cv2.resize', (['Cropped', '(400, 200)'], {}), '(Cropped, (400, 200))\n', (2265, 2286), False, 'import cv2\n'), ((2296, 2320), 'PIL.Image.fromarray', 'Image.fromarray', (['Cropped'], {}), '(Cropped)\n', (2311, 2320), False, 'from PIL import Image\n'), ((2359, 2381), 'PIL.Image.open', 'Image.open', (['"""test.png"""'], {}), "('test.png')\n", (2369, 2381), False, 'from PIL import Image\n'), ((2396, 2426), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['image'], {}), '(image)\n', (2419, 2426), False, 'from PIL import ImageEnhance\n'), ((2516, 2542), 'numpy.array', 'np.array', (['image_brightened'], {}), '(image_brightened)\n', (2524, 2542), True, 'import numpy as np\n'), ((2549, 2571), 'cv2.imshow', 'cv2.imshow', (['"""car"""', 'img'], {}), "('car', img)\n", (2559, 2571), False, 'import cv2\n'), ((2575, 2605), 'cv2.imshow', 'cv2.imshow', 
(['"""Cropped"""', 'imwhole'], {}), "('Cropped', imwhole)\n", (2585, 2605), False, 'import cv2\n'), ((2610, 2624), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2621, 2624), False, 'import cv2\n'), ((2629, 2652), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2650, 2652), False, 'import cv2\n'), ((1336, 1358), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (1349, 1358), False, 'import cv2\n'), ((1376, 1414), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (1392, 1414), False, 'import cv2\n'), ((1644, 1698), 'cv2.drawContours', 'cv2.drawContours', (['img', '[screenCnt]', '(-1)', '(0, 0, 255)', '(3)'], {}), '(img, [screenCnt], -1, (0, 0, 255), 3)\n', (1660, 1698), False, 'import cv2\n'), ((1909, 1918), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1915, 1918), True, 'import numpy as np\n'), ((1920, 1929), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1926, 1929), True, 'import numpy as np\n'), ((1957, 1966), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1963, 1966), True, 'import numpy as np\n'), ((1968, 1977), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1974, 1977), True, 'import numpy as np\n')]
import gym
import numpy as np
import gym.spaces
import math
import pandas as pd

df = pd.read_csv('./logs.csv', sep=',')
df = df.sample(frac=1)


def getData(line, keyNum):
    if keyNum == 0:
        # vec: the board encoded as a string of digits
        vec = str(df.iloc[line, 0])
        v = np.zeros(11, dtype=np.float32)
        for i in range(11):
            v[i] = float(vec[i+1])
        return v
    else:
        # where, angle, power, reward
        if keyNum == 4:
            # reward is read from the following row
            ans = df.iloc[line+1, keyNum]
        else:
            ans = df.iloc[line, keyNum]
        return ans


class MyEnv(gym.core.Env):
    def __init__(self):
        self.board = np.zeros(11, dtype=np.float32)
        self.action_space = gym.spaces.Discrete(30)
        low_bound = 0
        high_bound = 1
        self.observation_space = gym.spaces.Box(
            low=low_bound, high=high_bound, shape=self.board.shape, dtype=np.float32)
        self.time = 0
        self.obs = getData(0, 0)

    def step(self, action):
        # Encode the current observation (the next one is not read yet).
        st = "9"
        for i in range(len(self.obs)):
            st += str(int(self.obs[i]))
        # Decode the flat action id into (power, angle, where).
        power = math.floor(action / 6)
        action = action - power * 6
        angle = math.floor(action / 3)
        action = action - angle * 3
        where = action
        df2 = df[(df['vec'] == st) & (df['where'] == where) & (df['angle'] == angle) & (df['power'] == power)]
        df2 = df2.sample(frac=1)
        reward = float(df2.iloc[0, 4])
        self.time += 1
        self.obs = getData(self.time, 0)
        done = True
        return self.obs, reward, done, {}

    def reset(self):
        self.time = 0
        self.obs = getData(self.time, 0)
        return self.obs
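

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): one sampled
    # step. Assumes ./logs.csv exists with the columns referenced above.
    env = MyEnv()
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(reward, done)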
[ "pandas.read_csv", "gym.spaces.Discrete", "math.floor", "numpy.zeros", "gym.spaces.Box" ]
[((97, 131), 'pandas.read_csv', 'pd.read_csv', (['"""./logs.csv"""'], {'sep': '""","""'}), "('./logs.csv', sep=',')\n", (108, 131), True, 'import pandas as pd\n'), ((259, 289), 'numpy.zeros', 'np.zeros', (['(11)'], {'dtype': 'np.float32'}), '(11, dtype=np.float32)\n', (267, 289), True, 'import numpy as np\n'), ((620, 650), 'numpy.zeros', 'np.zeros', (['(11)'], {'dtype': 'np.float32'}), '(11, dtype=np.float32)\n', (628, 650), True, 'import numpy as np\n'), ((679, 702), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(30)'], {}), '(30)\n', (698, 702), False, 'import gym\n'), ((781, 873), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low_bound', 'high': 'high_bound', 'shape': 'self.board.shape', 'dtype': 'np.float32'}), '(low=low_bound, high=high_bound, shape=self.board.shape,\n dtype=np.float32)\n', (795, 873), False, 'import gym\n'), ((1083, 1105), 'math.floor', 'math.floor', (['(action / 6)'], {}), '(action / 6)\n', (1093, 1105), False, 'import math\n'), ((1152, 1174), 'math.floor', 'math.floor', (['(action / 3)'], {}), '(action / 3)\n', (1162, 1174), False, 'import math\n')]
# -*- coding: utf-8 -*- """ @file @brief Generates random answers for challenges. """ import os import numpy import pandas def random_answers_2020_images(): """ Generates random answers the deep learning challenge of hackathons :ref:`l-hackathon-2020`. """ name = os.path.join(os.path.split(__file__)[0], "labels_2020_random.csv") df = pandas.read_csv(name)[['file_name']] df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], )) df['score'] = numpy.random.random((df.shape[0], )) return df def random_answers_2020_ml(): """ Generates random answers the machine learning challenge of hackathons :ref:`l-hackathon-2020`. """ df = pandas.DataFrame({"index": numpy.arange(473333)}) df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], )) df['score'] = numpy.random.random((df.shape[0], )) return df
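

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): materialize the
    # random ML answers as a CSV; the output file name is illustrative.
    random_answers_2020_ml().to_csv("random_answers_ml.csv", index=False)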
[ "pandas.read_csv", "numpy.random.randint", "numpy.arange", "numpy.random.random", "os.path.split" ]
[((417, 473), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(df.shape[0],)'}), '(low=0, high=2, size=(df.shape[0],))\n', (437, 473), False, 'import numpy\n'), ((493, 528), 'numpy.random.random', 'numpy.random.random', (['(df.shape[0],)'], {}), '((df.shape[0],))\n', (512, 528), False, 'import numpy\n'), ((772, 828), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(df.shape[0],)'}), '(low=0, high=2, size=(df.shape[0],))\n', (792, 828), False, 'import numpy\n'), ((848, 883), 'numpy.random.random', 'numpy.random.random', (['(df.shape[0],)'], {}), '((df.shape[0],))\n', (867, 883), False, 'import numpy\n'), ((362, 383), 'pandas.read_csv', 'pandas.read_csv', (['name'], {}), '(name)\n', (377, 383), False, 'import pandas\n'), ((299, 322), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (312, 322), False, 'import os\n'), ((731, 751), 'numpy.arange', 'numpy.arange', (['(473333)'], {}), '(473333)\n', (743, 751), False, 'import numpy\n')]
#!/usr/bin/env python

# Copyright (c) 2021 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.

"""
Here are defined all the CARLA sensors
"""

import copy
import math

import numpy as np

import carla

# ==================================================================================================
# -- BaseSensor -----------------------------------------------------------------------------------
# ==================================================================================================


class BaseSensor(object):
    def __init__(self, name, attributes, interface, parent):
        self.name = name
        self.attributes = attributes
        self.interface = interface
        self.parent = parent

        self.interface.register(self.name, self)

    def is_event_sensor(self):
        return False

    def parse(self):
        raise NotImplementedError

    def update_sensor(self, data, frame):
        if not self.is_event_sensor():
            self.interface._data_buffers.put((self.name, frame, self.parse(data)))
        else:
            self.interface._event_data_buffers.put((self.name, frame, self.parse(data)))

    def callback(self, data):
        self.update_sensor(data, data.frame)

    def destroy(self):
        raise NotImplementedError


class CarlaSensor(BaseSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)
        world = self.parent.get_world()

        type_ = self.attributes.pop("type", "")
        transform = self.attributes.pop("transform", "0,0,0,0,0,0")
        if isinstance(transform, str):
            transform = [float(x) for x in transform.split(",")]
        assert len(transform) == 6

        blueprint = world.get_blueprint_library().find(type_)
        blueprint.set_attribute("role_name", name)
        for key, value in attributes.items():
            blueprint.set_attribute(str(key), str(value))

        transform = carla.Transform(
            carla.Location(transform[0], transform[1], transform[2]),
            carla.Rotation(transform[4], transform[5], transform[3])
        )

        self.sensor = world.spawn_actor(blueprint, transform, attach_to=self.parent)
        self.sensor.listen(self.callback)

    def destroy(self):
        if self.sensor is not None:
            self.sensor.destroy()
            self.sensor = None


class PseudoSensor(BaseSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def callback(self, data, frame):
        self.update_sensor(data, frame)

# ==================================================================================================
# -- Cameras -----------------------------------------------------------------------------------
# ==================================================================================================


class BaseCamera(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the Image into a numpy array"""
        # sensor_data: [fov, height, width, raw_data]
        array = np.frombuffer(sensor_data.raw_data, dtype=np.dtype("uint8"))
        array = np.reshape(array, (sensor_data.height, sensor_data.width, 4))
        array = array[:, :, :3]
        array = array[:, :, ::-1]
        return array


class CameraRGB(BaseCamera):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)


class CameraDepth(BaseCamera):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)


class CameraSemanticSegmentation(BaseCamera):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)


class CameraDVS(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def is_event_sensor(self):
        return True

    def parse(self, sensor_data):
        """Parses the DVSEvents into an RGB image"""
        # sensor_data: [x, y, t, polarity]
        # np.bool was removed from NumPy; the builtin bool is the intended dtype.
        dvs_events = np.frombuffer(sensor_data.raw_data, dtype=np.dtype([
            ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', bool)]))

        dvs_img = np.zeros((sensor_data.height, sensor_data.width, 3), dtype=np.uint8)
        dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255  # Blue is positive, red is negative
        return dvs_img

# ==================================================================================================
# -- LIDAR -----------------------------------------------------------------------------------
# ==================================================================================================


class Lidar(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the LidarMeasurement into a numpy array"""
        # sensor_data: [x, y, z, intensity]
        points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
        points = copy.deepcopy(points)
        points = np.reshape(points, (int(points.shape[0] / 4), 4))
        return points


class SemanticLidar(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the SemanticLidarMeasurement into a numpy array"""
        # sensor_data: [x, y, z, cos(angle), actor index, semantic tag]
        points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
        points = copy.deepcopy(points)
        points = np.reshape(points, (int(points.shape[0] / 6), 6))
        return points

# ==================================================================================================
# -- Others -----------------------------------------------------------------------------------
# ==================================================================================================


class Radar(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the RadarMeasurement into a numpy array"""
        # sensor_data: [depth, azimuth, altitute, velocity]
        points = np.frombuffer(sensor_data.raw_data, dtype=np.dtype('f4'))
        points = copy.deepcopy(points)
        points = np.reshape(points, (int(points.shape[0] / 4), 4))
        points = np.flip(points, 1)
        return points


class Gnss(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the GnssMeasurement into a numpy array"""
        # sensor_data: [latitude, longitude, altitude]
        return np.array([sensor_data.latitude, sensor_data.longitude, sensor_data.altitude], dtype=np.float64)


class Imu(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def parse(self, sensor_data):
        """Parses the IMUMeasurement into a numpy array"""
        # sensor_data: [accelerometer, gyroscope, compass]
        return np.array([sensor_data.accelerometer.x, sensor_data.accelerometer.y, sensor_data.accelerometer.z,
                         sensor_data.gyroscope.x, sensor_data.gyroscope.y, sensor_data.gyroscope.z,
                         sensor_data.compass,
                         ], dtype=np.float64)


class LaneInvasion(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def is_event_sensor(self):
        return True

    def parse(self, sensor_data):
        """Parses the LaneInvasionEvent into a list"""
        # sensor_data: [transform, lane marking]
        return [sensor_data.transform, sensor_data.crossed_lane_markings]


class Collision(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        self._last_event_frame = 0
        super().__init__(name, attributes, interface, parent)

    def callback(self, data):
        # The collision sensor can have multiple callbacks per tick. Get only the first one
        if self._last_event_frame != data.frame:
            self._last_event_frame = data.frame
            self.update_sensor(data, data.frame)

    def is_event_sensor(self):
        return True

    def parse(self, sensor_data):
        """Parses the CollisionEvent into a list"""
        # sensor_data: [other actor, impulse]
        impulse = sensor_data.normal_impulse
        impulse_value = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)
        return [sensor_data.other_actor, impulse_value]


class Obstacle(CarlaSensor):
    def __init__(self, name, attributes, interface, parent):
        super().__init__(name, attributes, interface, parent)

    def is_event_sensor(self):
        return True

    def parse(self, sensor_data):
        """Parses the ObstacleDetectionEvent into a list"""
        # sensor_data: [other actor, distance]
        return [sensor_data.other_actor, sensor_data.distance]
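

if __name__ == "__main__":
    # Hedged standalone check (not in the original module): the raw float32
    # buffer -> (N, 4) decode used by Lidar.parse, run on synthetic bytes.
    raw = np.arange(8, dtype=np.dtype('f4')).tobytes()
    points = np.frombuffer(raw, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    print(points)  # two rows of four floats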
[ "copy.deepcopy", "numpy.flip", "math.sqrt", "numpy.dtype", "numpy.zeros", "numpy.array", "numpy.reshape", "carla.Rotation", "carla.Location" ]
[((3460, 3521), 'numpy.reshape', 'np.reshape', (['array', '(sensor_data.height, sensor_data.width, 4)'], {}), '(array, (sensor_data.height, sensor_data.width, 4))\n', (3470, 3521), True, 'import numpy as np\n'), ((4610, 4678), 'numpy.zeros', 'np.zeros', (['(sensor_data.height, sensor_data.width, 3)'], {'dtype': 'np.uint8'}), '((sensor_data.height, sensor_data.width, 3), dtype=np.uint8)\n', (4618, 4678), True, 'import numpy as np\n'), ((5507, 5528), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (5520, 5528), False, 'import copy\n'), ((6046, 6067), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (6059, 6067), False, 'import copy\n'), ((6856, 6877), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (6869, 6877), False, 'import copy\n'), ((6962, 6980), 'numpy.flip', 'np.flip', (['points', '(1)'], {}), '(points, 1)\n', (6969, 6980), True, 'import numpy as np\n'), ((7319, 7419), 'numpy.array', 'np.array', (['[sensor_data.latitude, sensor_data.longitude, sensor_data.altitude]'], {'dtype': 'np.float64'}), '([sensor_data.latitude, sensor_data.longitude, sensor_data.altitude\n ], dtype=np.float64)\n', (7327, 7419), True, 'import numpy as np\n'), ((7734, 7959), 'numpy.array', 'np.array', (['[sensor_data.accelerometer.x, sensor_data.accelerometer.y, sensor_data.\n accelerometer.z, sensor_data.gyroscope.x, sensor_data.gyroscope.y,\n sensor_data.gyroscope.z, sensor_data.compass]'], {'dtype': 'np.float64'}), '([sensor_data.accelerometer.x, sensor_data.accelerometer.y,\n sensor_data.accelerometer.z, sensor_data.gyroscope.x, sensor_data.\n gyroscope.y, sensor_data.gyroscope.z, sensor_data.compass], dtype=np.\n float64)\n', (7742, 7959), True, 'import numpy as np\n'), ((9166, 9225), 'math.sqrt', 'math.sqrt', (['(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)'], {}), '(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n', (9175, 9225), False, 'import math\n'), ((2146, 2202), 'carla.Location', 'carla.Location', (['transform[0]', 'transform[1]', 'transform[2]'], {}), '(transform[0], transform[1], transform[2])\n', (2160, 2202), False, 'import carla\n'), ((2216, 2272), 'carla.Rotation', 'carla.Rotation', (['transform[4]', 'transform[5]', 'transform[3]'], {}), '(transform[4], transform[5], transform[3])\n', (2230, 2272), False, 'import carla\n'), ((3425, 3442), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (3433, 3442), True, 'import numpy as np\n'), ((4495, 4581), 'numpy.dtype', 'np.dtype', (["[('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool)]"], {}), "([('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.\n bool)])\n", (4503, 4581), True, 'import numpy as np\n'), ((5474, 5488), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (5482, 5488), True, 'import numpy as np\n'), ((6013, 6027), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (6021, 6027), True, 'import numpy as np\n'), ((6823, 6837), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (6831, 6837), True, 'import numpy as np\n')]
import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.interpolate import interp2d from scipy.ndimage import convolve1d from PIL import Image c = 299792.0 # um/ns class Ray: def __init__(self, lambda0: "um" = .532, pulse_length: "ns" = 10, radius: "um" = 100): """ Parameters ---------- lambda_: flaot wave length of the light in um pulse_length : float time in us radius : float radius of the beam """ self.radius = radius self.lambda0 = lambda0 self._t = np.linspace(0, pulse_length, 2048) self._y = np.linspace(-2, 2, 2048) self._tt, self._yy = np.meshgrid(self._t, self._y) self._lambda0 = lambda0*np.ones_like(self._tt) self._delta = 0 @property def pulse_length(self): return self._t @property def beam_width(self): return self._y @property def _k(self): return 2*np.pi/self._lambda @property def _k0(self): return 2*np.pi/self._lambda0 @property def phi(self): return self._k*self.dz + self._k0*self.dz def E(self, t): E = np.exp(1j*(self.phi + self._delta)) E_real = np.real(E) E_imag = np.imag(E) fE_real = interp2d(self._t, self._y, E_real) fE_imag = interp2d(self._t, self._y, E_imag) return fE_real(t, self._y) + 1j*fE_imag(t, self._y) def set_lambda(self, lambda_): self._lambda = lambda_ def propogate(self, dz): self.dz = dz def add_delta(self, delta): self._delta = delta class Target: def __init__(self, velocity_equation): """ Parameters ---------- velocity_equation : str or fn either step or sigmoid to use default velocity profile, or a function that excepts a t and y meshgrid """ self._t = np.linspace(-5, 15, 2048) self._y = np.linspace(-3, 3, 2048) self.tau = 0 self._tt, self._yy = np.meshgrid(self._t, self._y) if velocity_equation == "step": self.velocity_equation = self.step elif velocity_equation == "sigmoid": self.velocity_equation = self.sigmoid elif velocity_equation == "stationary": self.velocity_equation = self.stationary else: self.velocity_equation = velocity_equation @property def _zz(self): dt = np.diff(self._t).mean() return np.cumsum(self._vv, axis=1)*dt @property def zz(self): return interp2d(self._t, self._y, self._zz) @property def _dz(self): """Path the light travels to the target and back """ dzz = self._zz[..., -1, np.newaxis] - self._zz return dzz @property def dz(self): return interp2d(self._t, self._y, self._dz) @property def _vv(self): return self.velocity_equation(self._tt, self._yy) @property def vv(self): return interp2d(self._t, self._y, self._vv) @staticmethod def sigmoid(t: "ns", y: "um", max_velocity: "um/ns" = 5): """A velocity profile that follows a sigmoid like shape """ return max_velocity*np.exp(-y**4)/(1 + np.exp(-5*(t-3))) @staticmethod def step(t: "ns", y: "um", max_velocity: "um/ns" = 1): """A discontinuous jump velocity profile """ assert t.shape == y.shape v = np.zeros_like(t) v[t > 3] = max_velocity return v @staticmethod def stationary(t: "ns", y: "um"): """A static target, not moving """ return np.zeros_like(t) def reflect_off_target(self, ray): ray = self._doppler_shift(ray) dz = self.dz(ray.pulse_length, ray.beam_width) ray.propogate(dz) return ray def _doppler_shift(self, ray): vv = self.vv(ray.pulse_length, ray.beam_width) ray.set_lambda(ray.lambda0*(1 - 2*vv/c)) return ray def reflection_intensity(self, ray): dy = np.diff(ray.beam_width).mean() dz = np.diff(self.zz(ray.pulse_length, ray.beam_width), axis=0) theta = np.arctan(dz/dy) Idot = np.vstack( (np.ones(shape=(2048)), np.apply_along_axis(np.cos, 0, theta)) ) return Idot def plot_velocity(self): fig = plt.figure() ax = fig.add_subplot(111, 
projection="3d")
        Axes3D.plot_surface(ax, self._tt, self._yy, self._vv)
        ax.set_xlabel("Time [ns]")
        ax.set_ylabel("x [mm]")
        ax.set_zlabel("Velocity [km s-1]")

        fig = plt.figure()
        im = plt.pcolormesh(self._tt, self._yy, self._vv)
        cb = fig.colorbar(im)
        plt.xlabel("Time [ns]")
        plt.ylabel("x [mm]")
        cb.set_label("Velocity [km s-1]")


class Etalon:
    def __init__(self, thickness: "mm", n):
        """Initialise an etalon object

        Parameters
        ----------
        thickness : float
            thickness of the etalon
        n : float
            index of refraction of the etalon
        """
        self._n = n
        self._d = thickness

    @property
    def tau(self) -> "ns":
        return 2*self._d/c*(self._n - 1/self._n)

    def VPF(self, lambda0=.532):
        return lambda0/(2*self.tau)

    def set_VPF(self, VPF, lambda0: "um"):
        tau = lambda0/(2*VPF)
        self.set_tau(tau)

    def set_tau(self, tau: "ns"):
        """Change the thickness of the etalon to match a given delay
        """
        self._d = c*tau/(2*(self._n - 1/self._n))


class Interferometer:
    def __init__(self, etalon, tau: "ns" = .1):
        """
        Parameters
        ----------
        etalon : Etalon
            the etalon used in the interferometer, provides VPF
        tau : float
            the time resolution of the streak camera, determined
            by the width of the streak slit
        """
        self.etalon = etalon
        self.tau = tau

    def _interfere_ray(self, ray):
        """Generate the interference pattern
        """
        # get the electric field over the pulse length
        E1 = ray.E(ray.pulse_length)

        # generate the offset for the second ray
        _delta_shape = len(ray.beam_width)
        ray.add_delta(
            np.linspace(0, 100, _delta_shape).reshape(_delta_shape, 1)
        )

        # generate the second ray, which is delayed by the etalon thickness
        E2 = ray.E(ray.pulse_length - self.etalon.tau)

        # Superposition of the rays
        E = E1 + E2

        # only take the real component of the inner product (intensity)
        Icos = np.real(E*E.conj())
        return Icos

    def _add_noise(self, im, ray, target, noise_level=1.0):
        """Add detector noise to the generated fringe pattern
        """
        print("...Including noise")
        # Build a synthetic low-frequency noise envelope in Fourier space.
        sig = im[:, 500]
        sig_fft = np.fft.rfft(sig)
        noise_fft = np.zeros_like(sig_fft)
        noise_fft[3] = 50000
        noise_fft[50] = 20000
        noise_fft[200] = 5000
        noise = np.fft.irfft(noise_fft)
        noise /= noise.max()

        # Modulate the fringes by the envelope and add uniform shot noise.
        im = (im.T*noise.real).T
        im /= im.max()
        im += noise_level*np.random.random(size=im.shape)*im.std()/3
        return im

    def _convolve_streak_slit(self, im, t):
        """Blur in the time-domain to account for the width of the streak
        camera

        Parameters
        ----------
        im : 2d np array
            generated sweep
        t : np array
            array corresponding to the time of the sweep
        """
        print("...Convolving streak slit")
        dt = np.diff(t).mean()
        tpx = int(self.tau//dt)
        window = np.ones(shape=tpx)
        return convolve1d(im, window, axis=1)

    def output(self, ray, target, noise=False):
        """Generate the simulated data

        Parameters
        ----------
        ray : Ray class
            the input ray
        target : Target class
            target containing the velocity profile
        noise : bool (optional)
            add in detector noise to the generated image
        """
        I = self._interfere_ray(ray)
        I = self._convolve_streak_slit(I, ray.pulse_length)
        if noise:
            I = self._add_noise(I, ray, target)
        return I


def spatial_var_step(a: "angle", t: "ns", y: "um", max_velocity: "um/ns" = 1):
    """A velocity step-profile which varies linearly in space

    Parameters
    ----------
    a : float
        the slope of the spatially varying profile
    t : float
        the time (in ns) at which to evaluate the velocity
    y : float
        the spatial location at which to evaluate the velocity
    max_velocity : float
        the maximum velocity of the shock

    Returns
    -------
    the velocity determined by the argument parameters
    """
    assert t.shape == y.shape
    v = np.zeros_like(t)
    v[t > -y/a + 3] = max_velocity
    return v


def sin_step(freq, amp, t: "ns", y: "um", max_velocity: "um/ns" = 1):
    """A sinusoidally varying velocity profile in space

    Parameters
    ----------
    freq : float
        the frequency of the spatially varying profile
    amp : float
        the amplitude of oscillations
    t : float
        the time (in ns) at which to evaluate the velocity
    y : float
        the spatial location at which to evaluate the velocity
    max_velocity : float
        the maximum velocity of the shock

    Returns
    -------
    the velocity determined by the argument parameters
    """
    v = np.zeros_like(t)
    v[t > -amp*np.sin(freq*y/(2*np.pi)) + 3] = max_velocity
    return v


def reference_shot(save=False, noise=False):
    """Generate a reference image

    Parameters
    ----------
    save : bool (optional)
        save the generated image
    noise : bool (optional)
        add in detector noise

    Returns
    -------
    PIL Image instance
    """
    stationary_target = Target(velocity_equation="stationary")
    ray = Ray(pulse_length=10)
    ray = stationary_target.reflect_off_target(ray)
    etalon = Etalon(1, 1.5195)  # VPF doesn't matter for a stationary target
    interferometer = Interferometer(etalon=etalon)
    ref = interferometer.output(ray, stationary_target, noise)
    # scale to 8-bit grayscale (255 so the peak does not wrap in uint8)
    ref *= 255/ref.max()
    ref = ref.astype(np.uint8)
    refim = Image.fromarray(ref, mode="L")
    plt.figure()
    plt.imshow(refim, aspect='auto', cmap="gray", extent=(0, 10, -2, 2))
    plt.xlabel("Time [ns]")
    if save:
        refim.save("~/Desktop/ref.jpg", "JPEG")
    return ref


if __name__ == "__main__":
    plt.close("all")
    # sinusoidal shock-front perturbation
    velocity_equation = lambda t, y: sin_step(20, .5, t, y, max_velocity=1)

    etalon = Etalon(1, 1.5195)
    etalon.set_VPF(2., lambda0=.532)  # see the etalon/VPF sketch after this file

    # target = Target(velocity_equation="step")
    target = Target(velocity_equation=velocity_equation)
    ray = Ray(pulse_length=10)
    ray = target.reflect_off_target(ray)

    interferometer = Interferometer(etalon=etalon)
    sweep = interferometer.output(ray, target, noise=False)

    plt.figure()
    plt.imshow(sweep, aspect='auto', cmap="gray", extent=(0, 10, -2, 2))
    plt.xlabel("Time [ns]")

    # scale to 8-bit grayscale (255 so the peak does not wrap in uint8)
    sweep *= 255/sweep.max()
    sweep = sweep.astype(np.uint8)
    im = Image.fromarray(sweep, mode="L")
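The Etalon class above encodes two inverse relations: the round-trip delay tau = 2*d/c * (n - 1/n) and the velocity per fringe VPF = lambda0/(2*tau). The following is a minimal, self-contained sketch of that round trip, not part of the original module: it assumes c is the speed of light expressed in mm/ns (so a thickness in mm and a wavelength in um give tau in ns and VPF in um/ns), and the helper names are illustrative.

# Hedged sketch: etalon delay <-> VPF round trip (assumes c in mm/ns).
import math

C_MM_PER_NS = 299.792458  # speed of light in vacuum [mm/ns] (assumption)


def etalon_tau(d_mm, n):
    """Round-trip optical delay of an etalon: tau = 2*d/c * (n - 1/n)."""
    return 2.0 * d_mm / C_MM_PER_NS * (n - 1.0 / n)


def vpf(tau_ns, lambda0_um=0.532):
    """Velocity per fringe: VPF = lambda0 / (2*tau)."""
    return lambda0_um / (2.0 * tau_ns)


def thickness_for_vpf(target_vpf, n, lambda0_um=0.532):
    """Invert set_VPF/set_tau: d = c*tau / (2*(n - 1/n))."""
    tau = lambda0_um / (2.0 * target_vpf)
    return C_MM_PER_NS * tau / (2.0 * (n - 1.0 / n))


n = 1.5195                     # index of refraction used throughout the file
d = thickness_for_vpf(2.0, n)  # thickness that should give VPF = 2 um/ns
assert math.isclose(vpf(etalon_tau(d, n)), 2.0)
print(f"d = {d:.2f} mm  ->  VPF = {vpf(etalon_tau(d, n)):.3f} um/ns")

If the file's own c is likewise in mm/ns, the etalon.set_VPF(2., lambda0=.532) call in __main__ corresponds to an etalon roughly 23 mm thick.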
[ "numpy.fft.rfft", "numpy.ones", "matplotlib.pyplot.figure", "numpy.imag", "numpy.sin", "numpy.exp", "numpy.zeros_like", "numpy.meshgrid", "numpy.fft.irfft", "matplotlib.pyplot.imshow", "matplotlib.pyplot.close", "numpy.cumsum", "numpy.apply_along_axis", "numpy.linspace", "numpy.real", "mpl_toolkits.mplot3d.Axes3D.plot_surface", "numpy.ones_like", "scipy.ndimage.convolve1d", "scipy.interpolate.interp2d", "matplotlib.pyplot.pcolormesh", "matplotlib.pyplot.ylabel", "numpy.arctan", "numpy.diff", "numpy.random.random", "PIL.Image.fromarray", "matplotlib.pyplot.xlabel" ]
[((9115, 9131), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (9128, 9131), True, 'import numpy as np\n'), ((9778, 9794), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (9791, 9794), True, 'import numpy as np\n'), ((10530, 10560), 'PIL.Image.fromarray', 'Image.fromarray', (['ref'], {'mode': '"""L"""'}), "(ref, mode='L')\n", (10545, 10560), False, 'from PIL import Image\n'), ((10566, 10578), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10576, 10578), True, 'import matplotlib.pyplot as plt\n'), ((10583, 10651), 'matplotlib.pyplot.imshow', 'plt.imshow', (['refim'], {'aspect': '"""auto"""', 'cmap': '"""gray"""', 'extent': '(0, 10, -2, 2)'}), "(refim, aspect='auto', cmap='gray', extent=(0, 10, -2, 2))\n", (10593, 10651), True, 'import matplotlib.pyplot as plt\n'), ((10656, 10679), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ns]"""'], {}), "('Time [ns]')\n", (10666, 10679), True, 'import matplotlib.pyplot as plt\n'), ((10791, 10807), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10800, 10807), True, 'import matplotlib.pyplot as plt\n'), ((11247, 11259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11257, 11259), True, 'import matplotlib.pyplot as plt\n'), ((11264, 11332), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sweep'], {'aspect': '"""auto"""', 'cmap': '"""gray"""', 'extent': '(0, 10, -2, 2)'}), "(sweep, aspect='auto', cmap='gray', extent=(0, 10, -2, 2))\n", (11274, 11332), True, 'import matplotlib.pyplot as plt\n'), ((11337, 11360), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ns]"""'], {}), "('Time [ns]')\n", (11347, 11360), True, 'import matplotlib.pyplot as plt\n'), ((11435, 11467), 'PIL.Image.fromarray', 'Image.fromarray', (['sweep'], {'mode': '"""L"""'}), "(sweep, mode='L')\n", (11450, 11467), False, 'from PIL import Image\n'), ((646, 680), 'numpy.linspace', 'np.linspace', (['(0)', 'pulse_length', '(2048)'], {}), '(0, pulse_length, 2048)\n', (657, 680), True, 'import numpy as np\n'), ((699, 723), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(2048)'], {}), '(-2, 2, 2048)\n', (710, 723), True, 'import numpy as np\n'), ((753, 782), 'numpy.meshgrid', 'np.meshgrid', (['self._t', 'self._y'], {}), '(self._t, self._y)\n', (764, 782), True, 'import numpy as np\n'), ((1249, 1288), 'numpy.exp', 'np.exp', (['(1.0j * (self.phi + self._delta))'], {}), '(1.0j * (self.phi + self._delta))\n', (1255, 1288), True, 'import numpy as np\n'), ((1302, 1312), 'numpy.real', 'np.real', (['E'], {}), '(E)\n', (1309, 1312), True, 'import numpy as np\n'), ((1330, 1340), 'numpy.imag', 'np.imag', (['E'], {}), '(E)\n', (1337, 1340), True, 'import numpy as np\n'), ((1359, 1393), 'scipy.interpolate.interp2d', 'interp2d', (['self._t', 'self._y', 'E_real'], {}), '(self._t, self._y, E_real)\n', (1367, 1393), False, 'from scipy.interpolate import interp2d\n'), ((1412, 1446), 'scipy.interpolate.interp2d', 'interp2d', (['self._t', 'self._y', 'E_imag'], {}), '(self._t, self._y, E_imag)\n', (1420, 1446), False, 'from scipy.interpolate import interp2d\n'), ((1990, 2015), 'numpy.linspace', 'np.linspace', (['(-5)', '(15)', '(2048)'], {}), '(-5, 15, 2048)\n', (2001, 2015), True, 'import numpy as np\n'), ((2034, 2058), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(2048)'], {}), '(-3, 3, 2048)\n', (2045, 2058), True, 'import numpy as np\n'), ((2110, 2139), 'numpy.meshgrid', 'np.meshgrid', (['self._t', 'self._y'], {}), '(self._t, self._y)\n', (2121, 2139), True, 'import numpy as np\n'), ((2657, 2693), 
'scipy.interpolate.interp2d', 'interp2d', (['self._t', 'self._y', 'self._zz'], {}), '(self._t, self._y, self._zz)\n', (2665, 2693), False, 'from scipy.interpolate import interp2d\n'), ((2919, 2955), 'scipy.interpolate.interp2d', 'interp2d', (['self._t', 'self._y', 'self._dz'], {}), '(self._t, self._y, self._dz)\n', (2927, 2955), False, 'from scipy.interpolate import interp2d\n'), ((3096, 3132), 'scipy.interpolate.interp2d', 'interp2d', (['self._t', 'self._y', 'self._vv'], {}), '(self._t, self._y, self._vv)\n', (3104, 3132), False, 'from scipy.interpolate import interp2d\n'), ((3540, 3556), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (3553, 3556), True, 'import numpy as np\n'), ((3729, 3745), 'numpy.zeros_like', 'np.zeros_like', (['t'], {}), '(t)\n', (3742, 3745), True, 'import numpy as np\n'), ((4258, 4276), 'numpy.arctan', 'np.arctan', (['(dz / dy)'], {}), '(dz / dy)\n', (4267, 4276), True, 'import numpy as np\n'), ((4450, 4462), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4460, 4462), True, 'import matplotlib.pyplot as plt\n'), ((4522, 4575), 'mpl_toolkits.mplot3d.Axes3D.plot_surface', 'Axes3D.plot_surface', (['ax', 'self._tt', 'self._yy', 'self._vv'], {}), '(ax, self._tt, self._yy, self._vv)\n', (4541, 4575), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((4701, 4713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4711, 4713), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4771), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['self._tt', 'self._yy', 'self._vv'], {}), '(self._tt, self._yy, self._vv)\n', (4741, 4771), True, 'import matplotlib.pyplot as plt\n'), ((4810, 4833), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ns]"""'], {}), "('Time [ns]')\n", (4820, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x [mm]"""'], {}), "('x [mm]')\n", (4852, 4862), True, 'import matplotlib.pyplot as plt\n'), ((7025, 7041), 'numpy.fft.rfft', 'np.fft.rfft', (['sig'], {}), '(sig)\n', (7036, 7041), True, 'import numpy as np\n'), ((7062, 7084), 'numpy.zeros_like', 'np.zeros_like', (['sig_fft'], {}), '(sig_fft)\n', (7075, 7084), True, 'import numpy as np\n'), ((7190, 7213), 'numpy.fft.irfft', 'np.fft.irfft', (['noise_fft'], {}), '(noise_fft)\n', (7202, 7213), True, 'import numpy as np\n'), ((7930, 7948), 'numpy.ones', 'np.ones', ([], {'shape': 'tpx'}), '(shape=tpx)\n', (7937, 7948), True, 'import numpy as np\n'), ((7964, 7994), 'scipy.ndimage.convolve1d', 'convolve1d', (['im', 'window'], {'axis': '(1)'}), '(im, window, axis=1)\n', (7974, 7994), False, 'from scipy.ndimage import convolve1d\n'), ((815, 837), 'numpy.ones_like', 'np.ones_like', (['self._tt'], {}), '(self._tt)\n', (827, 837), True, 'import numpy as np\n'), ((2578, 2605), 'numpy.cumsum', 'np.cumsum', (['self._vv'], {'axis': '(1)'}), '(self._vv, axis=1)\n', (2587, 2605), True, 'import numpy as np\n'), ((7284, 7299), 'numpy.exp', 'np.exp', (['(-i / 40)'], {}), '(-i / 40)\n', (7290, 7299), True, 'import numpy as np\n'), ((2539, 2555), 'numpy.diff', 'np.diff', (['self._t'], {}), '(self._t)\n', (2546, 2555), True, 'import numpy as np\n'), ((3318, 3333), 'numpy.exp', 'np.exp', (['(-y ** 4)'], {}), '(-y ** 4)\n', (3324, 3333), True, 'import numpy as np\n'), ((3337, 3357), 'numpy.exp', 'np.exp', (['(-5 * (t - 3))'], {}), '(-5 * (t - 3))\n', (3343, 3357), True, 'import numpy as np\n'), ((4139, 4162), 'numpy.diff', 'np.diff', (['ray.beam_width'], {}), '(ray.beam_width)\n', (4146, 4162), True, 'import numpy as np\n'), 
((4314, 4333), 'numpy.ones', 'np.ones', ([], {'shape': '(2048)'}), '(shape=2048)\n', (4321, 4333), True, 'import numpy as np\n'), ((4337, 4374), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.cos', '(0)', 'theta'], {}), '(np.cos, 0, theta)\n', (4356, 4374), True, 'import numpy as np\n'), ((7427, 7458), 'numpy.random.random', 'np.random.random', ([], {'size': 'im.shape'}), '(size=im.shape)\n', (7443, 7458), True, 'import numpy as np\n'), ((7862, 7872), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (7869, 7872), True, 'import numpy as np\n'), ((6351, 6384), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '_delta_shape'], {}), '(0, 100, _delta_shape)\n', (6362, 6384), True, 'import numpy as np\n'), ((9810, 9840), 'numpy.sin', 'np.sin', (['(freq * y / (2 * np.pi))'], {}), '(freq * y / (2 * np.pi))\n', (9816, 9840), True, 'import numpy as np\n')]
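For completeness, a hedged sketch of how one row of this dump might be consumed. The layout of each extract_api tuple — (call span), qualified API name, local alias, parsed (args, kwargs), call text, (name span), flag, import statement — is inferred from the rows shown here, not from a published schema, so treat the field positions as an assumption.

# Hedged sketch: reading the extract_api field of one dataset row.
import ast


def parse_extract_api(extract_api: str):
    """The field is a Python literal: a list of per-call tuples."""
    return ast.literal_eval(extract_api)


def calls_in(code: str, extract_api: str):
    """Yield (qualified_name, source_snippet) per recorded call, assuming the
    first two fields are a (start, end) character span into `code` and the
    fully qualified API name."""
    for record in parse_extract_api(extract_api):
        (start, end), qualified = record[0], record[1]
        yield qualified, code[start:end]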