code | apis | extract_api |
---|---|---|
import numpy as np
X = 2 * np.random.randn(100, 5)
y = 2.5382 * np.cos(X[:, 3]) + X[:, 0] ** 2 - 0.5
from pysr import PySRRegressor
model = PySRRegressor(
niterations=40,
binary_operators=["+", "*"],
unary_operators=[
"cos",
"exp",
"sin",
"inv(x) = 1/x", # Custom operator (julia syntax)
],
model_selection="best",
loss="loss(x, y) = (x - y)^2", # Custom loss function (julia syntax)
)
model.fit(X, y)
print(model)
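# Hedged follow-up (not part of the original snippet): PySRRegressor follows the
# scikit-learn estimator API, so the fitted model can be evaluated directly.
y_pred = model.predict(X)
print(np.abs(y - y_pred).mean())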
|
[
"pysr.PySRRegressor",
"numpy.random.randn",
"numpy.cos"
] |
[((143, 321), 'pysr.PySRRegressor', 'PySRRegressor', ([], {'niterations': '(40)', 'binary_operators': "['+', '*']", 'unary_operators': "['cos', 'exp', 'sin', 'inv(x) = 1/x']", 'model_selection': '"""best"""', 'loss': '"""loss(x, y) = (x - y)^2"""'}), "(niterations=40, binary_operators=['+', '*'], unary_operators=\n ['cos', 'exp', 'sin', 'inv(x) = 1/x'], model_selection='best', loss=\n 'loss(x, y) = (x - y)^2')\n", (156, 321), False, 'from pysr import PySRRegressor\n'), ((28, 51), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (43, 51), True, 'import numpy as np\n'), ((65, 80), 'numpy.cos', 'np.cos', (['X[:, 3]'], {}), '(X[:, 3])\n', (71, 80), True, 'import numpy as np\n')]
|
import json
import os
import cv2
import numpy as np
from dgp.datasets.synchronized_dataset import SynchronizedScene
from dgp.utils.visualization_engine import visualize_dataset_3d, visualize_dataset_2d, visualize_dataset_sample_3d, visualize_dataset_sample_2d
from tests import TEST_DATA_DIR
def dummy_caption(dataset, idx):
return "SAMPLE"
def test_visualize_dataset_3d():
'''
Uses parametrized testing to run multiple cases for SynchronizedSceneDataset
'''
scene_json = os.path.join(
TEST_DATA_DIR, "dgp", "test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"
)
filepath = "./test_3d_vis.avi"
dataset = SynchronizedScene(
scene_json,
datum_names=['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06'],
forward_context=1,
backward_context=1,
requested_annotations=("bounding_box_2d", "bounding_box_3d")
)
visualize_dataset_3d(
dataset=dataset,
camera_datum_names=['CAMERA_01'],
lidar_datum_names=['LIDAR'],
radar_datum_names=[],
output_video_file=filepath
)
assert os.path.exists(filepath)
os.remove(filepath)
def test_visualize_dataset_2d():
'''
Uses parametrized testing to run multiple cases for SynchronizedSceneDataset
'''
scene_json = os.path.join(
TEST_DATA_DIR, "dgp", "test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"
)
filepath = "./test_2d_vis.avi"
dataset = SynchronizedScene(
scene_json,
datum_names=['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06'],
forward_context=1,
backward_context=1,
requested_annotations=("bounding_box_2d", "bounding_box_3d")
)
visualize_dataset_2d(
dataset=dataset, camera_datum_names=['CAMERA_01'], caption_fn=dummy_caption, output_video_file=filepath
)
assert os.path.exists(filepath)
os.remove(filepath)
def test_visualize_dataset_sample_3d():
'''
Uses parametrized testing to run multiple cases for SynchronizedSceneDataset
'''
scene_json = os.path.join(
TEST_DATA_DIR, "dgp", "test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"
)
dataset = SynchronizedScene(
scene_json,
datum_names=['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06'],
forward_context=1,
backward_context=1,
requested_annotations=("bounding_box_2d", "bounding_box_3d")
)
result = visualize_dataset_sample_3d(dataset=dataset, scene_idx=0, sample_idx=0, camera_datum_names=['camera_05'])
data = cv2.imread('tests/data/dgp/vis_output.png', cv2.IMREAD_COLOR)
assert np.allclose(result["camera_05"], data)
|
[
"os.path.exists",
"numpy.allclose",
"dgp.utils.visualization_engine.visualize_dataset_sample_3d",
"dgp.utils.visualization_engine.visualize_dataset_2d",
"os.path.join",
"dgp.utils.visualization_engine.visualize_dataset_3d",
"dgp.datasets.synchronized_dataset.SynchronizedScene",
"cv2.imread",
"os.remove"
] |
[((498, 611), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""dgp"""', '"""test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"""'], {}), "(TEST_DATA_DIR, 'dgp',\n 'test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json')\n", (510, 611), False, 'import os\n'), ((671, 871), 'dgp.datasets.synchronized_dataset.SynchronizedScene', 'SynchronizedScene', (['scene_json'], {'datum_names': "['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06']", 'forward_context': '(1)', 'backward_context': '(1)', 'requested_annotations': "('bounding_box_2d', 'bounding_box_3d')"}), "(scene_json, datum_names=['LIDAR', 'CAMERA_01',\n 'CAMERA_05', 'CAMERA_06'], forward_context=1, backward_context=1,\n requested_annotations=('bounding_box_2d', 'bounding_box_3d'))\n", (688, 871), False, 'from dgp.datasets.synchronized_dataset import SynchronizedScene\n'), ((914, 1073), 'dgp.utils.visualization_engine.visualize_dataset_3d', 'visualize_dataset_3d', ([], {'dataset': 'dataset', 'camera_datum_names': "['CAMERA_01']", 'lidar_datum_names': "['LIDAR']", 'radar_datum_names': '[]', 'output_video_file': 'filepath'}), "(dataset=dataset, camera_datum_names=['CAMERA_01'],\n lidar_datum_names=['LIDAR'], radar_datum_names=[], output_video_file=\n filepath)\n", (934, 1073), False, 'from dgp.utils.visualization_engine import visualize_dataset_3d, visualize_dataset_2d, visualize_dataset_sample_3d, visualize_dataset_sample_2d\n'), ((1122, 1146), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1136, 1146), False, 'import os\n'), ((1151, 1170), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (1160, 1170), False, 'import os\n'), ((1320, 1433), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""dgp"""', '"""test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"""'], {}), "(TEST_DATA_DIR, 'dgp',\n 'test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json')\n", (1332, 1433), False, 'import os\n'), ((1493, 1693), 'dgp.datasets.synchronized_dataset.SynchronizedScene', 'SynchronizedScene', (['scene_json'], {'datum_names': "['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06']", 'forward_context': '(1)', 'backward_context': '(1)', 'requested_annotations': "('bounding_box_2d', 'bounding_box_3d')"}), "(scene_json, datum_names=['LIDAR', 'CAMERA_01',\n 'CAMERA_05', 'CAMERA_06'], forward_context=1, backward_context=1,\n requested_annotations=('bounding_box_2d', 'bounding_box_3d'))\n", (1510, 1693), False, 'from dgp.datasets.synchronized_dataset import SynchronizedScene\n'), ((1737, 1866), 'dgp.utils.visualization_engine.visualize_dataset_2d', 'visualize_dataset_2d', ([], {'dataset': 'dataset', 'camera_datum_names': "['CAMERA_01']", 'caption_fn': 'dummy_caption', 'output_video_file': 'filepath'}), "(dataset=dataset, camera_datum_names=['CAMERA_01'],\n caption_fn=dummy_caption, output_video_file=filepath)\n", (1757, 1866), False, 'from dgp.utils.visualization_engine import visualize_dataset_3d, visualize_dataset_2d, visualize_dataset_sample_3d, visualize_dataset_sample_2d\n'), ((1888, 1912), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1902, 1912), False, 'import os\n'), ((1917, 1936), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (1926, 1936), False, 'import os\n'), ((2093, 2206), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""dgp"""', '"""test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json"""'], {}), "(TEST_DATA_DIR, 'dgp',\n 
'test_scene/scene_01/scene_a8dc5ed1da0923563f85ea129f0e0a83e7fe1867.json')\n", (2105, 2206), False, 'import os\n'), ((2231, 2431), 'dgp.datasets.synchronized_dataset.SynchronizedScene', 'SynchronizedScene', (['scene_json'], {'datum_names': "['LIDAR', 'CAMERA_01', 'CAMERA_05', 'CAMERA_06']", 'forward_context': '(1)', 'backward_context': '(1)', 'requested_annotations': "('bounding_box_2d', 'bounding_box_3d')"}), "(scene_json, datum_names=['LIDAR', 'CAMERA_01',\n 'CAMERA_05', 'CAMERA_06'], forward_context=1, backward_context=1,\n requested_annotations=('bounding_box_2d', 'bounding_box_3d'))\n", (2248, 2431), False, 'from dgp.datasets.synchronized_dataset import SynchronizedScene\n'), ((2484, 2593), 'dgp.utils.visualization_engine.visualize_dataset_sample_3d', 'visualize_dataset_sample_3d', ([], {'dataset': 'dataset', 'scene_idx': '(0)', 'sample_idx': '(0)', 'camera_datum_names': "['camera_05']"}), "(dataset=dataset, scene_idx=0, sample_idx=0,\n camera_datum_names=['camera_05'])\n", (2511, 2593), False, 'from dgp.utils.visualization_engine import visualize_dataset_3d, visualize_dataset_2d, visualize_dataset_sample_3d, visualize_dataset_sample_2d\n'), ((2601, 2662), 'cv2.imread', 'cv2.imread', (['"""tests/data/dgp/vis_output.png"""', 'cv2.IMREAD_COLOR'], {}), "('tests/data/dgp/vis_output.png', cv2.IMREAD_COLOR)\n", (2611, 2662), False, 'import cv2\n'), ((2674, 2712), 'numpy.allclose', 'np.allclose', (["result['camera_05']", 'data'], {}), "(result['camera_05'], data)\n", (2685, 2712), True, 'import numpy as np\n')]
|
import numpy as np
import serial
import struct
import threading
import time
from array import array
from datetime import datetime
class ImuData:
def __init__(self, t=0.0, freq=0, ypr=np.zeros(3), a=np.zeros(3), \
W=np.zeros(3)):
self.t = t
self.freq = freq
self.ypr = ypr
self.a = a
self.W = W
class Vectornav(threading.Thread):
def __init__(self, thread_id, port, baud, t0):
'''Instantiate the IMU thread.
Args:
thread_id: (int) - Thread ID
port: (string) - Port name of the IMU
baud: (int) - Baud rate of the IMU
t0: (datetime object) - Epoch
'''
threading.Thread.__init__(self)
self.thread_id = thread_id
self._lock = threading.Lock()
self._on = True
self._t0 = t0
self._port = port
self._baud = baud
self._t = (datetime.now() - t0).total_seconds()
self._ypr = np.zeros(3)
self._a = np.zeros(3)
self._W = np.zeros(3)
        # This specific message has 41 bytes. You should update this to match
        # your configuration.
self._len_payload = 41
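        # Assumed layout for this configuration (hedged, not stated in the
        # original source): after the 0xFA sync byte, the 41-byte read consists
        # of a 3-byte header, nine little-endian float32 fields
        # (yaw/pitch/roll, acceleration, angular rate) parsed in parse_data(),
        # and a 2-byte CRC.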
print('IMU: initialized')
def run(self):
'''Start the thread.
'''
print('IMU: reading from {} at {}'.format(self._port, self._baud))
# In case the port is not properly closed,
try:
temp = serial.Serial(self._port, self._baud)
temp.close()
except:
print('\033[91m' + 'Unable to open IMU port at ' + self._port
+ ':' + str(self._baud) + '\033[0m')
return
# Open the serial port and start reading.
with serial.Serial(self._port, self._baud, timeout=1) as s:
# Clear the buffer first.
print('IMU: clearing buffer')
num_bytes = s.in_waiting
s.read(num_bytes)
print('IMU: starting main loop')
while self._on:
imu_sync_detected = False
# Check if there are bytes waiting in the buffer.
num_bytes = s.in_waiting
if num_bytes == 0:
# Reduce/delete this sleep time if you are reading data at
# a faster rate.
time.sleep(0.01)
continue
# IMU sends 0xFA (int 250) as the first byte. This marks the
                # beginning of the message.
imu_sync_detected = self.check_sync_byte(s)
if not imu_sync_detected:
continue
                # If the sync byte is detected, read the rest of the message.
success = self.read_imu_data(s)
if not success:
continue
print('IMU: thread closed')
def check_sync_byte(self, s):
'''Check if the sync byte is detected.
        IMU sends 0xFA (int 250) as the first byte. This marks the beginning of
the message.
Args:
s: (serial object) - Already open serial port of the IMU.
Return:
bool - True if the sync byte is detected in the current buffer.
'''
# Iterate over all the bytes in the current buffer.
for _ in range(s.in_waiting):
byte_in = s.read(1)
# Check if the sync byte 0xFA (int 250) is detected.
int_in = int.from_bytes(byte_in, 'little')
if int_in == 250:
return True
return False
def read_imu_data(self, s):
'''Read and parse the payload of the IMU message.
Args:
s: (serial object) - Already open serial port of the IMU.
Return:
            bool - True if the operation is successful
'''
# Read data.
N = self._len_payload
data = s.read(N)
# Check if there are unexpected errors in the message.
        # The last two bytes of the payload are the checksum bytes.
checksum_array = array('B', [data[N-1], data[N-2]])
checksum = struct.unpack('H', checksum_array)[0]
# Compare the received checksum value against the calculated checksum.
crc = self.calculate_imu_crc(data[:N-2])
if not crc == checksum:
print('IMU CRC error')
return False
# If the checksum is valid, parse the data.
return self.parse_data(data)
def parse_data(self, data):
'''Parse the bytes of the sensor measurements
Args:
data: (byte array) - data read from the serial port
Return:
            bool - True if the operation is successful
'''
try:
with self._lock:
self._ypr[0] = struct.unpack('f', data[3:7])[0]
self._ypr[1] = struct.unpack('f', data[7:11])[0]
self._ypr[2] = struct.unpack('f', data[11:15])[0]
self._a[0] = struct.unpack('f', data[15:19])[0]
self._a[1] = struct.unpack('f', data[19:23])[0]
self._a[2] = struct.unpack('f', data[23:27])[0]
self._W[0] = struct.unpack('f', data[27:31])[0]
self._W[1] = struct.unpack('f', data[31:35])[0]
self._W[2] = struct.unpack('f', data[35:39])[0]
except:
print('IMU: error parsing data')
return False
return True
def calculate_imu_crc(self, data):
'''Calculate the 16-bit CRC for the given message.
Args:
data: (byte array) - data read from the serial port
Return:
unsigned short - CRC checksum value
'''
data = bytearray(data)
crc = np.array([0], dtype=np.ushort)
for i in range(len(data)):
crc[0] = (crc[0] >> 8) | (crc[0] << 8)
crc[0] ^= data[i]
crc[0] ^= (crc[0] & 0xff) >> 4
crc[0] ^= crc[0] << 12
crc[0] ^= (crc[0] & 0x00ff) << 5
return crc[0]
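        # Note (added, hedged): this mirrors the 16-bit CRC routine from the
        # VectorNav reference code; for an empty byte string the loop never
        # runs and the function returns 0.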
def output_data(self):
'''Output the current measurements.
Return:
ImuData - current IMU data
'''
with self._lock:
            data = ImuData(
                t=self._t,
                ypr=self._ypr,
                a=self._a,
                W=self._W
            )
return data
def end_thread(self):
'''Call to end the IMU thread.'''
self._on = False
print('IMU: thread close signal received')
|
[
"threading.Thread.__init__",
"array.array",
"threading.Lock",
"time.sleep",
"numpy.array",
"numpy.zeros",
"struct.unpack",
"serial.Serial",
"datetime.datetime.now"
] |
[((191, 202), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (199, 202), True, 'import numpy as np\n'), ((206, 217), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (214, 217), True, 'import numpy as np\n'), ((235, 246), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (243, 246), True, 'import numpy as np\n'), ((682, 713), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (707, 713), False, 'import threading\n'), ((771, 787), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (785, 787), False, 'import threading\n'), ((965, 976), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (973, 976), True, 'import numpy as np\n'), ((995, 1006), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1003, 1006), True, 'import numpy as np\n'), ((1025, 1036), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1033, 1036), True, 'import numpy as np\n'), ((4112, 4150), 'array.array', 'array', (['"""B"""', '[data[N - 1], data[N - 2]]'], {}), "('B', [data[N - 1], data[N - 2]])\n", (4117, 4150), False, 'from array import array\n'), ((5804, 5834), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.ushort'}), '([0], dtype=np.ushort)\n', (5812, 5834), True, 'import numpy as np\n'), ((1437, 1474), 'serial.Serial', 'serial.Serial', (['self._port', 'self._baud'], {}), '(self._port, self._baud)\n', (1450, 1474), False, 'import serial\n'), ((1728, 1776), 'serial.Serial', 'serial.Serial', (['self._port', 'self._baud'], {'timeout': '(1)'}), '(self._port, self._baud, timeout=1)\n', (1741, 1776), False, 'import serial\n'), ((4166, 4200), 'struct.unpack', 'struct.unpack', (['"""H"""', 'checksum_array'], {}), "('H', checksum_array)\n", (4179, 4200), False, 'import struct\n'), ((907, 921), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (919, 921), False, 'from datetime import datetime\n'), ((2338, 2354), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2348, 2354), False, 'import time\n'), ((4840, 4869), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[3:7]'], {}), "('f', data[3:7])\n", (4853, 4869), False, 'import struct\n'), ((4904, 4934), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[7:11]'], {}), "('f', data[7:11])\n", (4917, 4934), False, 'import struct\n'), ((4969, 5000), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[11:15]'], {}), "('f', data[11:15])\n", (4982, 5000), False, 'import struct\n'), ((5034, 5065), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[15:19]'], {}), "('f', data[15:19])\n", (5047, 5065), False, 'import struct\n'), ((5098, 5129), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[19:23]'], {}), "('f', data[19:23])\n", (5111, 5129), False, 'import struct\n'), ((5162, 5193), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[23:27]'], {}), "('f', data[23:27])\n", (5175, 5193), False, 'import struct\n'), ((5227, 5258), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[27:31]'], {}), "('f', data[27:31])\n", (5240, 5258), False, 'import struct\n'), ((5291, 5322), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[31:35]'], {}), "('f', data[31:35])\n", (5304, 5322), False, 'import struct\n'), ((5355, 5386), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[35:39]'], {}), "('f', data[35:39])\n", (5368, 5386), False, 'import struct\n')]
|
'''
Objective :-
------------
Detect and classify shapes and their locations in an image with low latency and high accuracy.
The pipeline must account for false positives and empty images.
Modules used :-
---------------
1 - OpenCV for image-processing tasks.
2 - easyocr for text-recognition tasks.
3 - threading for running text-recognition tasks on a separate thread to reduce latency.
4 - atexit to join all threads on program termination.
5 - Text_Detection, a manually designed module for text detection using the EAST detection algorithm.
Inputs :-
---------
1 - a captured frame from the camera video stream.
Outputs :-
----------
1 - whether text has been detected or not.
2 - the coordinates of the text, if detected.
3 - an array containing the detected objects and the character each object represents.
Algorithm :-
------------
1 - apply EAST text detection to the input frame.
2 - if a new character has been detected:
        a - capture the coordinates of the detected character
        b - add a new thread that will handle text recognition using easyocr
        c - create an array containing copies of the input frame rotated at different angles
        d - start the thread, which runs text recognition on the array from the previous step
        e - report that text has been detected and return its coordinates
    else:
        return the input frame and report that no text has been detected
3 - wait for all threads to finish and join them with the main thread.
4 - return an array containing the detected objects and the character each object represents.
'''
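# --- Hedged sketch (added; not part of the original module) -------------------
# Illustrates the threaded hand-off described in the docstring above:
# recognition work is pushed onto worker threads so the capture loop keeps its
# latency low, and atexit joins any outstanding workers on termination. The
# recogniser below is a trivial placeholder, not the easyocr-based Recognize
# used later in this file.
import atexit
import threading

_recognition_threads = []

def _placeholder_recognise(rotated_frames):
    # Stand-in for the real recogniser; does nothing with the frames.
    return None

def submit_recognition(rotated_frames, recognise=_placeholder_recognise):
    worker = threading.Thread(target=recognise, args=(rotated_frames,))
    worker.start()
    _recognition_threads.append(worker)
    return worker

# Join all recognition threads with the main thread at program termination.
atexit.register(lambda: [worker.join() for worker in _recognition_threads])
# ------------------------------------------------------------------------------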
from cv2 import cv2
from AlphanumericCharacterDetection.recogniser import Recognize
from os import remove, listdir
import time
import numpy as np
WHITE_LIST = ['A','B','C','c','D','E','F','G','H','I','J','K','k','L','l','M','m','N','O','o','P','p','Q','R','S','s','T','U','u','V','v','W','w','X','x','Y','y','Z','z','0','1','2','3','4','5','6','7','8','9']
def rotate_image(image, angle):
if angle == 0: return image
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def alphanum_B(image, id):
############################################################################################################## REPLACE [0]
for angle in range(0, 360, 90):
cv2.imwrite("AlphanumericCharacterDetection/results/" + str(id) + "_" + str(angle) + ".jpg", rotate_image(image, angle))
# x= cv2.imwrite("AlphanumericCharacterDetection/results/" + str(id) + ".jpg", image)
############################################################################################################## [0]
out_character = ""
out_confidence = 0
out_character = Recognize("AlphanumericCharacterDetection/results/")
if out_character is None or out_character == '' :
return None,None,None
else:
pass
############################################################################################################## REPLACE [1]
for angle in range(0, 360, 90):
remove("AlphanumericCharacterDetection/results/" + str(id) + "_" + str(angle) + ".jpg")
# remove("AlphanumericCharacterDetection/results/" + str(id) + ".jpg")
############################################################################################################## end [1]
############################################################################################################## UNCOMMENT [2]
out_character = sorted(out_character, key = lambda x: x[1],reverse=True) # sort by confidence
############### special cases ##############
# we prefer M, T, C, 4, 3 than other chars #
############################################
preferred = ['M','T','C','4', '3']
for i in preferred:
        if out_character[0][0] == i:
return out_character[0]
for i in range(len(out_character)):
if out_character[i][0] in preferred:
temp = list(out_character[i])
temp[1] += 0.1
            out_character[i] = tuple(temp)
out_character = sorted(out_character, key = lambda x: x[1],reverse=True) # sort again by confidence
out_character = out_character[0]
############################################################################################################# [2]
return out_character
if __name__ == '__main__':
image = cv2.imread("Sample10.jpg")
timer = time.perf_counter()
character = alphanum_B(image, 1)
print(time.perf_counter()-timer)
print(character)
|
[
"cv2.cv2.getRotationMatrix2D",
"cv2.cv2.warpAffine",
"cv2.cv2.imread",
"time.perf_counter",
"AlphanumericCharacterDetection.recogniser.Recognize",
"numpy.array"
] |
[((2094, 2143), 'cv2.cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (2117, 2143), False, 'from cv2 import cv2\n'), ((2154, 2228), 'cv2.cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (2168, 2228), False, 'from cv2 import cv2\n'), ((2843, 2895), 'AlphanumericCharacterDetection.recogniser.Recognize', 'Recognize', (['"""AlphanumericCharacterDetection/results/"""'], {}), "('AlphanumericCharacterDetection/results/')\n", (2852, 2895), False, 'from AlphanumericCharacterDetection.recogniser import Recognize\n'), ((4617, 4643), 'cv2.cv2.imread', 'cv2.imread', (['"""Sample10.jpg"""'], {}), "('Sample10.jpg')\n", (4627, 4643), False, 'from cv2 import cv2\n'), ((4654, 4673), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4671, 4673), False, 'import time\n'), ((2049, 2077), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (2057, 2077), True, 'import numpy as np\n'), ((4715, 4734), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4732, 4734), False, 'import time\n')]
|
"""
Custom maze tasks with dense rewards and progressively farther goals,
added for creating expert demonstrations.
"""
from typing import Dict, List, Type, Tuple
import numpy as np
from mujoco_maze.custom_maze_task import (
GoalRewardLargeUMaze,
GoalRewardRoom3x5,
GoalRewardRoom3x10,
)
from mujoco_maze.task_common import (
MazeGoal,
MazeTask,
GREEN,
euc_dist,
RewardThresholdList,
)
class Room3x5(GoalRewardRoom3x5):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class Room3x5WayPoint(Room3x5):
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale, goal, waypoints)
self.goals = [MazeGoal(np.array(goal) * scale)]
self.waypoints = []
for waypoint in waypoints:
self.waypoints.append(
MazeGoal(
np.array(waypoint) * scale,
rgb=GREEN,
custom_size=0.1 * scale / 2,
)
)
self.visited = np.zeros(len(self.waypoints), dtype=bool)
self.goal_reward = 1000
self.waypoint_reward = 0
# Precalculate distances b/w waypoints
self.rews = np.zeros(len(self.waypoints) + 1)
self.rews[0] = -euc_dist(self.waypoints[0].pos, [0, 0]) / self.scale
for i in range(1, len(self.waypoints)):
self.rews[i] = (
-euc_dist(self.waypoints[i - 1].pos, self.waypoints[i].pos) / self.scale
)
self.rews[-1] = (
-euc_dist(self.waypoints[-1].pos, self.goals[0].pos) / self.scale
)
def reward(self, obs: np.ndarray) -> float:
# If all waypoints were visited
if self.visited.all():
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = self.goal_reward
else:
# Choose next waypoint
goal_idx = np.argmax(~self.visited)
# Add all remaining distances
reward = np.sum(self.rews[goal_idx + 1 :])
if self.waypoints[goal_idx].neighbor(obs):
self.visited[goal_idx] = True
reward += self.waypoint_reward
else:
reward += -self.waypoints[goal_idx].euc_dist(obs) / self.scale
return reward
class Room3x10(GoalRewardRoom3x10):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class LargeUMaze(GoalRewardLargeUMaze):
INNER_REWARD_SCALING: float = 0.01
PENALTY: float = 0
def __init__(self, scale: float, goal: Tuple[float, float], waypoints=None) -> None:
super().__init__(scale)
self.goals = [MazeGoal(np.array(goal) * scale)]
def reward(self, obs: np.ndarray) -> float:
reward = -self.goals[0].euc_dist(obs) / self.scale
if self.termination(obs):
reward = 100.0
return reward
class ExpertTaskRegistry:
    REGISTRY: Dict[str, Type[MazeTask]] = {
"DistRoom3x5_1Goals": Room3x5,
"DistRoom3x5WayPoint_3Goals": Room3x5WayPoint,
"DistRoom3x10_1Goals": Room3x10,
"DistLargeUMaze_2Goals": LargeUMaze,
"DistLargeUMaze_4Goals": LargeUMaze,
}
GOALS = {
"DistRoom3x5_1Goals": [(4, 0)],
"DistRoom3x5WayPoint_3Goals": [(1, 0), (2, 0), (4, 0)],
"DistRoom3x10_1Goals": [(9, 0)],
"DistLargeUMaze_2Goals": [(2, 2), (0, 4)],
"DistLargeUMaze_4Goals": [(2, 1), (2, 2), (2, 3), (0, 4)],
}
REWARD_THRESHOLDS = {
"DistRoom3x5_1Goals": RewardThresholdList([-70], [-70], None),
"DistRoom3x5WayPoint_3Goals": RewardThresholdList(
[-20, -40, 70], [-20, -40, -70], None
),
"DistRoom3x10_1Goals": RewardThresholdList([-70], [-690], None),
"DistLargeUMaze_2Goals": RewardThresholdList([-300, -700], [-50, -100], None),
"DistLargeUMaze_4Goals": RewardThresholdList(
[-200, -400, -600, -800], [-25, -50, -75, -100], None
),
}
@staticmethod
def keys() -> List[str]:
return list(ExpertTaskRegistry.REGISTRY.keys())
@staticmethod
    def tasks(key: str) -> Type[MazeTask]:
return ExpertTaskRegistry.REGISTRY[key]
@staticmethod
    def goals(key: str) -> List[Tuple[float, float]]:
return ExpertTaskRegistry.GOALS[key]
@staticmethod
    def reward_thresholds(key: str) -> RewardThresholdList:
return ExpertTaskRegistry.REWARD_THRESHOLDS[key]
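    # Hedged usage sketch (added; the scale value is illustrative only):
    #   task_cls = ExpertTaskRegistry.tasks("DistRoom3x5_1Goals")
    #   goal = ExpertTaskRegistry.goals("DistRoom3x5_1Goals")[0]
    #   task = task_cls(scale=4.0, goal=goal)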
|
[
"mujoco_maze.task_common.euc_dist",
"mujoco_maze.task_common.RewardThresholdList",
"numpy.argmax",
"numpy.sum",
"numpy.array"
] |
[((4318, 4357), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-70]', '[-70]', 'None'], {}), '([-70], [-70], None)\n', (4337, 4357), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4397, 4455), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-20, -40, 70]', '[-20, -40, -70]', 'None'], {}), '([-20, -40, 70], [-20, -40, -70], None)\n', (4416, 4455), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4510, 4550), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-70]', '[-690]', 'None'], {}), '([-70], [-690], None)\n', (4529, 4550), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4585, 4637), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-300, -700]', '[-50, -100]', 'None'], {}), '([-300, -700], [-50, -100], None)\n', (4604, 4637), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((4672, 4746), 'mujoco_maze.task_common.RewardThresholdList', 'RewardThresholdList', (['[-200, -400, -600, -800]', '[-25, -50, -75, -100]', 'None'], {}), '([-200, -400, -600, -800], [-25, -50, -75, -100], None)\n', (4691, 4746), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((2338, 2362), 'numpy.argmax', 'np.argmax', (['(~self.visited)'], {}), '(~self.visited)\n', (2347, 2362), True, 'import numpy as np\n'), ((2426, 2458), 'numpy.sum', 'np.sum', (['self.rews[goal_idx + 1:]'], {}), '(self.rews[goal_idx + 1:])\n', (2432, 2458), True, 'import numpy as np\n'), ((1656, 1695), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[0].pos', '[0, 0]'], {}), '(self.waypoints[0].pos, [0, 0])\n', (1664, 1695), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((1928, 1979), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[-1].pos', 'self.goals[0].pos'], {}), '(self.waypoints[-1].pos, self.goals[0].pos)\n', (1936, 1979), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((671, 685), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (679, 685), True, 'import numpy as np\n'), ((1090, 1104), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (1098, 1104), True, 'import numpy as np\n'), ((1803, 1861), 'mujoco_maze.task_common.euc_dist', 'euc_dist', (['self.waypoints[i - 1].pos', 'self.waypoints[i].pos'], {}), '(self.waypoints[i - 1].pos, self.waypoints[i].pos)\n', (1811, 1861), False, 'from mujoco_maze.task_common import MazeGoal, MazeTask, GREEN, euc_dist, RewardThresholdList\n'), ((2981, 2995), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (2989, 2995), True, 'import numpy as np\n'), ((3454, 3468), 'numpy.array', 'np.array', (['goal'], {}), '(goal)\n', (3462, 3468), True, 'import numpy as np\n'), ((1259, 1277), 'numpy.array', 'np.array', (['waypoint'], {}), '(waypoint)\n', (1267, 1277), True, 'import numpy as np\n')]
|
from functools import lru_cache
import math
import logging
from enum import Enum
from typing import Optional, List, Tuple, Any, Union, Dict, Callable
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
import requests
import numpy as np
from google.api_core import retry
from PIL import Image
from pyproj import Transformer
from pygeotile.point import Point as PygeoPoint
from pydantic import BaseModel, validator
from pydantic.class_validators import root_validator
from labelbox.data.annotation_types import Rectangle, Point, Line, Polygon
from .base_data import BaseData
from .raster import RasterData
VALID_LAT_RANGE = range(-90, 90)
VALID_LNG_RANGE = range(-180, 180)
DEFAULT_TMS_TILE_SIZE = 256
TILE_DOWNLOAD_CONCURRENCY = 4
logger = logging.getLogger(__name__)
VectorTool = Union[Point, Line, Rectangle, Polygon]
class EPSG(Enum):
""" Provides the EPSG for tiled image assets that are currently supported.
SIMPLEPIXEL is Simple that can be used to obtain the pixel space coordinates
>>> epsg = EPSG()
"""
SIMPLEPIXEL = 1
EPSG4326 = 4326
EPSG3857 = 3857
class TiledBounds(BaseModel):
""" Bounds for a tiled image asset related to the relevant epsg.
Bounds should be Point objects.
>>> bounds = TiledBounds(epsg=EPSG.EPSG4326,
bounds=[
Point(x=-99.21052827588443, y=19.405662413477728),
Point(x=-99.20534818927473, y=19.400498983095076)
])
"""
epsg: EPSG
bounds: List[Point]
@validator('bounds')
def validate_bounds_not_equal(cls, bounds):
first_bound = bounds[0]
second_bound = bounds[1]
if first_bound.x == second_bound.x or \
first_bound.y == second_bound.y:
raise ValueError(
f"Bounds on either axes cannot be equal, currently {bounds}")
return bounds
#validate bounds are within lat,lng range if they are EPSG4326
@root_validator
def validate_bounds_lat_lng(cls, values):
epsg = values.get('epsg')
bounds = values.get('bounds')
if epsg == EPSG.EPSG4326:
for bound in bounds:
lat, lng = bound.y, bound.x
if int(lng) not in VALID_LNG_RANGE or int(
lat) not in VALID_LAT_RANGE:
raise ValueError(f"Invalid lat/lng bounds. Found {bounds}. "
f"lat must be in {VALID_LAT_RANGE}. "
f"lng must be in {VALID_LNG_RANGE}.")
return values
class TileLayer(BaseModel):
""" Url that contains the tile layer. Must be in the format:
https://c.tile.openstreetmap.org/{z}/{x}/{y}.png
>>> layer = TileLayer(
url="https://c.tile.openstreetmap.org/{z}/{x}/{y}.png",
name="slippy map tile"
)
"""
url: str
name: Optional[str] = "default"
def asdict(self) -> Dict[str, str]:
return {"tileLayerUrl": self.url, "name": self.name}
@validator('url')
def validate_url(cls, url):
xyz_format = "/{z}/{x}/{y}"
if xyz_format not in url:
raise ValueError(f"{url} needs to contain {xyz_format}")
return url
class TiledImageData(BaseData):
""" Represents tiled imagery
If specified version is 2, converts bounds from [lng,lat] to [lat,lng]
Requires the following args:
tile_layer: TileLayer
tile_bounds: TiledBounds
zoom_levels: List[int]
Optional args:
max_native_zoom: int = None
tile_size: Optional[int]
version: int = 2
alternative_layers: List[TileLayer]
>>> tiled_image_data = TiledImageData(tile_layer=TileLayer,
tile_bounds=TiledBounds,
zoom_levels=[1, 12])
"""
tile_layer: TileLayer
tile_bounds: TiledBounds
alternative_layers: List[TileLayer] = []
zoom_levels: Tuple[int, int]
max_native_zoom: Optional[int] = None
tile_size: Optional[int] = DEFAULT_TMS_TILE_SIZE
version: Optional[int] = 2
multithread: bool = True
def __post_init__(self) -> None:
if self.max_native_zoom is None:
self.max_native_zoom = self.zoom_levels[0]
def asdict(self) -> Dict[str, str]:
return {
"tileLayerUrl": self.tile_layer.url,
"bounds": [[
self.tile_bounds.bounds[0].x, self.tile_bounds.bounds[0].y
], [self.tile_bounds.bounds[1].x, self.tile_bounds.bounds[1].y]],
"minZoom": self.zoom_levels[0],
"maxZoom": self.zoom_levels[1],
"maxNativeZoom": self.max_native_zoom,
"epsg": self.tile_bounds.epsg.name,
"tileSize": self.tile_size,
"alternativeLayers": [
layer.asdict() for layer in self.alternative_layers
],
"version": self.version
}
def raster_data(self,
zoom: int = 0,
max_tiles: int = 32,
multithread=True) -> RasterData:
"""Converts the tiled image asset into a RasterData object containing an
np.ndarray.
Uses the minimum zoom provided to render the image.
"""
if self.tile_bounds.epsg == EPSG.SIMPLEPIXEL:
xstart, ystart, xend, yend = self._get_simple_image_params(zoom)
elif self.tile_bounds.epsg == EPSG.EPSG4326:
xstart, ystart, xend, yend = self._get_3857_image_params(
zoom, self.tile_bounds)
elif self.tile_bounds.epsg == EPSG.EPSG3857:
#transform to 4326
transformer = EPSGTransformer.create_geo_to_geo_transformer(
EPSG.EPSG3857, EPSG.EPSG4326)
transforming_bounds = [
transformer(self.tile_bounds.bounds[0]),
transformer(self.tile_bounds.bounds[1])
]
xstart, ystart, xend, yend = self._get_3857_image_params(
zoom, transforming_bounds)
else:
raise ValueError(f"Unsupported epsg found: {self.tile_bounds.epsg}")
self._validate_num_tiles(xstart, ystart, xend, yend, max_tiles)
rounded_tiles, pixel_offsets = list(
zip(*[
self._tile_to_pixel(pt) for pt in [xstart, ystart, xend, yend]
]))
image = self._fetch_image_for_bounds(*rounded_tiles, zoom, multithread)
arr = self._crop_to_bounds(image, *pixel_offsets)
return RasterData(arr=arr)
@property
def value(self) -> np.ndarray:
"""Returns the value of a generated RasterData object.
"""
return self.raster_data(self.zoom_levels[0],
multithread=self.multithread).value
def _get_simple_image_params(self,
zoom) -> Tuple[float, float, float, float]:
"""Computes the x and y tile bounds for fetching an image that
captures the entire labeling region (TiledData.bounds) given a specific zoom
Simple has different order of x / y than lat / lng because of how leaflet behaves
leaflet reports all points as pixel locations at a zoom of 0
"""
xend, xstart, yend, ystart = (
self.tile_bounds.bounds[1].x,
self.tile_bounds.bounds[0].x,
self.tile_bounds.bounds[1].y,
self.tile_bounds.bounds[0].y,
)
return (*[
x * (2**(zoom)) / self.tile_size
for x in [xstart, ystart, xend, yend]
],)
def _get_3857_image_params(
self, zoom: int,
bounds: TiledBounds) -> Tuple[float, float, float, float]:
"""Computes the x and y tile bounds for fetching an image that
captures the entire labeling region (TiledData.bounds) given a specific zoom
"""
lat_start, lat_end = bounds.bounds[1].y, bounds.bounds[0].y
lng_start, lng_end = bounds.bounds[1].x, bounds.bounds[0].x
# Convert to zoom 0 tile coordinates
xstart, ystart = self._latlng_to_tile(lat_start, lng_start)
xend, yend = self._latlng_to_tile(lat_end, lng_end)
# Make sure that the tiles are increasing in order
xstart, xend = min(xstart, xend), max(xstart, xend)
ystart, yend = min(ystart, yend), max(ystart, yend)
return (*[pt * 2.0**zoom for pt in [xstart, ystart, xend, yend]],)
def _latlng_to_tile(self,
lat: float,
lng: float,
zoom=0) -> Tuple[float, float]:
"""Converts lat/lng to 3857 tile coordinates
Formula found here:
https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#lon.2Flat_to_tile_numbers_2
"""
scale = 2**zoom
lat_rad = math.radians(lat)
x = (lng + 180.0) / 360.0 * scale
y = (1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * scale
return x, y
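        # Worked check of the formula above (added): at zoom 0 the world is a
        # single tile, so (lat=0, lng=0) maps to the tile-space midpoint
        # (x, y) == (0.5, 0.5).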
def _tile_to_pixel(self, tile: float) -> Tuple[int, int]:
"""Rounds a tile coordinate and reports the remainder in pixels
"""
rounded_tile = int(tile)
remainder = tile - rounded_tile
pixel_offset = int(self.tile_size * remainder)
return rounded_tile, pixel_offset
def _fetch_image_for_bounds(self,
x_tile_start: int,
y_tile_start: int,
x_tile_end: int,
y_tile_end: int,
zoom: int,
multithread=True) -> np.ndarray:
"""Fetches the tiles and combines them into a single image.
If a tile cannot be fetched, a padding of expected tile size is instead added.
"""
if multithread:
tiles = {}
with ThreadPoolExecutor(
max_workers=TILE_DOWNLOAD_CONCURRENCY) as exc:
for x in range(x_tile_start, x_tile_end + 1):
for y in range(y_tile_start, y_tile_end + 1):
tiles[(x, y)] = exc.submit(self._fetch_tile, x, y, zoom)
rows = []
for y in range(y_tile_start, y_tile_end + 1):
row = []
for x in range(x_tile_start, x_tile_end + 1):
try:
if multithread:
row.append(tiles[(x, y)].result())
else:
row.append(self._fetch_tile(x, y, zoom))
except:
row.append(
np.zeros(shape=(self.tile_size, self.tile_size, 3),
dtype=np.uint8))
rows.append(np.hstack(row))
return np.vstack(rows)
@retry.Retry(initial=1, maximum=16, multiplier=2)
def _fetch_tile(self, x: int, y: int, z: int) -> np.ndarray:
"""
Fetches the image and returns an np array.
"""
data = requests.get(self.tile_layer.url.format(x=x, y=y, z=z))
data.raise_for_status()
decoded = np.array(Image.open(BytesIO(data.content)))[..., :3]
if decoded.shape[:2] != (self.tile_size, self.tile_size):
logger.warning(f"Unexpected tile size {decoded.shape}.")
return decoded
def _crop_to_bounds(
self,
image: np.ndarray,
x_px_start: int,
y_px_start: int,
x_px_end: int,
y_px_end: int,
) -> np.ndarray:
"""This function slices off the excess pixels that are outside of the bounds.
This occurs because only full tiles can be downloaded at a time.
"""
def invert_point(pt):
# Must have at least 1 pixel for stability.
pt = max(pt, 1)
# All pixel points are relative to a single tile
# So subtracting the tile size inverts the axis
pt = pt - self.tile_size
return pt if pt != 0 else None
x_px_end, y_px_end = invert_point(x_px_end), invert_point(y_px_end)
return image[y_px_start:y_px_end, x_px_start:x_px_end, :]
def _validate_num_tiles(self, xstart: float, ystart: float, xend: float,
yend: float, max_tiles: int):
"""Calculates the number of expected tiles we would fetch.
If this is greater than the number of max tiles, raise an error.
"""
total_n_tiles = (yend - ystart + 1) * (xend - xstart + 1)
if total_n_tiles > max_tiles:
raise ValueError(f"Requested zoom results in {total_n_tiles} tiles."
f"Max allowed tiles are {max_tiles}"
f"Increase max tiles or reduce zoom level.")
@validator('zoom_levels')
def validate_zoom_levels(cls, zoom_levels):
if zoom_levels[0] > zoom_levels[1]:
raise ValueError(
f"Order of zoom levels should be min, max. Received {zoom_levels}"
)
return zoom_levels
class EPSGTransformer(BaseModel):
"""Transformer class between different EPSG's. Useful when wanting to project
in different formats.
"""
class Config:
arbitrary_types_allowed = True
transformer: Any
@staticmethod
def _is_simple(epsg: EPSG) -> bool:
return epsg == EPSG.SIMPLEPIXEL
@staticmethod
def _get_ranges(bounds: np.ndarray) -> Tuple[int, int]:
"""helper function to get the range between bounds.
returns a tuple (x_range, y_range)"""
x_range = np.max(bounds[:, 0]) - np.min(bounds[:, 0])
y_range = np.max(bounds[:, 1]) - np.min(bounds[:, 1])
return (x_range, y_range)
@staticmethod
def _min_max_x_y(bounds: np.ndarray) -> Tuple[int, int, int, int]:
"""returns the min x, max x, min y, max y of a numpy array
"""
return np.min(bounds[:, 0]), np.max(bounds[:, 0]), np.min(
bounds[:, 1]), np.max(bounds[:, 1])
@classmethod
def geo_and_pixel(cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable:
"""method to change from one projection to simple projection"""
pixel_bounds = pixel_bounds.bounds
geo_bounds_epsg = geo_bounds.epsg
geo_bounds = geo_bounds.bounds
local_bounds = np.array([(point.x, point.y) for point in pixel_bounds],
dtype=int)
        #convert geo bounds to pixel bounds. assumes geo bounds are in wgs84/EPSG4326 per leaflet
global_bounds = np.array([
PygeoPoint.from_latitude_longitude(latitude=point.y,
longitude=point.x).pixels(zoom)
for point in geo_bounds
])
        #get the range of pixels for both sets of bounds to use as a multiplication factor
local_x_range, local_y_range = cls._get_ranges(bounds=local_bounds)
global_x_range, global_y_range = cls._get_ranges(bounds=global_bounds)
if src_epsg == EPSG.SIMPLEPIXEL:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
scaled_xy = (x * (global_x_range) / (local_x_range),
y * (global_y_range) / (local_y_range))
minx, _, miny, _ = cls._min_max_x_y(bounds=global_bounds)
x, y = map(lambda i, j: i + j, scaled_xy, (minx, miny))
point = PygeoPoint.from_pixel(pixel_x=x, pixel_y=y,
zoom=zoom).latitude_longitude
#convert to the desired epsg
return Transformer.from_crs(EPSG.EPSG4326.value,
geo_bounds_epsg.value,
always_xy=True).transform(
point[1], point[0])
return transform
#handles 4326 from lat,lng
elif src_epsg == EPSG.EPSG4326:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
point_in_px = PygeoPoint.from_latitude_longitude(
latitude=y, longitude=x).pixels(zoom)
minx, _, miny, _ = cls._min_max_x_y(global_bounds)
x, y = map(lambda i, j: i - j, point_in_px, (minx, miny))
return (x * (local_x_range) / (global_x_range),
y * (local_y_range) / (global_y_range))
return transform
#handles 3857 from meters
elif src_epsg == EPSG.EPSG3857:
def transform(x: int, y: int) -> Callable[[int, int], Transformer]:
point_in_px = PygeoPoint.from_meters(meter_y=y,
meter_x=x).pixels(zoom)
minx, _, miny, _ = cls._min_max_x_y(global_bounds)
x, y = map(lambda i, j: i - j, point_in_px, (minx, miny))
return (x * (local_x_range) / (global_x_range),
y * (local_y_range) / (global_y_range))
return transform
@classmethod
def create_geo_to_geo_transformer(
cls, src_epsg: EPSG,
tgt_epsg: EPSG) -> Callable[[int, int], Transformer]:
"""method to change from one projection to another projection.
supports EPSG transformations not Simple.
"""
if cls._is_simple(epsg=src_epsg) or cls._is_simple(epsg=tgt_epsg):
raise Exception(
f"Cannot be used for Simple transformations. Found {src_epsg} and {tgt_epsg}"
)
return EPSGTransformer(transformer=Transformer.from_crs(
src_epsg.value, tgt_epsg.value, always_xy=True).transform)
@classmethod
def create_geo_to_pixel_transformer(
cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable[[int, int], Transformer]:
"""method to change from a geo projection to Simple"""
transform_function = cls.geo_and_pixel(src_epsg=src_epsg,
pixel_bounds=pixel_bounds,
geo_bounds=geo_bounds,
zoom=zoom)
return EPSGTransformer(transformer=transform_function)
@classmethod
def create_pixel_to_geo_transformer(
cls,
src_epsg,
pixel_bounds: TiledBounds,
geo_bounds: TiledBounds,
zoom=0) -> Callable[[int, int], Transformer]:
"""method to change from a geo projection to Simple"""
transform_function = cls.geo_and_pixel(src_epsg=src_epsg,
pixel_bounds=pixel_bounds,
geo_bounds=geo_bounds,
zoom=zoom)
return EPSGTransformer(transformer=transform_function)
def _get_point_obj(self, point) -> Point:
point = self.transformer(point.x, point.y)
return Point(x=point[0], y=point[1])
def __call__(
self, shape: Union[Point, Line, Rectangle, Polygon]
) -> Union[VectorTool, List[VectorTool]]:
if isinstance(shape, list):
return [self(geom) for geom in shape]
if isinstance(shape, Point):
return self._get_point_obj(shape)
if isinstance(shape, Line):
return Line(points=[self._get_point_obj(p) for p in shape.points])
if isinstance(shape, Polygon):
return Polygon(
points=[self._get_point_obj(p) for p in shape.points])
if isinstance(shape, Rectangle):
return Rectangle(start=self._get_point_obj(shape.start),
end=self._get_point_obj(shape.end))
else:
raise ValueError(f"Unsupported type found: {type(shape)}")
|
[
"logging.getLogger",
"pygeotile.point.Point.from_latitude_longitude",
"pygeotile.point.Point.from_pixel",
"math.tan",
"pydantic.validator",
"numpy.hstack",
"concurrent.futures.ThreadPoolExecutor",
"pygeotile.point.Point.from_meters",
"io.BytesIO",
"math.radians",
"numpy.max",
"numpy.array",
"labelbox.data.annotation_types.Point",
"pyproj.Transformer.from_crs",
"numpy.zeros",
"numpy.vstack",
"numpy.min",
"google.api_core.retry.Retry"
] |
[((765, 792), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (782, 792), False, 'import logging\n'), ((1532, 1551), 'pydantic.validator', 'validator', (['"""bounds"""'], {}), "('bounds')\n", (1541, 1551), False, 'from pydantic import BaseModel, validator\n'), ((3019, 3035), 'pydantic.validator', 'validator', (['"""url"""'], {}), "('url')\n", (3028, 3035), False, 'from pydantic import BaseModel, validator\n'), ((10812, 10860), 'google.api_core.retry.Retry', 'retry.Retry', ([], {'initial': '(1)', 'maximum': '(16)', 'multiplier': '(2)'}), '(initial=1, maximum=16, multiplier=2)\n', (10823, 10860), False, 'from google.api_core import retry\n'), ((12767, 12791), 'pydantic.validator', 'validator', (['"""zoom_levels"""'], {}), "('zoom_levels')\n", (12776, 12791), False, 'from pydantic import BaseModel, validator\n'), ((8837, 8854), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (8849, 8854), False, 'import math\n'), ((10790, 10805), 'numpy.vstack', 'np.vstack', (['rows'], {}), '(rows)\n', (10799, 10805), True, 'import numpy as np\n'), ((14442, 14509), 'numpy.array', 'np.array', (['[(point.x, point.y) for point in pixel_bounds]'], {'dtype': 'int'}), '([(point.x, point.y) for point in pixel_bounds], dtype=int)\n', (14450, 14509), True, 'import numpy as np\n'), ((19199, 19228), 'labelbox.data.annotation_types.Point', 'Point', ([], {'x': 'point[0]', 'y': 'point[1]'}), '(x=point[0], y=point[1])\n', (19204, 19228), False, 'from labelbox.data.annotation_types import Rectangle, Point, Line, Polygon\n'), ((13581, 13601), 'numpy.max', 'np.max', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13587, 13601), True, 'import numpy as np\n'), ((13604, 13624), 'numpy.min', 'np.min', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13610, 13624), True, 'import numpy as np\n'), ((13643, 13663), 'numpy.max', 'np.max', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13649, 13663), True, 'import numpy as np\n'), ((13666, 13686), 'numpy.min', 'np.min', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13672, 13686), True, 'import numpy as np\n'), ((13905, 13925), 'numpy.min', 'np.min', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13911, 13925), True, 'import numpy as np\n'), ((13927, 13947), 'numpy.max', 'np.max', (['bounds[:, 0]'], {}), '(bounds[:, 0])\n', (13933, 13947), True, 'import numpy as np\n'), ((13949, 13969), 'numpy.min', 'np.min', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13955, 13969), True, 'import numpy as np\n'), ((13984, 14004), 'numpy.max', 'np.max', (['bounds[:, 1]'], {}), '(bounds[:, 1])\n', (13990, 14004), True, 'import numpy as np\n'), ((9897, 9954), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'TILE_DOWNLOAD_CONCURRENCY'}), '(max_workers=TILE_DOWNLOAD_CONCURRENCY)\n', (9915, 9954), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((10758, 10772), 'numpy.hstack', 'np.hstack', (['row'], {}), '(row)\n', (10767, 10772), True, 'import numpy as np\n'), ((11142, 11163), 'io.BytesIO', 'BytesIO', (['data.content'], {}), '(data.content)\n', (11149, 11163), False, 'from io import BytesIO\n'), ((15547, 15601), 'pygeotile.point.Point.from_pixel', 'PygeoPoint.from_pixel', ([], {'pixel_x': 'x', 'pixel_y': 'y', 'zoom': 'zoom'}), '(pixel_x=x, pixel_y=y, zoom=zoom)\n', (15568, 15601), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((17740, 17808), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['src_epsg.value', 'tgt_epsg.value'], {'always_xy': '(True)'}), '(src_epsg.value, tgt_epsg.value, always_xy=True)\n', 
(17760, 17808), False, 'from pyproj import Transformer\n'), ((14686, 14757), 'pygeotile.point.Point.from_latitude_longitude', 'PygeoPoint.from_latitude_longitude', ([], {'latitude': 'point.y', 'longitude': 'point.x'}), '(latitude=point.y, longitude=point.x)\n', (14720, 14757), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((15735, 15820), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['EPSG.EPSG4326.value', 'geo_bounds_epsg.value'], {'always_xy': '(True)'}), '(EPSG.EPSG4326.value, geo_bounds_epsg.value, always_xy=True\n )\n', (15755, 15820), False, 'from pyproj import Transformer\n'), ((8927, 8944), 'math.tan', 'math.tan', (['lat_rad'], {}), '(lat_rad)\n', (8935, 8944), False, 'import math\n'), ((10632, 10699), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.tile_size, self.tile_size, 3)', 'dtype': 'np.uint8'}), '(shape=(self.tile_size, self.tile_size, 3), dtype=np.uint8)\n', (10640, 10699), True, 'import numpy as np\n'), ((16200, 16259), 'pygeotile.point.Point.from_latitude_longitude', 'PygeoPoint.from_latitude_longitude', ([], {'latitude': 'y', 'longitude': 'x'}), '(latitude=y, longitude=x)\n', (16234, 16259), True, 'from pygeotile.point import Point as PygeoPoint\n'), ((16781, 16825), 'pygeotile.point.Point.from_meters', 'PygeoPoint.from_meters', ([], {'meter_y': 'y', 'meter_x': 'x'}), '(meter_y=y, meter_x=x)\n', (16803, 16825), True, 'from pygeotile.point import Point as PygeoPoint\n')]
|
import numpy
import time
import threading
import matplotlib.pyplot
from matplotlib.animation import FuncAnimation
class VisualizationWindow:
def __init__(self, signal_collector):
self.figure, self.axes = matplotlib.pyplot.subplots(7, 1, sharex=True)
self.figure.subplots_adjust(hspace=0)
self.signal_collector = signal_collector
def create_empty_line(ax_index, *args):
return self.axes[ax_index].plot([], [], *args)[0]
self.acceleration_lines = [create_empty_line(0), create_empty_line(0), create_empty_line(0)]
self.breathing_line = create_empty_line(1)
self.ecg_line = create_empty_line(2)
self.respiration_rate_line = create_empty_line(3, "+")
self.heart_rate_line = create_empty_line(4, "+")
self.heartbeat_interval_line = create_empty_line(5, "+")
self.activity_line = create_empty_line(6, "+")
self.artists = self.acceleration_lines + [self.breathing_line, self.ecg_line, self.respiration_rate_line, self.heart_rate_line, self.heartbeat_interval_line, self.activity_line]
self.axes[0].set_ylim((-4, 4))
self.axes[1].set_ylim((-1000, 1000))
self.axes[2].set_ylim((-500, 500))
self.axes[3].set_ylim((0, 50))
self.axes[4].set_ylim((0, 120))
self.axes[5].set_ylim((0, 2))
self.axes[6].set_ylim((0, 2))
def update_plots(self, framedata):
for stream_name, stream in self.signal_collector.iterate_signal_streams():
signal_value_array = numpy.array(stream.samples, dtype=float)
x_values = numpy.arange(len(signal_value_array), dtype=float)
x_values /= stream.samplerate
x_values += stream.end_timestamp - len(signal_value_array) / stream.samplerate
if stream_name == "acceleration":
for line_i, line in enumerate(self.acceleration_lines):
line.set_xdata(x_values)
line.set_ydata(signal_value_array[:, line_i])
elif stream_name == "breathing":
self.breathing_line.set_xdata(x_values)
self.breathing_line.set_ydata(signal_value_array)
elif stream_name == "ecg":
self.ecg_line.set_xdata(x_values)
self.ecg_line.set_ydata(signal_value_array)
for stream_name, event_list in self.signal_collector.iterate_event_streams():
if len(event_list) == 0:
continue
event_data_array = numpy.array(event_list, dtype=float)
event_timestamps = event_data_array[:, 0]
event_values = event_data_array[:, 1]
event_line_object_map = {"heart_rate": self.heart_rate_line,
"respiration_rate": self.respiration_rate_line,
"heartbeat_interval": self.heartbeat_interval_line,
"activity": self.activity_line}
event_line_object = event_line_object_map[stream_name]
if event_line_object is not None:
event_line_object.set_xdata(event_timestamps)
event_line_object.set_ydata(event_values)
now = time.time()
self.axes[0].set_xlim((now - 115, now + 5))
return self.artists
def show(self):
anim = FuncAnimation(self.figure, self.update_plots, interval=1000, blit=False)
matplotlib.pyplot.show()
|
[
"matplotlib.animation.FuncAnimation",
"numpy.array",
"time.time"
] |
[((3421, 3432), 'time.time', 'time.time', ([], {}), '()\n', (3430, 3432), False, 'import time\n'), ((3562, 3634), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['self.figure', 'self.update_plots'], {'interval': '(1000)', 'blit': '(False)'}), '(self.figure, self.update_plots, interval=1000, blit=False)\n', (3575, 3634), False, 'from matplotlib.animation import FuncAnimation\n'), ((1588, 1628), 'numpy.array', 'numpy.array', (['stream.samples'], {'dtype': 'float'}), '(stream.samples, dtype=float)\n', (1599, 1628), False, 'import numpy\n'), ((2643, 2679), 'numpy.array', 'numpy.array', (['event_list'], {'dtype': 'float'}), '(event_list, dtype=float)\n', (2654, 2679), False, 'import numpy\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import os.path as fs
import keras
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation, Dropout
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
from sklearn.linear_model import Perceptron
from sklearn.metrics import f1_score
from scipy.spatial import distance
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score
import logging as logg
from sklearn.decomposition import PCA
from sklearn import preprocessing
import umap
#%%
def readHDF5file(PathToSave, SavedFileName, list_group_name):
data = []
ff = h5.File(fs.join(PathToSave, SavedFileName), 'r')
for group in list_group_name:
data.append(ff[group][...])
ff.close()
return data
def saveHDF5file(PathToSave, SavedFileName, list_group_name, data):
num_group = len(list_group_name)
num_data = len(data)
if num_group != num_data:
raise RuntimeError('Group name list and data list length do not match!')
ff = h5.File(fs.join(PathToSave, SavedFileName), 'w')
for i, group in enumerate(list_group_name):
ff.create_dataset(group, data = data[i])
ff.close()
return None
#%%
def pca_result(activations, n_comp):
embedding = PCA(n_components= n_comp).fit_transform(activations)
return embedding
def umap_result(activations, n_comp):
embedding = umap.UMAP(n_components=n_comp).fit_transform(activations)
return embedding
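# Hedged note (added): both helpers return an array of shape (n_samples, n_comp);
# e.g. pca_result(np.random.rand(100, 64), 2) and umap_result(np.random.rand(100, 64), 2)
# each yield a (100, 2) embedding.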
#%%
def TrainPerceptron(latent_space):
n = len(latent_space)
shape_ls = latent_space.shape[1]
labels = np.empty((n, 1), dtype = np.int32)
labels[:15000], labels[15000:30000], labels[30000:] = 0, 1, 2
#labels[:5000], labels[5000:10000], labels[10000:] = 0, 1, 2
y_train = np_utils.to_categorical(labels)
standardized_latent_space = preprocessing.scale(latent_space)
model = Sequential()
model.add(Dense(3, input_dim= shape_ls))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='Nadam')
model.summary()
model.fit(standardized_latent_space, y_train, epochs = 250, batch_size=128, validation_split=0.3, shuffle = True, verbose=2)
optim = keras.optimizers.SGD(lr=0.02, decay=1e-2/300)
model.compile(loss='categorical_crossentropy', optimizer=optim)
model.fit(standardized_latent_space, y_train, epochs = 300, batch_size=128, validation_split=0.3, shuffle = True, verbose=2)
predict = model.predict(standardized_latent_space, batch_size=4096)
predict = np.heaviside(predict - 0.5, 1).astype(np.int32)
score = f1_score(y_train, predict, average='micro')
return score
#%%
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
umap_shape_unet = [0, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['']
name_loss_list = ['']
name_NN_list = ['']
num_layers = [[], []]
precision = np.ones((len(name_NN_list), len(name_loss_list), 5, len(NamesDataSet),\
7, 11, 2), dtype = np.float32)
precision = precision*(-1.)
"""
for iter_NN in range(len(name_NN_list)):
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
number_layer = num_layers[iter_NN]
for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 1] = f1_score_umap
else:
f1_score = TrainPerceptron(latent_space)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score))
precision[iter_NN, iter_loss, launch_num, data_iter, li,\
dim_iter, 0] = f1_score
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron', 'perceptron.hdf5'), 'w')
ff.create_dataset('precision', precision)
ff.close()
"""
#%%
"""
NN1
"""
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['OnlyColor.hdf5',\
'OnlyH.hdf5',\
'OnlyX.hdf5',\
'Only.hdf5']
name_loss_list = ['weighted_categorical_crossentropy',\
'dice_loss']
name_NN_list = ['ezConvAutoEncoderForMnist', 'UnetСircumcised',\
'UnetWithSeparableConvСircumcised']
num_layers = [6, 7]
iter_NN = 0
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
            number_layer = range(num_layers[iter_NN])  # num_layers holds per-model layer counts
            for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
                precision = np.ones((len(compress_list), 2))
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[dim_iter, 1] = f1_score_umap
else:
                        f1_score_raw = TrainPerceptron(latent_space)  # renamed to avoid shadowing sklearn.metrics.f1_score
                        logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
                                   iter_loss + 1, len(name_loss_list),\
                                   launch_num + 1, 5,\
                                   data_iter + 1, len(NamesDataSet),\
                                   li + 1, len(number_layer),\
                                   dim_iter + 1, len(compress_list),\
                                   f1_score_raw))
                        precision[dim_iter, 0] = f1_score_raw
                        precision[dim_iter, 1] = f1_score_raw
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron',\
'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5'%(name_NN_list[0],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter)), 'w')
            ff.create_dataset('precision', data=precision)
ff.close()
#%%
"""
NN2
"""
RootPathLatentSpace = ''
logg.basicConfig(filename=fs.join(RootPathLatentSpace, "LatentSpaceLogger.log"), level=logg.INFO)
logg
umap_shape_mnist = [0, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2]
PathToDataSet = ''
PathToModel = ''
NamesDataSet = ['OnlyColor.hdf5',\
'OnlyH.hdf5',\
'OnlyX.hdf5',\
'Only.hdf5']
name_loss_list = ['weighted_categorical_crossentropy',\
'dice_loss']
name_NN_list = ['ezConvAutoEncoderForMnist', 'UnetСircumcised',\
'UnetWithSeparableConvСircumcised']
num_layers = [6, 7]
iter_NN = 0
for iter_loss in range(len(name_loss_list)):
for launch_num in range(5):
for data_iter, data in enumerate(NamesDataSet):
            number_layer = range(num_layers[iter_NN])  # num_layers holds per-model layer counts
            for li, layer_iter in enumerate(number_layer):
latent_space= readHDF5file(RootPathLatentSpace,\
'LatentSpace_Model%s_Loss%s_Launch%d_Layer%d,hdf5'%(name_NN_list[iter_NN],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter),\
['latent_space'])[0]
if iter_NN == 0:
compress_list = umap_shape_mnist
else:
compress_list = umap_shape_unet
                precision = np.ones((len(compress_list), 2))
for dim_iter, dim in enumerate(compress_list):
if dim != 0:
ls_pca = pca_result(latent_space, dim)
f1_score_pca = TrainPerceptron(ls_pca)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d pca score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_pca))
precision[dim_iter, 0] = f1_score_pca
ls_umap = umap_result(latent_space, dim)
f1_score_umap = TrainPerceptron(ls_umap)
logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d umap score = %f'%(iter_NN + 1, len(name_NN_list), \
iter_loss + 1, len(name_loss_list),\
launch_num + 1, 5,\
data_iter + 1, len(NamesDataSet),\
li + 1, len(number_layer),\
dim_iter + 1, len(compress_list),\
f1_score_umap))
precision[dim_iter, 1] = f1_score_umap
else:
                        f1_score_raw = TrainPerceptron(latent_space)  # renamed to avoid shadowing sklearn.metrics.f1_score
                        logg.info('%d / %d, %d / %d, %d / %d, %d / %d, %d / %d, %d / %d score = %f'%(iter_NN + 1, len(name_NN_list), \
                                   iter_loss + 1, len(name_loss_list),\
                                   launch_num + 1, 5,\
                                   data_iter + 1, len(NamesDataSet),\
                                   li + 1, len(number_layer),\
                                   dim_iter + 1, len(compress_list),\
                                   f1_score_raw))
                        precision[dim_iter, 0] = f1_score_raw
                        precision[dim_iter, 1] = f1_score_raw
ff = h5.File(fs.join(RootPathLatentSpace, 'preceptron',\
'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5'%(name_NN_list[0],\
name_loss_list[iter_loss],\
launch_num + 1,\
layer_iter)), 'w')
            ff.create_dataset('precision', data=precision)
ff.close()
|
[
"sklearn.metrics.f1_score",
"keras.layers.core.Activation",
"sklearn.decomposition.PCA",
"os.path.join",
"numpy.heaviside",
"keras.models.Sequential",
"keras.optimizers.SGD",
"keras.utils.np_utils.to_categorical",
"numpy.empty",
"umap.UMAP",
"sklearn.preprocessing.scale",
"keras.layers.core.Dense"
] |
[((1697, 1729), 'numpy.empty', 'np.empty', (['(n, 1)'], {'dtype': 'np.int32'}), '((n, 1), dtype=np.int32)\n', (1705, 1729), True, 'import numpy as np\n'), ((1871, 1902), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['labels'], {}), '(labels)\n', (1894, 1902), False, 'from keras.utils import np_utils\n'), ((1933, 1966), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['latent_space'], {}), '(latent_space)\n', (1952, 1966), False, 'from sklearn import preprocessing\n'), ((1980, 1992), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1990, 1992), False, 'from keras.models import Sequential\n'), ((2294, 2341), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.02)', 'decay': '(0.01 / 300)'}), '(lr=0.02, decay=0.01 / 300)\n', (2314, 2341), False, 'import keras\n'), ((2675, 2718), 'sklearn.metrics.f1_score', 'f1_score', (['y_train', 'predict'], {'average': '"""micro"""'}), "(y_train, predict, average='micro')\n", (2683, 2718), False, 'from sklearn.metrics import f1_score\n'), ((780, 814), 'os.path.join', 'fs.join', (['PathToSave', 'SavedFileName'], {}), '(PathToSave, SavedFileName)\n', (787, 814), True, 'import os.path as fs\n'), ((1161, 1195), 'os.path.join', 'fs.join', (['PathToSave', 'SavedFileName'], {}), '(PathToSave, SavedFileName)\n', (1168, 1195), True, 'import os.path as fs\n'), ((2005, 2033), 'keras.layers.core.Dense', 'Dense', (['(3)'], {'input_dim': 'shape_ls'}), '(3, input_dim=shape_ls)\n', (2010, 2033), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2048, 2069), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2058, 2069), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2793, 2846), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (2800, 2846), True, 'import os.path as fs\n'), ((6746, 6799), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (6753, 6799), True, 'import os.path as fs\n'), ((10863, 10916), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""LatentSpaceLogger.log"""'], {}), "(RootPathLatentSpace, 'LatentSpaceLogger.log')\n", (10870, 10916), True, 'import os.path as fs\n'), ((1376, 1400), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (1379, 1400), False, 'from sklearn.decomposition import PCA\n'), ((1503, 1533), 'umap.UMAP', 'umap.UMAP', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (1512, 1533), False, 'import umap\n'), ((2617, 2647), 'numpy.heaviside', 'np.heaviside', (['(predict - 0.5)', '(1)'], {}), '(predict - 0.5, 1)\n', (2629, 2647), True, 'import numpy as np\n'), ((10332, 10509), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""preceptron"""', "('perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))"], {}), "(RootPathLatentSpace, 'preceptron', \n 'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))\n", (10339, 10509), True, 'import os.path as fs\n'), ((14449, 14626), 'os.path.join', 'fs.join', (['RootPathLatentSpace', '"""preceptron"""', "('perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))"], {}), "(RootPathLatentSpace, 'preceptron', \n 
'perceptron_Model%s_Loss%s_Launch%d_Layer%d.hdf5' % (name_NN_list[0],\n name_loss_list[iter_loss], launch_num + 1, layer_iter))\n", (14456, 14626), True, 'import os.path as fs\n')]
|
import os
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from flask_ngrok import run_with_ngrok
from flask import Flask, request, send_from_directory, render_template, jsonify
# GLOBAL ACCESS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
# SETUP APPLICATION
tf.disable_eager_execution()
app = Flask(__name__, static_url_path='')
run_with_ngrok(app)
def sparse_tensor_to_strs(sparse_tensor):
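    # Convert the SparseTensorValue returned by ctc_greedy_decoder (indices, values, dense_shape)
    # into one list of vocabulary indices per batch element.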
indices= sparse_tensor[0][0]
values = sparse_tensor[0][1]
dense_shape = sparse_tensor[0][2]
strs = [ [] for i in range(dense_shape[0]) ]
string = []
ptr = 0
b = 0
for idx in range(len(indices)):
if indices[idx][0] != b:
strs[b] = string
string = []
b = indices[idx][0]
string.append(values[ptr])
ptr = ptr + 1
strs[b] = string
return strs
def normalize(image):
return (255. - image)/255.
def resize(image, height):
width = int(float(height * image.shape[1]) / image.shape[0])
sample_img = cv2.resize(image, (width, height))
return sample_img
voc_file = "vocabulary_semantic.txt"
model = "semantic_model/semantic_model.meta"
tf.reset_default_graph()
sess = tf.InteractiveSession()
# Read the dictionary
dict_file = open(voc_file,'r')
dict_list = dict_file.read().splitlines()
int2word = dict()
for word in dict_list:
word_idx = len(int2word)
int2word[word_idx] = word
dict_file.close()
# Restore weights
saver = tf.train.import_meta_graph(model)
saver.restore(sess,model[:-5])
graph = tf.get_default_graph()
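# Look up the restored model's input/output tensors by name.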
input = graph.get_tensor_by_name("model_input:0")
seq_len = graph.get_tensor_by_name("seq_lengths:0")
rnn_keep_prob = graph.get_tensor_by_name("keep_prob:0")
height_tensor = graph.get_tensor_by_name("input_height:0")
width_reduction_tensor = graph.get_tensor_by_name("width_reduction:0")
logits = tf.get_collection("logits")[0]
# Constants that are saved inside the model itself
WIDTH_REDUCTION, HEIGHT = sess.run([width_reduction_tensor, height_tensor])
decoded, _ = tf.nn.ctc_greedy_decoder(logits, seq_len)
# HOME
@app.route("/")
def root():
return render_template('index.html')
# IMAGE REQUEST
@app.route('/img/<filename>')
def send_img(filename):
return send_from_directory('', filename)
# ANDROID REQUEST
@app.route('/android/predict', methods = ['GET', 'POST'])
def login():
return 'Yeah, it works.'
# GET
@app.route('/users/<var>')
def hello_user(var):
"""
    this endpoint serves as a demo
    :param var: value taken from the URL path
:return: str
"""
return "Wow, the GET works %s!" % var
# POST
@app.route('/api/post_some_data', methods=['POST'])
def get_text_prediction():
"""
    predicts whether the requested text is ham or spam
:return: json
"""
json = request.get_json()
print(json)
if len(json['text']) == 0:
return jsonify({'error': 'invalid input'})
return jsonify({'This is the KEY': json['This is the value?']})
#UPLOAD_FOLDER = 'static/upload'
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#@app.route('/upload', methods=['GET','POST'])
#def upload():
# if flask.request.method == "POST":
# files = flask.request.files.getlist("file")
# for file in files:
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
# MODEL PREDICTION
@app.route('/predict', methods = ['GET', 'POST'])
def predict():
if request.method == 'POST':
f = request.files['file']
img = f
image = Image.open(img).convert('L')
image = np.array(image)
image = resize(image, HEIGHT)
image = normalize(image)
image = np.asarray(image).reshape(1,image.shape[0],image.shape[1],1)
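        # CTC sequence length = image width after the model's horizontal downsampling.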
seq_lengths = [ image.shape[2] / WIDTH_REDUCTION ]
prediction = sess.run(decoded,
feed_dict={
input: image,
seq_len: seq_lengths,
rnn_keep_prob: 1.0,
})
str_predictions = sparse_tensor_to_strs(prediction)
array_of_notes = []
for w in str_predictions[0]:
array_of_notes.append(int2word[w])
notes=[]
for i in array_of_notes:
if i[0:5]=="note-":
if not i[6].isdigit():
notes.append(i[5:7])
else:
notes.append(i[5])
img = Image.open(img).convert('L')
size = (img.size[0], int(img.size[1]*1.5))
layer = Image.new('RGB', size, (255,255,255))
layer.paste(img, box=None)
img_arr = np.array(layer)
height = int(img_arr.shape[0])
width = int(img_arr.shape[1])
print(img_arr.shape[0])
draw = ImageDraw.Draw(layer)
# font = ImageFont.truetype(<font-file>, <font-size>)
font = ImageFont.truetype("Aaargh.ttf", 16)
# draw.text((x, y),"Sample Text",(r,g,b))
j = width / 9
for i in notes:
draw.text((j, height-40), i, (0,0,0), font=font)
j+= (width / (len(notes) + 4))
layer.save("img/annotated.png")
return render_template('result.html')
if __name__=="__main__":
app.run()
|
[
"flask.render_template",
"flask.Flask",
"PIL.Image.new",
"numpy.array",
"PIL.ImageDraw.Draw",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.get_default_graph",
"flask.send_from_directory",
"tensorflow.compat.v1.nn.ctc_greedy_decoder",
"numpy.asarray",
"PIL.ImageFont.truetype",
"tensorflow.compat.v1.train.import_meta_graph",
"tensorflow.compat.v1.disable_eager_execution",
"flask.request.get_json",
"tensorflow.compat.v1.reset_default_graph",
"cv2.resize",
"tensorflow.compat.v1.InteractiveSession",
"PIL.Image.open",
"flask_ngrok.run_with_ngrok",
"os.path.abspath"
] |
[((414, 442), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (440, 442), True, 'import tensorflow.compat.v1 as tf\n'), ((449, 484), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""'}), "(__name__, static_url_path='')\n", (454, 484), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((485, 504), 'flask_ngrok.run_with_ngrok', 'run_with_ngrok', (['app'], {}), '(app)\n', (499, 504), False, 'from flask_ngrok import run_with_ngrok\n'), ((1291, 1315), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1313, 1315), True, 'import tensorflow.compat.v1 as tf\n'), ((1323, 1346), 'tensorflow.compat.v1.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1344, 1346), True, 'import tensorflow.compat.v1 as tf\n'), ((1587, 1620), 'tensorflow.compat.v1.train.import_meta_graph', 'tf.train.import_meta_graph', (['model'], {}), '(model)\n', (1613, 1620), True, 'import tensorflow.compat.v1 as tf\n'), ((1660, 1682), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1680, 1682), True, 'import tensorflow.compat.v1 as tf\n'), ((2153, 2194), 'tensorflow.compat.v1.nn.ctc_greedy_decoder', 'tf.nn.ctc_greedy_decoder', (['logits', 'seq_len'], {}), '(logits, seq_len)\n', (2177, 2194), True, 'import tensorflow.compat.v1 as tf\n'), ((367, 392), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'import os\n'), ((1151, 1185), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {}), '(image, (width, height))\n', (1161, 1185), False, 'import cv2\n'), ((1980, 2007), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['"""logits"""'], {}), "('logits')\n", (1997, 2007), True, 'import tensorflow.compat.v1 as tf\n'), ((2241, 2270), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2256, 2270), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((2352, 2385), 'flask.send_from_directory', 'send_from_directory', (['""""""', 'filename'], {}), "('', filename)\n", (2371, 2385), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((2870, 2888), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2886, 2888), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((3629, 3644), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3637, 3644), True, 'import numpy as np\n'), ((4610, 4649), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(255, 255, 255)'], {}), "('RGB', size, (255, 255, 255))\n", (4619, 4649), False, 'from PIL import Image\n'), ((4701, 4716), 'numpy.array', 'np.array', (['layer'], {}), '(layer)\n', (4709, 4716), True, 'import numpy as np\n'), ((4841, 4862), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['layer'], {}), '(layer)\n', (4855, 4862), False, 'from PIL import ImageDraw\n'), ((4940, 4976), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Aaargh.ttf"""', '(16)'], {}), "('Aaargh.ttf', 16)\n", (4958, 4976), False, 'from PIL import ImageFont\n'), ((5232, 5262), 'flask.render_template', 'render_template', (['"""result.html"""'], {}), "('result.html')\n", (5247, 5262), False, 'from flask import Flask, request, send_from_directory, render_template\n'), ((3584, 3599), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (3594, 3599), False, 'from PIL import Image\n'), ((3732, 3749), 'numpy.asarray', 'np.asarray', 
(['image'], {}), '(image)\n', (3742, 3749), True, 'import numpy as np\n'), ((4514, 4529), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (4524, 4529), False, 'from PIL import Image\n')]
|
import time,os,math,inspect,re,sys,random,argparse
from env import SenseEnv
from torch.autograd import Variable
import numpy as np
from itertools import count
from collections import namedtuple
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
writer = SummaryWriter()
SavedAction = namedtuple('SavedAction', ['action', 'value'])
class Policy(nn.Module):
def __init__(self,observation_space_n,action_space_n):
super(Policy, self).__init__()
self.affine1 = nn.Linear(observation_space_n, 256)
self.action1 = nn.Linear(256, 128)
self.value1 = nn.Linear(256, 128)
self.action_head = nn.Linear(128, action_space_n)
self.value_head = nn.Linear(128, 1)
self.saved_actions = []
self.rewards = []
self.init_weights()
def init_weights(self):
self.affine1.weight.data.uniform_(-0.1, 0.1)
self.action1.weight.data.uniform_(-0.1, 0.1)
self.value1.weight.data.uniform_(-0.1, 0.1)
def forward(self, x):
x = F.relu(self.affine1(x))
xa = F.relu(self.action1(x))
xv = F.relu(self.value1(x))
action_scores = self.action_head(xa)
state_values = self.value_head(xv)
return F.softmax(action_scores), state_values
class CNN(nn.Module):
def __init__(self,classification_n):
super(CNN, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2))
#self.fc = nn.Linear(7*7*32, 2)
self.fc = nn.Linear(80000, classification_n)
def forward(self, x):
x = x.unsqueeze(1).float()
out = self.layer1(x)
out = self.layer2(out)
#print("size before",out.size())
out = out.view(out.size(0), -1)
#print("size after",out.size())
out = self.fc(out)
return out
parser = argparse.ArgumentParser(description='SenseNet actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor (default: 0.99)')
parser.add_argument('--epsilon', type=float, default=0.6, metavar='G', help='epsilon value for random action (default: 0.6)')
parser.add_argument('--seed', type=int, default=42, metavar='N', help='random seed (default: 42)')
parser.add_argument('--batch_size', type=int, default=42, metavar='N', help='batch size (default: 42)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='interval between training status logs (default: 10)')
parser.add_argument('--render', action='store_true', help='render the environment')
parser.add_argument('--debug', action='store_true', help='turn on debug mode')
parser.add_argument('--gpu', action='store_true', help='use GPU')
parser.add_argument('--log', type=str, help='log experiment to tensorboard')
parser.add_argument('--model_path', type=str, help='path to store/retrieve model at')
parser.add_argument('--mode', type=str, default="train", help='train/test/all model')
args = parser.parse_args()
def select_action(state,n_actions,epsilon=0.6):
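  # Epsilon-greedy exploration: with probability epsilon take a uniformly random action,
  # otherwise sample from the policy's action distribution and remember (action, value) for the update.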
if np.random.rand() < epsilon:
return np.random.choice(n_actions)
else:
state = torch.from_numpy(state).float().unsqueeze(0)
probs, state_value = model(Variable(state))
action = probs.multinomial()
model.saved_actions.append(SavedAction(action, state_value))
return action.data[0][0]
def finish_episode():
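  # Actor-critic update: build discounted, normalized returns, reinforce each sampled action
  # with (return - value) as the reward signal, and accumulate a smooth L1 loss for the value head.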
R = 0
saved_actions = model.saved_actions
value_loss = 0
rewards = []
for r in model.rewards[::-1]:
R = r + args.gamma * R
rewards.insert(0, R)
rewards = torch.Tensor(rewards)
rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
for (action, value), r in zip(saved_actions, rewards):
reward = r - value.data[0,0]
action.reinforce(reward)
value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
optimizer.zero_grad()
final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
gradients = [torch.ones(1)] + [None] * len(saved_actions)
autograd.backward(final_nodes, gradients)
optimizer.step()
del model.rewards[:]
del model.saved_actions[:]
#train
env = SenseEnv(vars(args))
print("action space: ",env.action_space())
model = Policy(env.observation_space(),env.action_space_n())
cnn = CNN(env.classification_n())
if args.gpu and torch.cuda.is_available():
model.cuda()
cnn.cuda()
if args.model_path:
if os.path.exists(args.model_path+"/model.pkl"):
print("loading pretrained models")
model.load_state_dict(torch.load(args.model_path+"/model.pkl"))
cnn.load_state_dict(torch.load(args.model_path+"/cnn.pkl"))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
classifier_criterion = nn.CrossEntropyLoss()
classifier_optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
running_reward = 0
batch = []
labels = []
total_steps = 0
if args.mode == "train" or args.mode == "all":
for i_episode in count(1000):
observation = env.reset()
print("episode: ", i_episode)
for t in range(1000):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
if env.is_touching():
print("touching!")
#print("batch size", len(batch))
if len(batch) > args.batch_size:
#TODO GPU support
#batch = torch.from_numpy(np.asarray(batch))
batch = torch.LongTensor(torch.from_numpy(np.asarray(batch)))
labels = torch.from_numpy(np.asarray(labels))
#labels = torch.LongTensor(torch.from_numpy(np.asarray(labels)))
if args.gpu and torch.cuda.is_available():
batch = batch.cuda()
labels = labels.cuda()
batch = Variable(batch)
labels = Variable(labels)
classifier_optimizer.zero_grad()
outputs = cnn(batch)
loss = classifier_criterion(outputs, labels)
loss.backward()
classifier_optimizer.step()
print ('Loss: %.4f' %(loss.data[0]))
if args.log:
writer.add_scalar(args.log + "/loss",loss.data[0],total_steps)
batch = []
labels = []
else:
batch.append(observation.reshape(200,200))
labels.append(env.class_label)
if done:
break
running_reward = running_reward * 0.99 + t * 0.01
print("running reward ", running_reward)
total_steps +=1
finish_episode()
if i_episode % args.log_interval == 0:
if args.log:
writer.add_scalar(args.log+"/reward",running_reward,total_steps)
print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(i_episode, t, running_reward))
if running_reward > 5000: #env.spec.reward_threshold:
print("Solved! Running reward is now {} and the last episode runs to {} time steps!".format(running_reward, t))
break
if args.model_path:
    torch.save(model.state_dict(), os.path.join(args.model_path, 'model.pkl'))  # filenames match the load paths above
    torch.save(cnn.state_dict(), os.path.join(args.model_path, 'cnn.pkl'))
elif args.mode == "test" or args.mode == "all":
#test
test_labels = []
predicted_labels = []
steps_to_guess = []
correct = 0
total = 0
max_steps = 500
for i_episode in range(100):
guesses = []
print("testing on a new object")
observation = env.reset()
for t in range(max_steps):
action = select_action(observation,env.action_space_n(),args.epsilon)
observation, reward, done, info = env.step(action)
model.rewards.append(reward)
#if confidence over 90%, then use it
      if (t >= max_steps-1 and len(guesses) == 0) or env.is_touching():
x = [observation.reshape(200,200)]
x = torch.LongTensor(torch.from_numpy(np.asarray(x)))
x = Variable(x)
output = cnn(x)
prob, predicted = torch.max(output.data, 1)
correct += int(predicted[0][0] == env.class_label)
total += 1
print("predicted ", predicted[0][0], " with prob ", prob[0][0], " correct answer is: ",env.class_label)
print('Accuracy of the network: %d %%' % (100 * correct / total ))
else:
for i_episode in range(100):
observation = env.reset()
for t in range(1000):
env.render()
action = np.random.choice(env.action_space_n())
observation,reward,done,info = env.step(action)
print(observation)
|
[
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"numpy.random.rand",
"torch.max",
"torch.from_numpy",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"os.path.exists",
"torch.nn.BatchNorm2d",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.asarray",
"torch.autograd.Variable",
"collections.namedtuple",
"numpy.random.choice",
"torch.Tensor",
"numpy.finfo",
"torch.autograd.backward",
"torch.load",
"os.path.join",
"torch.nn.Conv2d",
"itertools.count",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.ones"
] |
[((408, 423), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (421, 423), False, 'from tensorboardX import SummaryWriter\n'), ((438, 484), 'collections.namedtuple', 'namedtuple', (['"""SavedAction"""', "['action', 'value']"], {}), "('SavedAction', ['action', 'value'])\n", (448, 484), False, 'from collections import namedtuple\n'), ((2076, 2144), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SenseNet actor-critic example"""'}), "(description='SenseNet actor-critic example')\n", (2099, 2144), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((4852, 4864), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4862, 4864), True, 'import torch.nn as nn\n'), ((4948, 4969), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4967, 4969), True, 'import torch.nn as nn\n'), ((3782, 3803), 'torch.Tensor', 'torch.Tensor', (['rewards'], {}), '(rewards)\n', (3794, 3803), False, 'import torch\n'), ((4240, 4281), 'torch.autograd.backward', 'autograd.backward', (['final_nodes', 'gradients'], {}), '(final_nodes, gradients)\n', (4257, 4281), True, 'import torch.autograd as autograd\n'), ((4542, 4567), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4565, 4567), False, 'import torch\n'), ((4622, 4668), 'os.path.exists', 'os.path.exists', (["(args.model_path + '/model.pkl')"], {}), "(args.model_path + '/model.pkl')\n", (4636, 4668), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((5163, 5174), 'itertools.count', 'count', (['(1000)'], {}), '(1000)\n', (5168, 5174), False, 'from itertools import count\n'), ((621, 656), 'torch.nn.Linear', 'nn.Linear', (['observation_space_n', '(256)'], {}), '(observation_space_n, 256)\n', (630, 656), True, 'import torch.nn as nn\n'), ((676, 695), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (685, 695), True, 'import torch.nn as nn\n'), ((714, 733), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (723, 733), True, 'import torch.nn as nn\n'), ((757, 787), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'action_space_n'], {}), '(128, action_space_n)\n', (766, 787), True, 'import torch.nn as nn\n'), ((810, 827), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (819, 827), True, 'import torch.nn as nn\n'), ((1770, 1804), 'torch.nn.Linear', 'nn.Linear', (['(80000)', 'classification_n'], {}), '(80000, classification_n)\n', (1779, 1804), True, 'import torch.nn as nn\n'), ((3278, 3294), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3292, 3294), True, 'import numpy as np\n'), ((3317, 3344), 'numpy.random.choice', 'np.random.choice', (['n_actions'], {}), '(n_actions)\n', (3333, 3344), True, 'import numpy as np\n'), ((1288, 1312), 'torch.nn.functional.softmax', 'F.softmax', (['action_scores'], {}), '(action_scores)\n', (1297, 1312), True, 'import torch.nn.functional as F\n'), ((1460, 1502), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(16)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(1, 16, kernel_size=5, padding=2)\n', (1469, 1502), True, 'import torch.nn as nn\n'), ((1510, 1528), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (1524, 1528), True, 'import torch.nn as nn\n'), ((1536, 1545), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1543, 1545), True, 'import torch.nn as nn\n'), ((1553, 1568), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1565, 1568), True, 'import torch.nn as nn\n'), ((1609, 1652), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['(16)', '(32)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(16, 32, kernel_size=5, padding=2)\n', (1618, 1652), True, 'import torch.nn as nn\n'), ((1660, 1678), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1674, 1678), True, 'import torch.nn as nn\n'), ((1686, 1695), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1693, 1695), True, 'import torch.nn as nn\n'), ((1703, 1718), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1715, 1718), True, 'import torch.nn as nn\n'), ((3441, 3456), 'torch.autograd.Variable', 'Variable', (['state'], {}), '(state)\n', (3449, 3456), False, 'from torch.autograd import Variable\n'), ((4193, 4206), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (4203, 4206), False, 'import torch\n'), ((4733, 4775), 'torch.load', 'torch.load', (["(args.model_path + '/model.pkl')"], {}), "(args.model_path + '/model.pkl')\n", (4743, 4775), False, 'import torch\n'), ((4799, 4839), 'torch.load', 'torch.load', (["(args.model_path + '/cnn.pkl')"], {}), "(args.model_path + '/cnn.pkl')\n", (4809, 4839), False, 'import torch\n'), ((3862, 3882), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3870, 3882), True, 'import numpy as np\n'), ((4058, 4075), 'torch.Tensor', 'torch.Tensor', (['[r]'], {}), '([r])\n', (4070, 4075), False, 'import torch\n'), ((7205, 7248), 'os.path.join', 'os.path.join', (['args.model_path', '"""policy.pkl"""'], {}), "(args.model_path, 'policy.pkl')\n", (7217, 7248), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((7288, 7328), 'os.path.join', 'os.path.join', (['args.model_path', '"""cnn.pkl"""'], {}), "(args.model_path, 'cnn.pkl')\n", (7300, 7328), False, 'import time, os, math, inspect, re, sys, random, argparse\n'), ((6003, 6018), 'torch.autograd.Variable', 'Variable', (['batch'], {}), '(batch)\n', (6011, 6018), False, 'from torch.autograd import Variable\n'), ((6038, 6054), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (6046, 6054), False, 'from torch.autograd import Variable\n'), ((8040, 8051), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (8048, 8051), False, 'from torch.autograd import Variable\n'), ((8102, 8127), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (8111, 8127), False, 'import torch\n'), ((3365, 3388), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3381, 3388), False, 'import torch\n'), ((5769, 5787), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (5779, 5787), True, 'import numpy as np\n'), ((5890, 5915), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5913, 5915), False, 'import torch\n'), ((5713, 5730), 'numpy.asarray', 'np.asarray', (['batch'], {}), '(batch)\n', (5723, 5730), True, 'import numpy as np\n'), ((8012, 8025), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (8022, 8025), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn import svm
class character:
def __init__(self, raw_character):
self.identity = None
def recognize(characters, classifier):
for char in characters:
data = np.reshape(char.image_centered, np.prod(char.image_centered.shape)).reshape(1, -1)
char.identity = classifier.predict(data)
return characters
|
[
"numpy.prod"
] |
[((253, 287), 'numpy.prod', 'np.prod', (['char.image_centered.shape'], {}), '(char.image_centered.shape)\n', (260, 287), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from skimage.viewer import utils
from skimage.viewer.utils import dialogs
from skimage.viewer.qt import QtCore, QtGui, has_qt
from numpy.testing.decorators import skipif
@skipif(not has_qt)
def test_event_loop():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(10, QtGui.QApplication.quit)
utils.start_qtapp()
@skipif(not has_qt)
def test_format_filename():
fname = dialogs._format_filename(('apple', 2))
assert fname == 'apple'
fname = dialogs._format_filename('')
assert fname is None
@skipif(not has_qt)
def test_open_file_dialog():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
filename = dialogs.open_file_dialog()
assert filename is None
@skipif(not has_qt)
def test_save_file_dialog():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
filename = dialogs.save_file_dialog()
assert filename is None
|
[
"skimage.viewer.utils.init_qtapp",
"numpy.testing.decorators.skipif",
"skimage.viewer.utils.dialogs._format_filename",
"skimage.viewer.utils.dialogs.open_file_dialog",
"skimage.viewer.qt.QtCore.QTimer",
"skimage.viewer.utils.dialogs.save_file_dialog",
"skimage.viewer.utils.start_qtapp",
"skimage.viewer.qt.QtGui.QApplication.quit"
] |
[((197, 215), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (203, 215), False, 'from numpy.testing.decorators import skipif\n'), ((367, 385), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (373, 385), False, 'from numpy.testing.decorators import skipif\n'), ((562, 580), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (568, 580), False, 'from numpy.testing.decorators import skipif\n'), ((795, 813), 'numpy.testing.decorators.skipif', 'skipif', (['(not has_qt)'], {}), '(not has_qt)\n', (801, 813), False, 'from numpy.testing.decorators import skipif\n'), ((243, 261), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (259, 261), False, 'from skimage.viewer import utils\n'), ((274, 289), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (287, 289), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((344, 363), 'skimage.viewer.utils.start_qtapp', 'utils.start_qtapp', ([], {}), '()\n', (361, 363), False, 'from skimage.viewer import utils\n'), ((426, 464), 'skimage.viewer.utils.dialogs._format_filename', 'dialogs._format_filename', (["('apple', 2)"], {}), "(('apple', 2))\n", (450, 464), False, 'from skimage.viewer.utils import dialogs\n'), ((505, 533), 'skimage.viewer.utils.dialogs._format_filename', 'dialogs._format_filename', (['""""""'], {}), "('')\n", (529, 533), False, 'from skimage.viewer.utils import dialogs\n'), ((614, 632), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (630, 632), False, 'from skimage.viewer import utils\n'), ((645, 660), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (658, 660), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((737, 763), 'skimage.viewer.utils.dialogs.open_file_dialog', 'dialogs.open_file_dialog', ([], {}), '()\n', (761, 763), False, 'from skimage.viewer.utils import dialogs\n'), ((847, 865), 'skimage.viewer.utils.init_qtapp', 'utils.init_qtapp', ([], {}), '()\n', (863, 865), False, 'from skimage.viewer import utils\n'), ((878, 893), 'skimage.viewer.qt.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (891, 893), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((970, 996), 'skimage.viewer.utils.dialogs.save_file_dialog', 'dialogs.save_file_dialog', ([], {}), '()\n', (994, 996), False, 'from skimage.viewer.utils import dialogs\n'), ((695, 720), 'skimage.viewer.qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (718, 720), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n'), ((928, 953), 'skimage.viewer.qt.QtGui.QApplication.quit', 'QtGui.QApplication.quit', ([], {}), '()\n', (951, 953), False, 'from skimage.viewer.qt import QtCore, QtGui, has_qt\n')]
|
from aoi_envs.MultiAgent import MultiAgentEnv
import numpy as np
class MobileEnv(MultiAgentEnv):
def __init__(self, agent_velocity=1.0, initialization='Random', biased_velocities=False, flocking=False,
random_acceleration=True, aoi_reward=True, flocking_position_control=False, num_agents=40):
super().__init__(eavesdropping=True, fractional_power_levels=[0.25, 0.0], initialization=initialization,
aoi_reward=aoi_reward, num_agents=num_agents)
self.ts_length = 0.1
self.max_velocity = agent_velocity * self.distance_scale / self.ts_length / self.episode_length
self.max_acceleration = 10.0
self.gain = 1.0
self.recompute_solution = True
self.mobile_agents = True
self.flocking = flocking
self.flocking_position_control = flocking_position_control
self.random_acceleration = random_acceleration
self.biased_velocities = biased_velocities
def reset(self):
super().reset()
if self.random_acceleration or (self.flocking and not self.biased_velocities):
self.x[:, 2:4] = np.random.uniform(-self.max_velocity, self.max_velocity, size=(self.n_agents, 2))
elif self.flocking and self.biased_velocities:
self.x[:, 2:4] = np.random.uniform(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(self.n_agents, 2))
self.x[:, 2:4] = self.x[:, 2:4] + np.random.uniform(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(1, 2))
else:
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
self.x[:, 2] = self.max_velocity * np.cos(angle)
self.x[:, 3] = self.max_velocity * np.sin(angle)
self.network_buffer[:, :, 4:6] = np.where(self.diag,
self.x[:, 2:4].reshape(self.n_agents, 1, 2),
self.network_buffer[:, :, 4:6])
return self.get_relative_network_buffer_as_dict()
def step(self, attempted_transmissions):
self.move_agents()
return super().step(attempted_transmissions)
def potential_grad(self, pos_diff, r2):
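        # Gradient w.r.t. the pairwise position difference of the potential 1/r2 + log(r2),
        # where r2 is the squared inter-agent distance.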
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
return grad
def move_agents(self):
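        # Integrate positions one time step; flocking agents accelerate toward the mean of the
        # velocities they currently know about (velocity consensus), random-acceleration agents get
        # Gaussian accelerations, and non-flocking agents bounce off the arena boundary.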
new_pos = self.x[:, 0:2] + self.x[:, 2:4] * self.ts_length
if self.flocking or self.random_acceleration:
if self.flocking:
known_velocities = np.copy(self.network_buffer[:, :, 4:6])
known_velocities[known_velocities == 0] = np.nan
known_velocities -= (self.x[:, 2:4])[:, np.newaxis, :]
acceleration = np.nanmean(known_velocities, axis=1)
if self.flocking_position_control:
steady_state_scale = self.r_max / 5.0
known_positions = np.copy(self.network_buffer[:, :, 2:4])
known_positions[known_positions == 0] = np.nan
known_positions = (known_positions - (self.x[:, 2:4])[:, np.newaxis, :]) / steady_state_scale
r2 = np.sum(known_positions ** 2, axis=2)[:, :, np.newaxis]
grad = -2.0 * np.divide(known_positions, np.multiply(r2, r2)) + 2 * np.divide(known_positions, r2)
acceleration += np.nansum(grad, axis=1) * steady_state_scale
else:
# acceleration = np.random.uniform(-self.max_acceleration, self.max_acceleration, size=(self.n_agents, 2))
acceleration = np.random.normal(0., self.max_acceleration / 3.0, size=(self.n_agents, 2))
acceleration = np.clip(acceleration, -self.max_acceleration, self.max_acceleration)
self.x[:, 2:4] += self.gain * self.ts_length * acceleration
self.x[:, 2:4] = np.clip(self.x[:, 2:4], -self.max_velocity, self.max_velocity)
if self.flocking:
self.x[:, 0:2] = new_pos
else:
self.x[:, 0:2] = np.clip(new_pos[:, 0:2], -self.r_max, self.r_max)
self.x[:, 2:4] = np.where((self.x[:, 0:2] - new_pos[:, 0:2]) == 0, self.x[:, 2:4], -self.x[:, 2:4])
self.network_buffer[:, :, 2:6] = np.where(self.diag,
self.x[:, 0:4].reshape(self.n_agents, 1, 4),
self.network_buffer[:, :, 2:6])
|
[
"numpy.clip",
"numpy.copy",
"numpy.random.normal",
"numpy.multiply",
"numpy.where",
"numpy.nanmean",
"numpy.sum",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.nansum",
"numpy.divide"
] |
[((1140, 1226), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_velocity)', 'self.max_velocity'], {'size': '(self.n_agents, 2)'}), '(-self.max_velocity, self.max_velocity, size=(self.\n n_agents, 2))\n', (1157, 1226), True, 'import numpy as np\n'), ((3705, 3773), 'numpy.clip', 'np.clip', (['acceleration', '(-self.max_acceleration)', 'self.max_acceleration'], {}), '(acceleration, -self.max_acceleration, self.max_acceleration)\n', (3712, 3773), True, 'import numpy as np\n'), ((3875, 3937), 'numpy.clip', 'np.clip', (['self.x[:, 2:4]', '(-self.max_velocity)', 'self.max_velocity'], {}), '(self.x[:, 2:4], -self.max_velocity, self.max_velocity)\n', (3882, 3937), True, 'import numpy as np\n'), ((4045, 4094), 'numpy.clip', 'np.clip', (['new_pos[:, 0:2]', '(-self.r_max)', 'self.r_max'], {}), '(new_pos[:, 0:2], -self.r_max, self.r_max)\n', (4052, 4094), True, 'import numpy as np\n'), ((4124, 4209), 'numpy.where', 'np.where', (['(self.x[:, 0:2] - new_pos[:, 0:2] == 0)', 'self.x[:, 2:4]', '(-self.x[:, 2:4])'], {}), '(self.x[:, 0:2] - new_pos[:, 0:2] == 0, self.x[:, 2:4], -self.x[:, 2:4]\n )\n', (4132, 4209), True, 'import numpy as np\n'), ((1306, 1404), 'numpy.random.uniform', 'np.random.uniform', (['(0.5 * -self.max_velocity)', '(0.5 * self.max_velocity)'], {'size': '(self.n_agents, 2)'}), '(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(\n self.n_agents, 2))\n', (1323, 1404), True, 'import numpy as np\n'), ((2277, 2300), 'numpy.divide', 'np.divide', (['pos_diff', 'r2'], {}), '(pos_diff, r2)\n', (2286, 2300), True, 'import numpy as np\n'), ((2536, 2575), 'numpy.copy', 'np.copy', (['self.network_buffer[:, :, 4:6]'], {}), '(self.network_buffer[:, :, 4:6])\n', (2543, 2575), True, 'import numpy as np\n'), ((2743, 2779), 'numpy.nanmean', 'np.nanmean', (['known_velocities'], {'axis': '(1)'}), '(known_velocities, axis=1)\n', (2753, 2779), True, 'import numpy as np\n'), ((3602, 3677), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(self.max_acceleration / 3.0)'], {'size': '(self.n_agents, 2)'}), '(0.0, self.max_acceleration / 3.0, size=(self.n_agents, 2))\n', (3618, 3677), True, 'import numpy as np\n'), ((1446, 1532), 'numpy.random.uniform', 'np.random.uniform', (['(0.5 * -self.max_velocity)', '(0.5 * self.max_velocity)'], {'size': '(1, 2)'}), '(0.5 * -self.max_velocity, 0.5 * self.max_velocity, size=(\n 1, 2))\n', (1463, 1532), True, 'import numpy as np\n'), ((1570, 1616), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {'size': '(self.n_agents,)'}), '(0, 2, size=(self.n_agents,))\n', (1587, 1616), True, 'import numpy as np\n'), ((1664, 1677), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1670, 1677), True, 'import numpy as np\n'), ((1725, 1738), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1731, 1738), True, 'import numpy as np\n'), ((2250, 2269), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (2261, 2269), True, 'import numpy as np\n'), ((2928, 2967), 'numpy.copy', 'np.copy', (['self.network_buffer[:, :, 2:4]'], {}), '(self.network_buffer[:, :, 2:4])\n', (2935, 2967), True, 'import numpy as np\n'), ((3174, 3210), 'numpy.sum', 'np.sum', (['(known_positions ** 2)'], {'axis': '(2)'}), '(known_positions ** 2, axis=2)\n', (3180, 3210), True, 'import numpy as np\n'), ((3384, 3407), 'numpy.nansum', 'np.nansum', (['grad'], {'axis': '(1)'}), '(grad, axis=1)\n', (3393, 3407), True, 'import numpy as np\n'), ((3317, 3347), 'numpy.divide', 'np.divide', (['known_positions', 'r2'], {}), '(known_positions, r2)\n', (3326, 3347), True, 'import 
numpy as np\n'), ((3290, 3309), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (3301, 3309), True, 'import numpy as np\n')]
|
from torch.autograd import Variable
import torch.nn.functional as F
import scripts.utils as utils
import torch.nn as nn
import numpy as np
import torch
class CrossEntropy2d(nn.Module):
def __init__(self, size_average=True, ignore_label=255):
super(CrossEntropy2d, self).__init__()
self.size_average = size_average
self.ignore_label = ignore_label
def forward(self, predict, target, weight=None):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
assert predict.size(2) == target.size(1), "{0} vs {1} ".format(predict.size(2), target.size(1))
        assert predict.size(3) == target.size(2), "{0} vs {1} ".format(predict.size(3), target.size(2))
n, c, h, w = predict.size()
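        # Keep only pixels whose label is valid (>= 0 and not ignore_label).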
target_mask = (target >= 0) * (target != self.ignore_label)
target = target[target_mask]
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, weight=weight, size_average=self.size_average)
return loss
def cross_entropy2d(input, target, weight=None, size_average=True):
# 1. input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# 2. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 3. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 4. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, ignore_index=250, weight=weight, size_average=False)
if size_average:
loss /= mask.data.sum()
# loss /= mask.sum().data[0]
return loss
def bootstrapped_cross_entropy2d(input, target, K, weight=None, size_average=False):
"""A categorical cross entropy loss for 4D tensors.
We assume the following layout: (batch, classes, height, width)
Args:
        input: The network outputs (logits), of shape (batch, classes, height, width).
        target: The ground-truth labels, of shape (batch, height, width).
K: The number of pixels to select in the bootstrapping process.
The total number of pixels is determined as 512 * multiplier.
Returns:
The pixel-bootstrapped cross entropy loss.
"""
batch_size = input.size()[0]
def _bootstrap_xentropy_single(input, target, K, weight=None, size_average=False):
n, c, h, w = input.size()
# 1. The log softmax. log_p: (n, c, h, w)
log_p = F.log_softmax(input, dim=1)
# 2. log_p: (n*h*w, c) - contiguous() required if transpose() is used before view().
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
log_p = log_p[target.view(n * h * w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
# 3. target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, ignore_index=250,
reduce=False, size_average=size_average)
# For each element in the batch, collect the top K worst predictions
topk_loss, _ = loss.topk(K)
reduced_topk_loss = topk_loss.sum() / K
return reduced_topk_loss
loss = 0.0
# Bootstrap from each image not entire batch
for i in range(batch_size):
loss += _bootstrap_xentropy_single(input=torch.unsqueeze(input[i], 0),
target=torch.unsqueeze(target[i], 0),
K=K,
weight=weight,
size_average=size_average)
return loss / float(batch_size)
class FocalLoss2D(nn.Module):
"""
Focal Loss, which is proposed in:
"Focal Loss for Dense Object Detection (https://arxiv.org/abs/1708.02002v2)"
"""
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25, gamma=2, size_average=True):
"""
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
:param num_classes: (int) num of the classes
:param ignore_label: (int) ignore label
:param alpha: (1D Tensor or Variable) the scalar factor
:param gamma: (float) gamma > 0;
reduces the relative loss for well-classified examples (probabilities > .5),
putting more focus on hard, mis-classified examples
:param size_average: (bool): By default, the losses are averaged over observations for each mini-batch.
If the size_average is set to False, the losses are
instead summed for each mini-batch.
"""
super(FocalLoss2D, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.num_classes = num_classes
self.ignore_label = ignore_label
self.size_average = size_average
self.one_hot = Variable(torch.eye(self.num_classes))
def forward(self, cls_preds, cls_targets):
"""
:param cls_preds: (n, c, h, w)
:param cls_targets: (n, h, w)
:return:
"""
assert not cls_targets.requires_grad
assert cls_targets.dim() == 3
assert cls_preds.size(0) == cls_targets.size(0), "{0} vs {1} ".format(cls_preds.size(0), cls_targets.size(0))
assert cls_preds.size(2) == cls_targets.size(1), "{0} vs {1} ".format(cls_preds.size(2), cls_targets.size(1))
        assert cls_preds.size(3) == cls_targets.size(2), "{0} vs {1} ".format(cls_preds.size(3), cls_targets.size(2))
if cls_preds.is_cuda:
self.one_hot = self.one_hot.cuda()
n, c, h, w = cls_preds.size()
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1. target reshape and one-hot encode
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1.1. target: (n*h*w,)
cls_targets = cls_targets.view(n * h * w, 1)
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = cls_targets[target_mask]
cls_targets = self.one_hot.index_select(dim=0, index=cls_targets)
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2. compute focal loss for multi-classification
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2.1. The softmax. prob: (n, c, h, w)
prob = F.softmax(cls_preds, dim=1)
# 2.2. prob: (n*h*w, c) - contiguous() required if transpose() is used before view().
prob = prob.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
prob = prob[target_mask.repeat(1, c)]
prob = prob.view(-1, c) # (n*h*w, c)
probs = torch.clamp((prob * cls_targets).sum(1).view(-1, 1), min=1e-8, max=1.0)
batch_loss = -self.alpha * (torch.pow((1 - probs), self.gamma)) * probs.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
class SemanticEncodingLoss(nn.Module):
def __init__(self, num_classes=19, ignore_label=250, alpha=0.25):
super(SemanticEncodingLoss, self).__init__()
self.alpha = alpha
self.num_classes = num_classes
self.ignore_label = ignore_label
def unique_encode(self, cls_targets):
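        # Build a per-image multi-hot vector marking which of the num_classes labels occur
        # in that image's target map (the target of the semantic-encoding loss).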
batch_size, _, _ = cls_targets.size()
target_mask = (cls_targets >= 0) * (cls_targets != self.ignore_label)
cls_targets = [cls_targets[idx].masked_select(target_mask[idx]) for idx in np.arange(batch_size)]
# unique_cls = [np.unique(label.numpy(), return_counts=True) for label in cls_targets]
unique_cls = [np.unique(label.numpy()) for label in cls_targets]
encode = np.zeros((batch_size, self.num_classes), dtype=np.uint8)
for idx in np.arange(batch_size):
np.put(encode[idx], unique_cls[idx], 1)
return torch.from_numpy(encode).float()
def forward(self, predicts, enc_cls_target, size_average=True):
se_loss = F.binary_cross_entropy_with_logits(predicts, enc_cls_target, weight=None,
size_average=size_average)
return self.alpha * se_loss
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Lovasz-Softmax
# <NAME> 2018 ESAT-PSI KU Leuven
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / union
ious.append(iou)
    iou = utils.mean(ious)    # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / union)
ious.append(iou)
    ious = list(map(utils.mean, zip(*ious)))  # mean across images if per_image (materialise for Python 3)
return 100 * np.array(ious)
def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
        # per-image mode: add a batch dimension so flatten_probas sees (1, C, H, W)
        loss = utils.mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
                                               only_present=only_present)
                          for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present)
return loss
def lovasz_softmax_flat(probas, labels, only_present=False):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
"""
C = probas.size(1)
losses = []
for c in range(C):
fg = (labels == c).float() # foreground for class c
if only_present and fg.sum() == 0:
continue
errors = (fg - probas[:, c]).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, lovasz_grad(fg_sorted)))
return utils.mean(losses)
def flatten_probas(scores, labels, ignore=None):
"""
Flattens predictions in the batch
"""
B, C, H, W = scores.size()
scores = scores.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vscores, vlabels
if __name__ == "__main__":
from torch.autograd import Variable
while True:
dummy_in = Variable(torch.randn(2, 3, 32, 32), requires_grad=True)
dummy_gt = Variable(torch.LongTensor(2, 32, 32).random_(0, 3))
dummy_in = F.softmax(dummy_in, dim=1)
loss = lovasz_softmax(dummy_in, dummy_gt, ignore=255)
print(loss.data[0])
|
[
"torch.sort",
"torch.nn.functional.nll_loss",
"numpy.put",
"torch.eye",
"torch.unsqueeze",
"torch.LongTensor",
"scripts.utils.mean",
"torch.pow",
"torch.from_numpy",
"torch.randn",
"numpy.array",
"numpy.zeros",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"numpy.arange",
"torch.nn.functional.binary_cross_entropy_with_logits"
] |
[((1750, 1777), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (1763, 1777), True, 'import torch.nn.functional as F\n'), ((2124, 2202), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_p', 'target'], {'ignore_index': '(250)', 'weight': 'weight', 'size_average': '(False)'}), '(log_p, target, ignore_index=250, weight=weight, size_average=False)\n', (2134, 2202), True, 'import torch.nn.functional as F\n'), ((10048, 10064), 'scripts.utils.mean', 'utils.mean', (['ious'], {}), '(ious)\n', (10058, 10064), True, 'import scripts.utils as utils\n'), ((12524, 12542), 'scripts.utils.mean', 'utils.mean', (['losses'], {}), '(losses)\n', (12534, 12542), True, 'import scripts.utils as utils\n'), ((1460, 1539), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predict', 'target'], {'weight': 'weight', 'size_average': 'self.size_average'}), '(predict, target, weight=weight, size_average=self.size_average)\n', (1475, 1539), True, 'import torch.nn.functional as F\n'), ((3062, 3089), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (3075, 3089), True, 'import torch.nn.functional as F\n'), ((3468, 3571), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_p', 'target'], {'weight': 'weight', 'ignore_index': '(250)', 'reduce': '(False)', 'size_average': 'size_average'}), '(log_p, target, weight=weight, ignore_index=250, reduce=False,\n size_average=size_average)\n', (3478, 3571), True, 'import torch.nn.functional as F\n'), ((7059, 7086), 'torch.nn.functional.softmax', 'F.softmax', (['cls_preds'], {'dim': '(1)'}), '(cls_preds, dim=1)\n', (7068, 7086), True, 'import torch.nn.functional as F\n'), ((8396, 8452), 'numpy.zeros', 'np.zeros', (['(batch_size, self.num_classes)'], {'dtype': 'np.uint8'}), '((batch_size, self.num_classes), dtype=np.uint8)\n', (8404, 8452), True, 'import numpy as np\n'), ((8473, 8494), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (8482, 8494), True, 'import numpy as np\n'), ((8684, 8788), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['predicts', 'enc_cls_target'], {'weight': 'None', 'size_average': 'size_average'}), '(predicts, enc_cls_target, weight=None,\n size_average=size_average)\n', (8718, 8788), True, 'import torch.nn.functional as F\n'), ((10942, 10956), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (10950, 10956), True, 'import numpy as np\n'), ((12348, 12386), 'torch.sort', 'torch.sort', (['errors', '(0)'], {'descending': '(True)'}), '(errors, 0, descending=True)\n', (12358, 12386), False, 'import torch\n'), ((13236, 13262), 'torch.nn.functional.softmax', 'F.softmax', (['dummy_in'], {'dim': '(1)'}), '(dummy_in, dim=1)\n', (13245, 13262), True, 'import torch.nn.functional as F\n'), ((5597, 5624), 'torch.eye', 'torch.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (5606, 5624), False, 'import torch\n'), ((8508, 8547), 'numpy.put', 'np.put', (['encode[idx]', 'unique_cls[idx]', '(1)'], {}), '(encode[idx], unique_cls[idx], 1)\n', (8514, 8547), True, 'import numpy as np\n'), ((13098, 13123), 'torch.randn', 'torch.randn', (['(2)', '(3)', '(32)', '(32)'], {}), '(2, 3, 32, 32)\n', (13109, 13123), False, 'import torch\n'), ((3936, 3964), 'torch.unsqueeze', 'torch.unsqueeze', (['input[i]', '(0)'], {}), '(input[i], 0)\n', (3951, 3964), False, 'import torch\n'), ((4016, 4045), 'torch.unsqueeze', 'torch.unsqueeze', (['target[i]', '(0)'], {}), '(target[i], 0)\n', (4031, 4045), False, 
'import torch\n'), ((7475, 7507), 'torch.pow', 'torch.pow', (['(1 - probs)', 'self.gamma'], {}), '(1 - probs, self.gamma)\n', (7484, 7507), False, 'import torch\n'), ((8186, 8207), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (8195, 8207), True, 'import numpy as np\n'), ((8564, 8588), 'torch.from_numpy', 'torch.from_numpy', (['encode'], {}), '(encode)\n', (8580, 8588), False, 'import torch\n'), ((13173, 13200), 'torch.LongTensor', 'torch.LongTensor', (['(2)', '(32)', '(32)'], {}), '(2, 32, 32)\n', (13189, 13200), False, 'import torch\n')]
|
import logging
import tflite
import numpy as np
from tflite2onnx import mapping
from tflite2onnx.op.common import Operator
from tflite2onnx.op.binary import PowerWrapper
logger = logging.getLogger('tflite2onnx')
class Rsqrt(Operator):
    # Map TFLite RSQRT onto an ONNX Sqrt node, then invert the result by appending a Pow(x, -1) node.
TypeMapping = {
tflite.BuiltinOperator.RSQRT: 'Sqrt',
}
def __init__(self, TFactory, index):
super().__init__(TFactory, index)
self.setInited()
@property
def type(self):
if self.status.uninitialized:
return 'Sqrt'
else:
op = self.tflite
opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
assert(opcode in self.TypeMapping)
return self.TypeMapping[opcode]
def parse(self):
logger.debug("Parsing %s...", self.type)
op = self.tflite
opcode = self.model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
assert(opcode in self.TypeMapping)
assert(op.InputsLength() == 1)
assert(op.OutputsLength() == 1)
self.parseInput(0)
self.parseOutput(0)
# invert square root result
self.appendInvert()
self.setParsed()
def propagatableTensors(self):
return self.inputs + self.outputs
def transform(self):
pass
def appendInvert(self):
invert = PowerWrapper(self.TFactory, -1)
invert_name = 'TFLITE2ONNX_Invert_%s' % self.outputs[0].name
invert_t = self.TFactory.getWithRef(self.outputs[0], invert_name, True)
invert_t.setParsed()
invert_t.addProducer(self)
invert_t.addConsumer(invert)
pow_t = 'TFLITE2ONNX_PowData_%s' % self.outputs[0].name
pow_t = self.TFactory.getWithRef(self.outputs[0], pow_t, True)
pow_dtype = mapping.DTYPE_ONNX2NAME[pow_t.dtype]
pow_t.data = np.full(shape=pow_t.shape, fill_value=-1, dtype=pow_dtype)
pow_t.setParsed()
pow_t.addConsumer(invert)
invert.inputs.append(invert_t)
invert.inputs.append(pow_t)
invert.outputs.append(self.outputs[0])
self.replaceOutput(self.outputs[0], invert_t)
invert.setParsed()
self.post.append(invert)
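# Added note (not part of the original converter code): TFLite RSQRT computes
# 1 / sqrt(x). The class above therefore emits an ONNX Sqrt node and appends a
# Pow node whose exponent tensor is filled with -1, so that
# x = 4.0 -> sqrt(4.0) = 2.0 -> 2.0 ** -1 = 0.5 = 1 / sqrt(4.0).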
|
[
"logging.getLogger",
"tflite2onnx.op.binary.PowerWrapper",
"numpy.full"
] |
[((181, 213), 'logging.getLogger', 'logging.getLogger', (['"""tflite2onnx"""'], {}), "('tflite2onnx')\n", (198, 213), False, 'import logging\n'), ((1405, 1436), 'tflite2onnx.op.binary.PowerWrapper', 'PowerWrapper', (['self.TFactory', '(-1)'], {}), '(self.TFactory, -1)\n', (1417, 1436), False, 'from tflite2onnx.op.binary import PowerWrapper\n'), ((1902, 1960), 'numpy.full', 'np.full', ([], {'shape': 'pow_t.shape', 'fill_value': '(-1)', 'dtype': 'pow_dtype'}), '(shape=pow_t.shape, fill_value=-1, dtype=pow_dtype)\n', (1909, 1960), True, 'import numpy as np\n')]
|
"""
********************************************************************************
Main driver: runs the FDM reference simulation, trains the PINN,
and compares the two solutions.
********************************************************************************
"""
import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from pinn import PINN
from config_gpu import config_gpu
from params import params
from prp_dat import prp_dat
from plot_sol import *
from fdm import FDM
def main():
    # gpu configuration
config_gpu(gpu_flg = 1)
# params
f_in, f_out, width, depth, \
w_init, b_init, act, \
lr, opt, \
f_scl, laaf, c, \
w_ini, w_bnd, w_pde, BC, \
f_mntr, r_seed, \
n_epch, n_btch, c_tol = params()
# domain
tmin = 0.; tmax = 10.; nt = int(5e2) + 1
xmin = 0.; xmax = 5.; nx = int(1e2) + 1
ymin = 0.; ymax = 5.; ny = int(1e2) + 1
t_ = np.linspace(tmin, tmax, nt)
x_ = np.linspace(xmin, xmax, nx)
y_ = np.linspace(ymin, ymax, ny)
dt = t_[1] - t_[0]
dx = x_[1] - x_[0]
dy = y_[1] - y_[0]
cfl = c * dt / dx
print("CFL number:", cfl)
x, y = np.meshgrid(x_, y_)
u = np.empty((nt, nx, ny))
print("tmin: %.3f, tmax: %.3f, nt: %d, dt: %.3e" % (tmin, tmax, nt, dt))
print("xmin: %.3f, xmax: %.3f, nx: %d, dx: %.3e" % (xmin, xmax, nx, dx))
print("ymin: %.3f, ymax: %.3f, ny: %d, dy: %.3e" % (ymin, ymax, ny, dy))
# FDM simulation
u_FDM = FDM(xmin, xmax, nx, dx,
ymin, ymax, ny, dy,
nt, dt,
x, y, u, c, BC)
# prep data
TX, lb, ub, \
t_ini, x_ini, y_ini, u_ini, \
t_bnd, x_bnd, y_bnd, \
t_pde, x_pde, y_pde = prp_dat(t_, x_, y_,
N_ini = int(5e3), N_bnd = int(1e4), N_pde = int(3e4))
pinn = PINN(t_ini, x_ini, y_ini, u_ini,
t_bnd, x_bnd, y_bnd,
t_pde, x_pde, y_pde,
f_in, f_out, width, depth,
w_init, b_init, act,
lr, opt,
f_scl, laaf, c,
w_ini, w_bnd, w_pde, BC,
f_mntr, r_seed)
t0 = time.time()
with tf.device("/device:GPU:0"):
pinn.train(epoch = n_epch, batch = n_btch, tol = c_tol)
t1 = time.time()
elps = t1 - t0
print(">>>>> elapse time for training (sec):", elps)
print(">>>>> elapse time for training (min):", elps / 60.)
# inference
x_inf, y_inf = np.meshgrid(x_, y_)
x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
elps = 0
for t in t_:
t_inf = np.ones_like(x_inf) * t
t0 = time.time()
u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
t1 = time.time()
temp = t1 - t0
elps += temp
print(">>>>> elapse time for inference (sec):", elps)
print(">>>>> elapse time for inference (min):", elps / 60.)
# x_inf = np.unique(TX[:,1:2])
# y_inf = np.unique(TX[:,2:3])
# x_inf, y_inf = np.meshgrid(x_inf, y_inf)
# x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
# elps = 0.
# for n in range(nt):
# if n % 100 == 0:
# print("currently", n)
# t = n * dt # convert to real time
# u_fdm = u_FDM[n,:,:]
# n = np.array([n])
# t_inf = np.unique(TX[:,0:1])
# t_inf = np.tile(t_inf.reshape(-1, 1), (1, x_inf.shape[0])).T[:,n]
# t0 = time.time()
# u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
# t1 = time.time()
# temp = t1 - t0
# elps += temp
# print(">>>>> elapse time for inference (sec):", elps)
# print(">>>>> elapse time for inference (min):", elps / 60.)
plt.figure(figsize = (8, 4))
plt.plot(pinn.ep_log, pinn.loss_log, alpha = .7, linestyle = "-", label = "loss", c = "k")
plt.plot(pinn.ep_log, pinn.loss_ini_log, alpha = .5, linestyle = "--", label = "loss_ini")
plt.plot(pinn.ep_log, pinn.loss_bnd_log, alpha = .5, linestyle = "--", label = "loss_bnd")
plt.plot(pinn.ep_log, pinn.loss_pde_log, alpha = .5, linestyle = "--", label = "loss_pde")
plt.yscale("log")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend(loc = "upper right")
plt.grid(alpha = .5)
plt.show()
for n in range(nt):
if n % (int(nt / 20)) == 0:
t = n * dt # convert to real time
u_fdm = u_FDM[n,:,:]
n = np.array([n])
t_inf = np.unique(TX[:,0:1])
x_inf = np.unique(TX[:,1:2])
y_inf = np.unique(TX[:,2:3])
x_inf, y_inf = np.meshgrid(x_inf, y_inf)
x_inf, y_inf = x_inf.reshape(-1, 1), y_inf.reshape(-1, 1)
t_inf = np.tile(t_inf.reshape(-1, 1), (1, x_inf.shape[0])).T[:,n]
u_, gv_ = pinn.infer(t_inf, x_inf, y_inf)
fig = plt.figure(figsize=(16, 4))
            ax = fig.add_subplot(1, 2, 1, projection = "3d")  # left panel: FDM reference
ax.plot_surface(x, y, u_fdm, vmin = -1., vmax = 1.)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(-1., 1.)
ax = fig.add_subplot(1, 2, 2, projection = "3d")
ax.plot_surface(x, y, u_.numpy().reshape(nx, ny), vmin = -1., vmax = 1.)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(-1., 1.)
u_diff = u_fdm - u_.numpy().reshape(nx, ny)
u_l2 = np.linalg.norm(u_diff, ord=2) / np.linalg.norm(u_fdm, ord=2)
u_mse = np.mean(np.square(u_diff)) / np.sqrt(nx * ny)
u_sem = np.std (np.square(u_diff), ddof = 1) / np.sqrt(nx * ny)
print("t: %.3f, l2: %.3e, mse: %.3e, sem: %.3e" % (t, u_l2, u_mse, u_sem))
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.linalg.norm",
"fdm.FDM",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.empty",
"params.params",
"numpy.meshgrid",
"matplotlib.pyplot.yscale",
"tensorflow.device",
"pinn.PINN",
"numpy.square",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.unique",
"config_gpu.config_gpu",
"matplotlib.pyplot.figure"
] |
[((473, 494), 'config_gpu.config_gpu', 'config_gpu', ([], {'gpu_flg': '(1)'}), '(gpu_flg=1)\n', (483, 494), False, 'from config_gpu import config_gpu\n'), ((689, 697), 'params.params', 'params', ([], {}), '()\n', (695, 697), False, 'from params import params\n'), ((856, 883), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', 'nt'], {}), '(tmin, tmax, nt)\n', (867, 883), True, 'import numpy as np\n'), ((893, 920), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (904, 920), True, 'import numpy as np\n'), ((930, 957), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ny'], {}), '(ymin, ymax, ny)\n', (941, 957), True, 'import numpy as np\n'), ((1091, 1110), 'numpy.meshgrid', 'np.meshgrid', (['x_', 'y_'], {}), '(x_, y_)\n', (1102, 1110), True, 'import numpy as np\n'), ((1122, 1144), 'numpy.empty', 'np.empty', (['(nt, nx, ny)'], {}), '((nt, nx, ny))\n', (1130, 1144), True, 'import numpy as np\n'), ((1410, 1477), 'fdm.FDM', 'FDM', (['xmin', 'xmax', 'nx', 'dx', 'ymin', 'ymax', 'ny', 'dy', 'nt', 'dt', 'x', 'y', 'u', 'c', 'BC'], {}), '(xmin, xmax, nx, dx, ymin, ymax, ny, dy, nt, dt, x, y, u, c, BC)\n', (1413, 1477), False, 'from fdm import FDM\n'), ((1774, 1970), 'pinn.PINN', 'PINN', (['t_ini', 'x_ini', 'y_ini', 'u_ini', 't_bnd', 'x_bnd', 'y_bnd', 't_pde', 'x_pde', 'y_pde', 'f_in', 'f_out', 'width', 'depth', 'w_init', 'b_init', 'act', 'lr', 'opt', 'f_scl', 'laaf', 'c', 'w_ini', 'w_bnd', 'w_pde', 'BC', 'f_mntr', 'r_seed'], {}), '(t_ini, x_ini, y_ini, u_ini, t_bnd, x_bnd, y_bnd, t_pde, x_pde, y_pde,\n f_in, f_out, width, depth, w_init, b_init, act, lr, opt, f_scl, laaf, c,\n w_ini, w_bnd, w_pde, BC, f_mntr, r_seed)\n', (1778, 1970), False, 'from pinn import PINN\n'), ((2109, 2120), 'time.time', 'time.time', ([], {}), '()\n', (2118, 2120), False, 'import time\n'), ((2231, 2242), 'time.time', 'time.time', ([], {}), '()\n', (2240, 2242), False, 'import time\n'), ((2418, 2437), 'numpy.meshgrid', 'np.meshgrid', (['x_', 'y_'], {}), '(x_, y_)\n', (2429, 2437), True, 'import numpy as np\n'), ((3628, 3654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (3638, 3654), True, 'import matplotlib.pyplot as plt\n'), ((3661, 3748), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_log'], {'alpha': '(0.7)', 'linestyle': '"""-"""', 'label': '"""loss"""', 'c': '"""k"""'}), "(pinn.ep_log, pinn.loss_log, alpha=0.7, linestyle='-', label='loss',\n c='k')\n", (3669, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3761, 3851), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_ini_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_ini"""'}), "(pinn.ep_log, pinn.loss_ini_log, alpha=0.5, linestyle='--', label=\n 'loss_ini')\n", (3769, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3946), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_bnd_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_bnd"""'}), "(pinn.ep_log, pinn.loss_bnd_log, alpha=0.5, linestyle='--', label=\n 'loss_bnd')\n", (3864, 3946), True, 'import matplotlib.pyplot as plt\n'), ((3951, 4041), 'matplotlib.pyplot.plot', 'plt.plot', (['pinn.ep_log', 'pinn.loss_pde_log'], {'alpha': '(0.5)', 'linestyle': '"""--"""', 'label': '"""loss_pde"""'}), "(pinn.ep_log, pinn.loss_pde_log, alpha=0.5, linestyle='--', label=\n 'loss_pde')\n", (3959, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4063), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4056, 
4063), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4087), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4078, 4087), True, 'import matplotlib.pyplot as plt\n'), ((4092, 4110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (4102, 4110), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4144), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4125, 4144), True, 'import matplotlib.pyplot as plt\n'), ((4151, 4170), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (4159, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4184, 4186), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2156), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (2139, 2156), True, 'import tensorflow as tf\n'), ((2583, 2594), 'time.time', 'time.time', ([], {}), '()\n', (2592, 2594), False, 'import time\n'), ((2658, 2669), 'time.time', 'time.time', ([], {}), '()\n', (2667, 2669), False, 'import time\n'), ((2546, 2565), 'numpy.ones_like', 'np.ones_like', (['x_inf'], {}), '(x_inf)\n', (2558, 2565), True, 'import numpy as np\n'), ((4345, 4358), 'numpy.array', 'np.array', (['[n]'], {}), '([n])\n', (4353, 4358), True, 'import numpy as np\n'), ((4379, 4400), 'numpy.unique', 'np.unique', (['TX[:, 0:1]'], {}), '(TX[:, 0:1])\n', (4388, 4400), True, 'import numpy as np\n'), ((4420, 4441), 'numpy.unique', 'np.unique', (['TX[:, 1:2]'], {}), '(TX[:, 1:2])\n', (4429, 4441), True, 'import numpy as np\n'), ((4461, 4482), 'numpy.unique', 'np.unique', (['TX[:, 2:3]'], {}), '(TX[:, 2:3])\n', (4470, 4482), True, 'import numpy as np\n'), ((4509, 4534), 'numpy.meshgrid', 'np.meshgrid', (['x_inf', 'y_inf'], {}), '(x_inf, y_inf)\n', (4520, 4534), True, 'import numpy as np\n'), ((4756, 4783), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 4)'}), '(figsize=(16, 4))\n', (4766, 4783), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5372), 'numpy.linalg.norm', 'np.linalg.norm', (['u_diff'], {'ord': '(2)'}), '(u_diff, ord=2)\n', (5357, 5372), True, 'import numpy as np\n'), ((5375, 5403), 'numpy.linalg.norm', 'np.linalg.norm', (['u_fdm'], {'ord': '(2)'}), '(u_fdm, ord=2)\n', (5389, 5403), True, 'import numpy as np\n'), ((5453, 5469), 'numpy.sqrt', 'np.sqrt', (['(nx * ny)'], {}), '(nx * ny)\n', (5460, 5469), True, 'import numpy as np\n'), ((5529, 5545), 'numpy.sqrt', 'np.sqrt', (['(nx * ny)'], {}), '(nx * ny)\n', (5536, 5545), True, 'import numpy as np\n'), ((5432, 5449), 'numpy.square', 'np.square', (['u_diff'], {}), '(u_diff)\n', (5441, 5449), True, 'import numpy as np\n'), ((5498, 5515), 'numpy.square', 'np.square', (['u_diff'], {}), '(u_diff)\n', (5507, 5515), True, 'import numpy as np\n')]
|
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import skimage.io
from skimage.transform import resize
from imgaug import augmenters as iaa
from random import randint
import PIL
from PIL import Image
import cv2
from sklearn.utils import class_weight, shuffle
import keras
import warnings
from keras.utils import Sequence
import tensorflow as tf
warnings.filterwarnings("ignore")
SIZE = 256
SEED = 777
THRESHOLD = 0.2
# Load dataset info
DIR = '../input/'
# data = pd.read_csv('../input/train.csv')
def getTrainDataset():
path_to_train = DIR + '/train/'
data = pd.read_csv(DIR + '/train.csv')
paths = []
labels = []
for name, lbl in zip(data['Id'], data['Target'].str.split(' ')):
y = np.zeros(28)
for key in lbl:
y[int(key)] = 1
paths.append(os.path.join(path_to_train, name))
labels.append(y)
return np.array(paths[:5000]), np.array(labels[:5000])
def getTestDataset():
path_to_test = DIR + '/test/'
data = pd.read_csv(DIR + '/sample_submission.csv')
paths = []
labels = []
for name in data['Id']:
y = np.ones(28)
paths.append(os.path.join(path_to_test, name))
labels.append(y)
return np.array(paths), np.array(labels)
# paths, labels = getTrainDataset()
# credits: https://github.com/keras-team/keras/blob/master/keras/utils/data_utils.py#L302
# credits: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
class ProteinDataGenerator(keras.utils.Sequence):
def __init__(self, paths, labels, batch_size, shape, channels = [], shuffle = False, use_cache = False, augmentor = False):
self.paths, self.labels = paths, labels
self.batch_size = batch_size
self.shape = shape
self.shuffle = shuffle
self.use_cache = use_cache
self.channels = channels
self.augmentor = augmentor
self.clahe = cv2.createCLAHE()
if use_cache == True:
self.cache = np.zeros((paths.shape[0], shape[0], shape[1], len(channels)))
self.is_cached = np.zeros((paths.shape[0]))
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.paths) / float(self.batch_size)))
def __getitem__(self, idx):
indexes = self.indexes[idx * self.batch_size : (idx+1) * self.batch_size]
paths = self.paths[indexes]
X = np.zeros((paths.shape[0], self.shape[0], self.shape[1], self.shape[2]))
# Generate data
if self.use_cache == True:
X = self.cache[indexes]
for i, path in enumerate(paths[np.where(self.is_cached[indexes] == 0)]):
image = self.__load_image(path)
self.is_cached[indexes[i]] = 1
self.cache[indexes[i]] = image
X[i] = image
else:
for i, path in enumerate(paths):
X[i] = self.__load_image(path)
if self.augmentor == True:
for i, item in enumerate(X):
X[i] = self.augment(item)
y = self.labels[indexes]
return X, y
def on_epoch_end(self):
# Updates indexes after each epoch
self.indexes = np.arange(len(self.paths))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def __load_image(self, path):
images = []
for channel in self.channels:
im = np.array(Image.open(path + '_' + channel + '.png'))
# im = clahe.apply(im)
images.append(im)
if len(self.channels) >= 2:
im = np.stack((
images
), -1)
im = cv2.resize(im, (SIZE,SIZE))
im = np.divide(im, 255)
else:
im = images[0]
im = cv2.resize(im, (SIZE,SIZE))
im = np.divide(im, 255)
im = np.expand_dims(im, 2)
return im
def augment(self, image):
if randint(0,1) == 1:
augment_img = iaa.Sequential([
iaa.OneOf([
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # horizontal flips
iaa.Crop(percent=(0, 0.1)), # random crops
# Small gaussian blur with random sigma between 0 and 0.5.
# But we only blur about 50% of all images.
iaa.Sometimes(0.5,
iaa.GaussianBlur(sigma=(0, 0.5))
),
# Make some images brighter and some darker.
# In 20% of all cases, we sample the multiplier once per channel,
# which can end up changing the color of the images.
iaa.Multiply((0.8, 1.2), per_channel=0.2),
# Apply affine transformations to each image.
# Scale/zoom them, translate/move them, rotate them and shear them.
iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)},
rotate=(-180, 180),
shear=(-4, 4)
)
])], random_order=True)
image_aug = augment_img.augment_image(image)
return image_aug
else:
return image
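# Usage sketch (added for clarity; the batch size is illustrative):
#   paths, labels = getTrainDataset()
#   gen = ProteinDataGenerator(paths, labels, 16, (SIZE, SIZE, 3),
#                              channels=["green", "blue", "red"])
#   X, y = gen[0]   # X: (16, 256, 256, 3) images scaled to [0, 1], y: (16, 28) multi-hot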
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D
from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import ModelCheckpoint
from keras import metrics
from keras.optimizers import Adam
from keras import backend as K
import keras
from keras.models import Model
from keras.utils import multi_gpu_model
def f1(y_true, y_pred):
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), THRESHOLD), K.floatx())
tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1 = 2*p*r / (p+r+K.epsilon())
f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
return K.mean(f1)
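# Added note (illustrative): f1 above hard-thresholds the predictions at
# THRESHOLD (0.2), computes precision/recall per class and averages the
# per-class F1 scores (macro F1). For one class with tp = 3, fp = 1, fn = 2:
# p = 0.75, r = 0.6 and f1 = 2 * 0.75 * 0.6 / (0.75 + 0.6) ~ 0.667.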
def f1_loss(y_true, y_pred):
tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1 = 2*p*r / (p+r+K.epsilon())
f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
return 1-K.mean(f1)
def focal_loss(gamma=2., alpha=.25):
def focal_loss_fixed(y_true, y_pred):
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
pt_1 = K.clip(pt_1, 1e-3, .999)
pt_0 = K.clip(pt_0, 1e-3, .999)
return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))-K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0))
return focal_loss_fixed
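# Usage sketch (added; the values are just the defaults above): focal_loss
# returns a Keras-compatible closure, e.g.
#   model.compile(loss=focal_loss(gamma=2., alpha=.25), optimizer=Adam(lr=1e-4))
# Note that the training script below compiles with binary_crossentropy instead.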
def create_model(input_shape, n_out, channels):
input_tensor = Input(shape=(256,256,len(channels)))
# print(len(channels))
bn = BatchNormalization()(input_tensor)
base_model = InceptionV3(include_top=False, weights='imagenet')
# base_model.summary()
# base_model.get_layer(index=)
# for idx, layer in enumerate(base_model.layers):
# print(idx, layer.name)
base_output = base_model.get_layer(index=132).output
base_input = base_model.input
base_model = Model(inputs=base_input, outputs=base_output)
x = base_model(bn)
x = Dropout(0.5)(x)
x = Conv2D(128, kernel_size=(3,3), activation='relu')(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(n_out, activation='sigmoid')(x)
model = Model(input_tensor, output)
return model
## Load data
SHAPE = (256, 256, 3)
channels = ["green", "blue", "red"]
# create callbacks list
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
epochs = 10; batch_size = 64; VAL_RATIO = .1; DEBUG = False
# split data into train, valid
paths, labels = getTrainDataset()
# shuffle indices and divide into train / validation parts
keys = np.arange(paths.shape[0], dtype=np.int)
np.random.seed(SEED)
np.random.shuffle(keys)
lastTrainIndex = int((1-VAL_RATIO) * paths.shape[0])
if DEBUG == True: # use only small subset for debugging, Kaggle's RAM is limited
pathsTrain = paths[0:256]
labelsTrain = labels[0:256]
pathsVal = paths[lastTrainIndex:lastTrainIndex+256]
labelsVal = labels[lastTrainIndex:lastTrainIndex+256]
use_cache = True
else:
pathsTrain = paths[0:lastTrainIndex]
labelsTrain = labels[0:lastTrainIndex]
pathsVal = paths[lastTrainIndex:]
labelsVal = labels[lastTrainIndex:]
use_cache = False
use_cache = False
# print(paths.shape, labels.shape)
# print(pathsTrain.shape, labelsTrain.shape, pathsVal.shape, labelsVal.shape)
tg = ProteinDataGenerator(pathsTrain, labelsTrain, batch_size, SHAPE, channels, use_cache=use_cache)
vg = ProteinDataGenerator(pathsVal, labelsVal, batch_size, SHAPE, channels, use_cache=use_cache)
checkpoint = ModelCheckpoint('../working/InceptionV3_3chan.h5', monitor='val_f1', verbose=1,
save_best_only=True, mode='max', save_weights_only = False)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_f1', factor=0.5, patience=10,
verbose=1, mode='max', epsilon=0.0001)
early = EarlyStopping(monitor="val_f1",
mode="max",
patience=20)
callbacks_list = [checkpoint, early, reduceLROnPlat]
# warm up model
import tensorflow as tf
channels = ["green", "blue", "red"]
# with tf.device('/cpu:0'):
model = create_model(
input_shape=(SIZE,SIZE,3),
n_out=28, channels = channels)
for layer in model.layers:
layer.trainable = False
for i in range(-6,2):
model.layers[i].trainable = True
model.summary()
model.compile(loss="binary_crossentropy",
optimizer=Adam(lr=1e-4),
metrics=['accuracy', f1])
hist = model.fit_generator(
tg,
steps_per_epoch=np.ceil(float(len(pathsTrain)) / float(batch_size))/2,
validation_data=vg,
validation_steps=np.ceil(float(len(pathsVal)) / float(batch_size))/2,
epochs=1,
verbose=1,
callbacks = callbacks_list)
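# Added note: the single-epoch run above is a warm-up in which only the layers
# re-enabled by the range(-6, 2) loop (essentially the new classification head
# plus the input-side BatchNormalization) are trainable while the InceptionV3
# base stays frozen; below, every layer is unfrozen and the model is recompiled
# for the long fine-tuning run.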
# Set all layers back to trainable
for layer in model.layers:
layer.trainable = True
model.compile(loss="binary_crossentropy",
optimizer=Adam(lr=1e-4),
metrics=['accuracy', f1])
batch_size = 64
tg = ProteinDataGenerator(pathsTrain, labelsTrain, batch_size, SHAPE, channels, use_cache=use_cache)
vg = ProteinDataGenerator(pathsVal, labelsVal, batch_size, SHAPE, channels, use_cache=use_cache)
hist = model.fit_generator(
tg,
steps_per_epoch=np.ceil(float(len(pathsTrain)) / float(batch_size))/2,
validation_data=vg,
validation_steps=np.ceil(float(len(pathsVal)) / float(batch_size))/2,
epochs=100,
verbose=1,
callbacks=callbacks_list)
fig, ax = plt.subplots(1, 2, figsize=(15,5))
ax[0].set_title('loss')
ax[0].plot(hist.epoch, hist.history["loss"], label="Train loss")
ax[0].plot(hist.epoch, hist.history["val_loss"], label="Validation loss")
ax[1].set_title('acc')
ax[1].plot(hist.epoch, hist.history["f1"], label="Train F1")
ax[1].plot(hist.epoch, hist.history["val_f1"], label="Validation F1")
ax[0].legend()
ax[1].legend()
plt.show()
|
[
"keras.layers.Conv2D",
"tensorflow.equal",
"pandas.read_csv",
"imgaug.augmenters.GaussianBlur",
"keras.backend.floatx",
"numpy.array",
"tensorflow.is_nan",
"keras.layers.Dense",
"tensorflow.ones_like",
"imgaug.augmenters.Fliplr",
"numpy.arange",
"numpy.divide",
"imgaug.augmenters.Flipud",
"imgaug.augmenters.Crop",
"numpy.where",
"keras.backend.clip",
"keras.backend.pow",
"numpy.stack",
"numpy.random.seed",
"keras.callbacks.EarlyStopping",
"keras.models.Model",
"keras.applications.inception_v3.InceptionV3",
"tensorflow.zeros_like",
"keras.backend.epsilon",
"random.randint",
"keras.optimizers.Adam",
"keras.backend.cast",
"numpy.ones",
"keras.layers.Flatten",
"keras.callbacks.ReduceLROnPlateau",
"keras.backend.log",
"imgaug.augmenters.Multiply",
"keras.layers.BatchNormalization",
"cv2.resize",
"keras.layers.Dropout",
"warnings.filterwarnings",
"matplotlib.pyplot.show",
"PIL.Image.open",
"keras.callbacks.ModelCheckpoint",
"keras.backend.mean",
"imgaug.augmenters.Affine",
"os.path.join",
"cv2.createCLAHE",
"numpy.zeros",
"numpy.expand_dims",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] |
[((384, 417), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (407, 417), False, 'import warnings\n'), ((8892, 8931), 'numpy.arange', 'np.arange', (['paths.shape[0]'], {'dtype': 'np.int'}), '(paths.shape[0], dtype=np.int)\n', (8901, 8931), True, 'import numpy as np\n'), ((8934, 8954), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (8948, 8954), True, 'import numpy as np\n'), ((8955, 8978), 'numpy.random.shuffle', 'np.random.shuffle', (['keys'], {}), '(keys)\n', (8972, 8978), True, 'import numpy as np\n'), ((9847, 9988), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../working/InceptionV3_3chan.h5"""'], {'monitor': '"""val_f1"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""', 'save_weights_only': '(False)'}), "('../working/InceptionV3_3chan.h5', monitor='val_f1',\n verbose=1, save_best_only=True, mode='max', save_weights_only=False)\n", (9862, 9988), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((10034, 10137), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_f1"""', 'factor': '(0.5)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""max"""', 'epsilon': '(0.0001)'}), "(monitor='val_f1', factor=0.5, patience=10, verbose=1,\n mode='max', epsilon=0.0001)\n", (10051, 10137), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((10178, 10234), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_f1"""', 'mode': '"""max"""', 'patience': '(20)'}), "(monitor='val_f1', mode='max', patience=20)\n", (10191, 10234), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau\n'), ((11820, 11855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 5)'}), '(1, 2, figsize=(15, 5))\n', (11832, 11855), True, 'import matplotlib.pyplot as plt\n'), ((12202, 12212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12210, 12212), True, 'import matplotlib.pyplot as plt\n'), ((614, 645), 'pandas.read_csv', 'pd.read_csv', (["(DIR + '/train.csv')"], {}), "(DIR + '/train.csv')\n", (625, 645), True, 'import pandas as pd\n'), ((1043, 1086), 'pandas.read_csv', 'pd.read_csv', (["(DIR + '/sample_submission.csv')"], {}), "(DIR + '/sample_submission.csv')\n", (1054, 1086), True, 'import pandas as pd\n'), ((6670, 6680), 'keras.backend.mean', 'K.mean', (['f1'], {}), '(f1)\n', (6676, 6680), True, 'from keras import backend as K\n'), ((7802, 7852), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (7813, 7852), False, 'from keras.applications.inception_v3 import InceptionV3\n'), ((8110, 8155), 'keras.models.Model', 'Model', ([], {'inputs': 'base_input', 'outputs': 'base_output'}), '(inputs=base_input, outputs=base_output)\n', (8115, 8155), False, 'from keras.models import Model\n'), ((8443, 8470), 'keras.models.Model', 'Model', (['input_tensor', 'output'], {}), '(input_tensor, output)\n', (8448, 8470), False, 'from keras.models import Model\n'), ((764, 776), 'numpy.zeros', 'np.zeros', (['(28)'], {}), '(28)\n', (772, 776), True, 'import numpy as np\n'), ((922, 944), 'numpy.array', 'np.array', (['paths[:5000]'], {}), '(paths[:5000])\n', (930, 944), True, 'import numpy as np\n'), ((946, 969), 'numpy.array', 'np.array', (['labels[:5000]'], {}), 
'(labels[:5000])\n', (954, 969), True, 'import numpy as np\n'), ((1164, 1175), 'numpy.ones', 'np.ones', (['(28)'], {}), '(28)\n', (1171, 1175), True, 'import numpy as np\n'), ((1268, 1283), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (1276, 1283), True, 'import numpy as np\n'), ((1285, 1301), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1293, 1301), True, 'import numpy as np\n'), ((1972, 1989), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {}), '()\n', (1987, 1989), False, 'import cv2\n'), ((2457, 2528), 'numpy.zeros', 'np.zeros', (['(paths.shape[0], self.shape[0], self.shape[1], self.shape[2])'], {}), '((paths.shape[0], self.shape[0], self.shape[1], self.shape[2]))\n', (2465, 2528), True, 'import numpy as np\n'), ((6244, 6254), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (6252, 6254), True, 'from keras import backend as K\n'), ((6271, 6303), 'keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (6277, 6303), True, 'from keras import backend as K\n'), ((6326, 6370), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * (1 - y_pred))', '"""float"""'], {}), "((1 - y_true) * (1 - y_pred), 'float')\n", (6332, 6370), True, 'from keras import backend as K\n'), ((6389, 6427), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * y_pred)', '"""float"""'], {}), "((1 - y_true) * y_pred, 'float')\n", (6395, 6427), True, 'from keras import backend as K\n'), ((6448, 6486), 'keras.backend.cast', 'K.cast', (['(y_true * (1 - y_pred))', '"""float"""'], {}), "(y_true * (1 - y_pred), 'float')\n", (6454, 6486), True, 'from keras import backend as K\n'), ((6621, 6634), 'tensorflow.is_nan', 'tf.is_nan', (['f1'], {}), '(f1)\n', (6630, 6634), True, 'import tensorflow as tf\n'), ((6636, 6653), 'tensorflow.zeros_like', 'tf.zeros_like', (['f1'], {}), '(f1)\n', (6649, 6653), True, 'import tensorflow as tf\n'), ((6728, 6760), 'keras.backend.cast', 'K.cast', (['(y_true * y_pred)', '"""float"""'], {}), "(y_true * y_pred, 'float')\n", (6734, 6760), True, 'from keras import backend as K\n'), ((6783, 6827), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * (1 - y_pred))', '"""float"""'], {}), "((1 - y_true) * (1 - y_pred), 'float')\n", (6789, 6827), True, 'from keras import backend as K\n'), ((6846, 6884), 'keras.backend.cast', 'K.cast', (['((1 - y_true) * y_pred)', '"""float"""'], {}), "((1 - y_true) * y_pred, 'float')\n", (6852, 6884), True, 'from keras import backend as K\n'), ((6905, 6943), 'keras.backend.cast', 'K.cast', (['(y_true * (1 - y_pred))', '"""float"""'], {}), "(y_true * (1 - y_pred), 'float')\n", (6911, 6943), True, 'from keras import backend as K\n'), ((7078, 7091), 'tensorflow.is_nan', 'tf.is_nan', (['f1'], {}), '(f1)\n', (7087, 7091), True, 'import tensorflow as tf\n'), ((7093, 7110), 'tensorflow.zeros_like', 'tf.zeros_like', (['f1'], {}), '(f1)\n', (7106, 7110), True, 'import tensorflow as tf\n'), ((7129, 7139), 'keras.backend.mean', 'K.mean', (['f1'], {}), '(f1)\n', (7135, 7139), True, 'from keras import backend as K\n'), ((7388, 7414), 'keras.backend.clip', 'K.clip', (['pt_1', '(0.001)', '(0.999)'], {}), '(pt_1, 0.001, 0.999)\n', (7394, 7414), True, 'from keras import backend as K\n'), ((7428, 7454), 'keras.backend.clip', 'K.clip', (['pt_0', '(0.001)', '(0.999)'], {}), '(pt_0, 0.001, 0.999)\n', (7434, 7454), True, 'from keras import backend as K\n'), ((7750, 7770), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7768, 7770), False, 'from keras.layers import Activation, Dropout, Flatten, 
Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8192, 8204), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8199, 8204), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8216, 8266), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), activation='relu')\n", (8222, 8266), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8277, 8286), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8284, 8286), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8298, 8310), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8305, 8310), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8322, 8352), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (8327, 8352), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8364, 8376), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8371, 8376), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((8393, 8427), 'keras.layers.Dense', 'Dense', (['n_out'], {'activation': '"""sigmoid"""'}), "(n_out, activation='sigmoid')\n", (8398, 8427), False, 'from keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D, MaxPooling2D\n'), ((10730, 10745), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (10734, 10745), False, 'from keras.optimizers import Adam\n'), ((11239, 11254), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (11243, 11254), False, 'from keras.optimizers import Adam\n'), ((850, 883), 'os.path.join', 'os.path.join', (['path_to_train', 'name'], {}), '(path_to_train, name)\n', (862, 883), False, 'import os, sys\n'), ((1197, 1229), 'os.path.join', 'os.path.join', (['path_to_test', 'name'], {}), '(path_to_test, name)\n', (1209, 1229), False, 'import os, sys\n'), ((2136, 2160), 'numpy.zeros', 'np.zeros', (['paths.shape[0]'], {}), '(paths.shape[0])\n', (2144, 2160), True, 'import numpy as np\n'), ((3346, 3377), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (3363, 3377), True, 'import numpy as np\n'), ((3866, 3886), 'numpy.stack', 'np.stack', (['images', '(-1)'], {}), '(images, -1)\n', (3874, 3886), True, 'import numpy as np\n'), ((3936, 3964), 'cv2.resize', 'cv2.resize', (['im', '(SIZE, SIZE)'], {}), '(im, (SIZE, SIZE))\n', (3946, 3964), False, 'import cv2\n'), ((3981, 3999), 'numpy.divide', 'np.divide', (['im', '(255)'], {}), '(im, 255)\n', (3990, 3999), True, 'import numpy as np\n'), ((4059, 4087), 'cv2.resize', 'cv2.resize', (['im', '(SIZE, SIZE)'], {}), '(im, (SIZE, SIZE))\n', (4069, 4087), False, 'import cv2\n'), ((4104, 4122), 'numpy.divide', 'np.divide', (['im', '(255)'], {}), '(im, 255)\n', (4113, 4122), True, 'import numpy as np\n'), ((4140, 4161), 'numpy.expand_dims', 'np.expand_dims', (['im', '(2)'], {}), '(im, 2)\n', (4154, 4161), True, 'import 
numpy as np\n'), ((4221, 4234), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4228, 4234), False, 'from random import randint\n'), ((6210, 6230), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (6216, 6230), True, 'from keras import backend as K\n'), ((6517, 6528), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6526, 6528), True, 'from keras import backend as K\n'), ((6554, 6565), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6563, 6565), True, 'from keras import backend as K\n'), ((6590, 6601), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6599, 6601), True, 'from keras import backend as K\n'), ((6974, 6985), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6983, 6985), True, 'from keras import backend as K\n'), ((7011, 7022), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7020, 7022), True, 'from keras import backend as K\n'), ((7047, 7058), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (7056, 7058), True, 'from keras import backend as K\n'), ((7245, 7264), 'tensorflow.equal', 'tf.equal', (['y_true', '(1)'], {}), '(y_true, 1)\n', (7253, 7264), True, 'import tensorflow as tf\n'), ((7274, 7294), 'tensorflow.ones_like', 'tf.ones_like', (['y_pred'], {}), '(y_pred)\n', (7286, 7294), True, 'import tensorflow as tf\n'), ((7320, 7339), 'tensorflow.equal', 'tf.equal', (['y_true', '(0)'], {}), '(y_true, 0)\n', (7328, 7339), True, 'import tensorflow as tf\n'), ((7349, 7370), 'tensorflow.zeros_like', 'tf.zeros_like', (['y_pred'], {}), '(y_pred)\n', (7362, 7370), True, 'import tensorflow as tf\n'), ((3679, 3720), 'PIL.Image.open', 'Image.open', (["(path + '_' + channel + '.png')"], {}), "(path + '_' + channel + '.png')\n", (3689, 3720), False, 'from PIL import Image\n'), ((2667, 2705), 'numpy.where', 'np.where', (['(self.is_cached[indexes] == 0)'], {}), '(self.is_cached[indexes] == 0)\n', (2675, 2705), True, 'import numpy as np\n'), ((7563, 7580), 'keras.backend.log', 'K.log', (['(1.0 - pt_0)'], {}), '(1.0 - pt_0)\n', (7568, 7580), True, 'from keras import backend as K\n'), ((7510, 7521), 'keras.backend.log', 'K.log', (['pt_1'], {}), '(pt_1)\n', (7515, 7521), True, 'from keras import backend as K\n'), ((7541, 7559), 'keras.backend.pow', 'K.pow', (['pt_0', 'gamma'], {}), '(pt_0, gamma)\n', (7546, 7559), True, 'from keras import backend as K\n'), ((4331, 4346), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.5)'], {}), '(0.5)\n', (4341, 4346), True, 'from imgaug import augmenters as iaa\n'), ((4387, 4402), 'imgaug.augmenters.Flipud', 'iaa.Flipud', (['(0.5)'], {}), '(0.5)\n', (4397, 4402), True, 'from imgaug import augmenters as iaa\n'), ((4443, 4469), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '(0, 0.1)'}), '(percent=(0, 0.1))\n', (4451, 4469), True, 'from imgaug import augmenters as iaa\n'), ((4992, 5033), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.8, 1.2)'], {'per_channel': '(0.2)'}), '((0.8, 1.2), per_channel=0.2)\n', (5004, 5033), True, 'from imgaug import augmenters as iaa\n'), ((5209, 5356), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': (0.9, 1.1), 'y': (0.9, 1.1)}", 'translate_percent': "{'x': (-0.1, 0.1), 'y': (-0.1, 0.1)}", 'rotate': '(-180, 180)', 'shear': '(-4, 4)'}), "(scale={'x': (0.9, 1.1), 'y': (0.9, 1.1)}, translate_percent={'x':\n (-0.1, 0.1), 'y': (-0.1, 0.1)}, rotate=(-180, 180), shear=(-4, 4))\n", (5219, 5356), True, 'from imgaug import augmenters as iaa\n'), ((7484, 7508), 'keras.backend.pow', 'K.pow', (['(1.0 - pt_1)', 'gamma'], 
{}), '(1.0 - pt_1, gamma)\n', (7489, 7508), True, 'from keras import backend as K\n'), ((4692, 4724), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ([], {'sigma': '(0, 0.5)'}), '(sigma=(0, 0.5))\n', (4708, 4724), True, 'from imgaug import augmenters as iaa\n')]
|
import numpy as np
from . import utils
def tile_position(x0, y0, x1=None, y1=None):
"""Need doc string..."""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
if (x0.size != y0.size) or (x1.size != y1.size):
raise ValueError("x0 and y0 or x1 and y1 size do not match.")
x0g = np.tile(x0.ravel()[:, np.newaxis], (1, x1.size))
y0g = np.tile(y0.ravel()[:, np.newaxis], (1, x1.size))
x1g = np.tile(x1.ravel()[np.newaxis, :], (x0.size, 1))
y1g = np.tile(y1.ravel()[np.newaxis, :], (x0.size, 1))
return x0g, y0g, x1g, y1g
def xy_distance(x0, y0, x1=None, y1=None):
"""
Output x and y distance matrices.
If x1 and y1 are not supplied we calculate the auto-distance matrices.
"""
if x1 is None and y1 is None:
x1 = x0
y1 = y0
dx = x0.ravel()[:, np.newaxis] - x1.ravel()[np.newaxis, :]
dy = y0.ravel()[:, np.newaxis] - y1.ravel()[np.newaxis, :]
return dx, dy
def r_distance(x0, y0, x1=None, y1=None, coords="cartesian"):
"""
Distance matrix.
If x1 and y1 are not supplied we calculate the auto-distance matrix.
"""
if coords == "cartesian":
dx, dy = xy_distance(x0, y0, x1, y1)
r = np.sqrt(dx ** 2 + dy ** 2)
elif coords == "latlon":
r = utils.haversine_distance(*tile_position(x0, y0, x1, y1))
return r
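# Usage sketch (added, assumed inputs): for x0 = y0 = np.array([0., 1.]) the
# auto-distance matrix r_distance(x0, y0) is
#   [[0.0, 1.414...],
#    [1.414..., 0.0]]
# i.e. entry (i, j) holds the separation between point i and point j.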
|
[
"numpy.sqrt"
] |
[((1225, 1251), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (1232, 1251), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append("../")
import pandas as pd
import numpy as np
import pathlib
import pickle
import os
import itertools
import argparse
import logging
import helpers.feature_helpers as fh
from collections import Counter
OUTPUT_DF_TR = 'df_steps_tr.csv'
OUTPUT_DF_VAL = 'df_steps_val.csv'
OUTPUT_DF_TRAIN = 'df_steps_train.csv'
OUTPUT_DF_TEST = 'df_steps_test.csv'
OUTPUT_DF_SESSIONS = 'df_sessions.csv'
OUTPUT_ENCODING_DICT = 'enc_dicts_v02.pkl'
OUTPUT_CONFIG = 'config.pkl'
OUTPUT_NORMLIZATIONS_VAL = 'Dwell_normalizations_val.pkl'
OUTPUT_NORMLIZATIONS_SUBM = 'Dwell_normalizations_submission.pkl'
DEFAULT_FEATURES_DIR_NAME = 'nn_vnormal'
DEFAULT_PREPROC_DIR_NAME = 'data_processed_vnormal'
def setup_args_parser():
parser = argparse.ArgumentParser(description='Create cv features')
parser.add_argument('--processed_data_dir_name', help='path to preprocessed data', default=DEFAULT_PREPROC_DIR_NAME)
parser.add_argument('--features_dir_name', help='features directory name', default=DEFAULT_FEATURES_DIR_NAME)
#parser.add_argument('--split_option', help='split type. Options: normal, future', default=DEFAULT_SPLIT)
parser.add_argument('--debug', help='debug mode (verbose output and no saving)', action='store_true')
return parser
def setup_logger(debug):
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def main():
parser = setup_args_parser()
args = parser.parse_args()
logger = setup_logger(args.debug)
#logger.info('split option: %s' % args.split_option)
logger.info(100*'-')
logger.info('Running 013_Features_Dwell.py')
logger.info(100*'-')
logger.info('processed data directory name: %s' % args.processed_data_dir_name)
logger.info('features directory name: %s' % args.features_dir_name)
#Set up arguments
# # split_option
# if args.split_option=='normal':
# SPLIT_OPTION = 'normal'
# elif args.split_option=='future':
# SPLIT_OPTION = 'leave_out_only_clickout_with_nans'
# processed data path
DATA_PATH = '../data/' + args.processed_data_dir_name + '/'
#os.makedirs(DATA_PATH) if not os.path.exists(DATA_PATH) else None
logger.info('processed data path: %s' % DATA_PATH)
# features data path
FEATURES_PATH = '../features/' + args.features_dir_name + '/'
#os.makedirs(FEATURES_PATH) if not os.path.exists(FEATURES_PATH) else None
logger.info('features path: %s' % FEATURES_PATH)
# End of set up arguments
config = pickle.load(open(DATA_PATH+OUTPUT_CONFIG, "rb" ))
config
# ### read data
df_steps_tr = pd.read_csv(DATA_PATH+OUTPUT_DF_TR)
df_steps_val = pd.read_csv(DATA_PATH+OUTPUT_DF_VAL)
df_steps_train = pd.read_csv(DATA_PATH+OUTPUT_DF_TRAIN)
df_steps_test = pd.read_csv(DATA_PATH+OUTPUT_DF_TEST)
df_sessions = pd.read_csv(DATA_PATH+OUTPUT_DF_SESSIONS)
enc_dict = pickle.load(open(DATA_PATH+OUTPUT_ENCODING_DICT, "rb" ))
# ## Concatenate all data
# #### validation
df_tr = df_steps_tr.merge(df_sessions, on='session_id')
df_val = df_steps_val.merge(df_sessions, on='session_id')
df_all_cv = pd.concat([df_tr, df_val], axis=0).reset_index(drop=True)
del df_tr, df_val, df_steps_tr, df_steps_val
# #### all
df_test_new = df_steps_test.merge(df_sessions, on='session_id')
df_train_new = df_steps_train.merge(df_sessions, on='session_id')
df_all = pd.concat([df_train_new, df_test_new], axis=0).reset_index(drop=True)
del df_train_new, df_test_new, df_steps_train, df_steps_test
del df_sessions
    # ### create a dataframe with impressions list
idx = df_all.action_type=='clickout item'
df_all_imp_list = df_all.loc[idx,['session_id', 'step', 'impressions']].reset_index(drop=True)
df_all_imp_list['impressions_list_enc'] = df_all_imp_list.impressions.fillna('').str.split('|') \
.apply(lambda s: [enc_dict['reference'].get(i) for i in s])
df_all_imp_list.drop('impressions', axis=1, inplace=True)
# # Get Dwell
VAR_GROUPBY = 'session_id'
FEATURE_NAME = 'past_dwell_with_items_%s' % VAR_GROUPBY
print (FEATURE_NAME)
df_all_cv = df_all_cv.sort_values(['user_id', 'day','session_id', 'step', 'timestamp']).reset_index(drop=True)
df_all = df_all.sort_values(['user_id', 'day','session_id', 'step', 'timestamp']).reset_index(drop=True)
# ### validation
VARS_ = ['session_id', 'step', 'timestamp', 'action_type', 'reference']
df = df_all_cv[VARS_].copy()
FILE_NAME = 'Dcv_%s.gz' % FEATURE_NAME
print (FILE_NAME)
df['reference_enc'] = df.reference.apply(lambda s: str(enc_dict['reference'].get(s)))
df = df.drop('reference', axis=1)
df['next_timestamp'] = df.groupby('session_id').timestamp.shift(-1)
df['duration'] = df.next_timestamp-df.timestamp
df['duration'] = df['duration'].fillna(0)
df = df.drop(['timestamp', 'next_timestamp'], axis=1)
df['ref_dwell_dict'] = df.apply(lambda row: dict([(row.reference_enc, row.duration)]), axis=1).apply(Counter)
df = df.drop(['reference_enc', 'duration'], axis=1)
df['cumsum_dwell_dict'] = df.groupby('session_id').ref_dwell_dict.transform(pd.Series.cumsum)
df['cumsum_dwell_dict_shift'] = df.groupby('session_id').cumsum_dwell_dict.shift()
df = df.drop(['ref_dwell_dict', 'cumsum_dwell_dict'], axis=1)
df_feat = df.merge(df_all_imp_list, on=['session_id', 'step'])
df_feat[FEATURE_NAME] = df_feat.apply(lambda row: [row.cumsum_dwell_dict_shift.get(str(s), -1) for s in row.impressions_list_enc] \
if pd.notnull(row.cumsum_dwell_dict_shift) else [-1 for s in row.impressions_list_enc], axis=1)
df_feat = df_feat[['session_id', 'step', FEATURE_NAME]]
df_feat.to_csv(FEATURES_PATH+FILE_NAME, index=False, compression='gzip')
print (FEATURES_PATH+FILE_NAME)
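    # Added note: each row's Counter maps an encoded item id to the seconds spent
    # on that step; the per-session cumulative sum, shifted by one step, therefore
    # gives the total dwell time accumulated with every previously seen item, and
    # -1 marks impressions the user has not dwelled on yet.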
def get_imp_means_and_stds(df_tr_=None, var_group = 'seq_num_new'):
aux = df_tr_[[var_group]].reset_index(drop=True)[var_group]
lista=list(itertools.chain.from_iterable(aux))
listasemnan = [s for s in lista if s!=-1]
means = np.mean(listasemnan)
stds = np.std(listasemnan)
maxv = np.max(listasemnan)
return means, stds, maxv
def get_log_imp_means_and_stds(df_tr_=None, var_group = 'seq_num_new'):
aux = df_tr_[[var_group]].reset_index(drop=True)[var_group]
lista=list(itertools.chain.from_iterable(aux))
listasemnan = np.log(np.array([s for s in lista if s!=-1])+1.9)
means = np.mean(listasemnan)
stds = np.std(listasemnan)
maxv = np.max(listasemnan)
return means, stds,maxv
normalizations_dict = {}
normalizations_dict['dwell_times'] = {}
means, stds, maxv = get_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times']['means'] = means
normalizations_dict['dwell_times']['stds'] = stds
normalizations_dict['dwell_times']['max'] = maxv
normalizations_dict['dwell_times_log'] = {}
means, stds, maxv = get_log_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times_log']['means'] = means
normalizations_dict['dwell_times_log']['stds'] = stds
normalizations_dict['dwell_times_log']['max'] = maxv
with open(FEATURES_PATH+OUTPUT_NORMLIZATIONS_VAL, 'wb') as handle:
pickle.dump(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ### all
VARS_ = ['session_id', 'step', 'timestamp', 'action_type', 'reference']
df = df_all[VARS_].copy()
FILE_NAME = 'D_%s.gz' % FEATURE_NAME
print (FILE_NAME)
df['reference_enc'] = df.reference.apply(lambda s: str(enc_dict['reference'].get(s)))
df = df.drop('reference', axis=1)
df['next_timestamp'] = df.groupby('session_id').timestamp.shift(-1)
df['duration'] = df.next_timestamp-df.timestamp
df['duration'] = df['duration'].fillna(0)
df = df.drop(['timestamp', 'next_timestamp'], axis=1)
df['ref_dwell_dict'] = df.apply(lambda row: dict([(row.reference_enc, row.duration)]), axis=1).apply(Counter)
df = df.drop(['reference_enc', 'duration'], axis=1)
df['cumsum_dwell_dict'] = df.groupby('session_id').ref_dwell_dict.transform(pd.Series.cumsum)
df['cumsum_dwell_dict_shift'] = df.groupby('session_id').cumsum_dwell_dict.shift()
df = df.drop(['ref_dwell_dict', 'cumsum_dwell_dict'], axis=1)
df_feat = df.merge(df_all_imp_list, on=['session_id', 'step'])
df_feat[FEATURE_NAME] = df_feat.apply(lambda row: [row.cumsum_dwell_dict_shift.get(str(s), -1) for s in row.impressions_list_enc] \
if pd.notnull(row.cumsum_dwell_dict_shift) else [-1 for s in row.impressions_list_enc], axis=1)
df_feat = df_feat[['session_id', 'step', FEATURE_NAME]]
df_feat.to_csv(FEATURES_PATH+FILE_NAME, index=False, compression='gzip')
print (FEATURES_PATH+FILE_NAME)
normalizations_dict = {}
normalizations_dict['dwell_times'] = {}
means, stds, maxv = get_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times']['means'] = means
normalizations_dict['dwell_times']['stds'] = stds
normalizations_dict['dwell_times']['max'] = maxv
normalizations_dict['dwell_times_log'] = {}
means, stds, maxv = get_log_imp_means_and_stds(df_tr_=df_feat, var_group = 'past_dwell_with_items_session_id')
normalizations_dict['dwell_times_log']['means'] = means
normalizations_dict['dwell_times_log']['stds'] = stds
normalizations_dict['dwell_times_log']['max'] = maxv
with open(FEATURES_PATH+OUTPUT_NORMLIZATIONS_SUBM, 'wb') as handle:
pickle.dump(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
|
[
"logging.getLogger",
"logging.basicConfig",
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.max",
"itertools.chain.from_iterable",
"numpy.array",
"pandas.concat",
"numpy.std",
"pandas.notnull",
"sys.path.append"
] |
[((50, 72), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (65, 72), False, 'import sys\n'), ((785, 842), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create cv features"""'}), "(description='Create cv features')\n", (808, 842), False, 'import argparse\n'), ((1351, 1370), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1368, 1370), False, 'import logging\n'), ((1375, 1467), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.INFO)\n", (1394, 1467), False, 'import logging\n'), ((2813, 2850), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TR)'], {}), '(DATA_PATH + OUTPUT_DF_TR)\n', (2824, 2850), True, 'import pandas as pd\n'), ((2868, 2906), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_VAL)'], {}), '(DATA_PATH + OUTPUT_DF_VAL)\n', (2879, 2906), True, 'import pandas as pd\n'), ((2926, 2966), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TRAIN)'], {}), '(DATA_PATH + OUTPUT_DF_TRAIN)\n', (2937, 2966), True, 'import pandas as pd\n'), ((2985, 3024), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_TEST)'], {}), '(DATA_PATH + OUTPUT_DF_TEST)\n', (2996, 3024), True, 'import pandas as pd\n'), ((3041, 3084), 'pandas.read_csv', 'pd.read_csv', (['(DATA_PATH + OUTPUT_DF_SESSIONS)'], {}), '(DATA_PATH + OUTPUT_DF_SESSIONS)\n', (3052, 3084), True, 'import pandas as pd\n'), ((6359, 6379), 'numpy.mean', 'np.mean', (['listasemnan'], {}), '(listasemnan)\n', (6366, 6379), True, 'import numpy as np\n'), ((6396, 6415), 'numpy.std', 'np.std', (['listasemnan'], {}), '(listasemnan)\n', (6402, 6415), True, 'import numpy as np\n'), ((6431, 6450), 'numpy.max', 'np.max', (['listasemnan'], {}), '(listasemnan)\n', (6437, 6450), True, 'import numpy as np\n'), ((6775, 6795), 'numpy.mean', 'np.mean', (['listasemnan'], {}), '(listasemnan)\n', (6782, 6795), True, 'import numpy as np\n'), ((6812, 6831), 'numpy.std', 'np.std', (['listasemnan'], {}), '(listasemnan)\n', (6818, 6831), True, 'import numpy as np\n'), ((6847, 6866), 'numpy.max', 'np.max', (['listasemnan'], {}), '(listasemnan)\n', (6853, 6866), True, 'import numpy as np\n'), ((7671, 7745), 'pickle.dump', 'pickle.dump', (['normalizations_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (7682, 7745), False, 'import pickle\n'), ((9979, 10053), 'pickle.dump', 'pickle.dump', (['normalizations_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(normalizations_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (9990, 10053), False, 'import pickle\n'), ((3348, 3382), 'pandas.concat', 'pd.concat', (['[df_tr, df_val]'], {'axis': '(0)'}), '([df_tr, df_val], axis=0)\n', (3357, 3382), True, 'import pandas as pd\n'), ((3623, 3669), 'pandas.concat', 'pd.concat', (['[df_train_new, df_test_new]'], {'axis': '(0)'}), '([df_train_new, df_test_new], axis=0)\n', (3632, 3669), True, 'import pandas as pd\n'), ((6257, 6291), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['aux'], {}), '(aux)\n', (6286, 6291), False, 'import itertools\n'), ((6651, 6685), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['aux'], {}), '(aux)\n', (6680, 6685), False, 'import itertools\n'), ((5827, 5866), 'pandas.notnull', 'pd.notnull', (['row.cumsum_dwell_dict_shift'], {}), '(row.cumsum_dwell_dict_shift)\n', (5837, 
5866), True, 'import pandas as pd\n'), ((6716, 6755), 'numpy.array', 'np.array', (['[s for s in lista if s != -1]'], {}), '([s for s in lista if s != -1])\n', (6724, 6755), True, 'import numpy as np\n'), ((8940, 8979), 'pandas.notnull', 'pd.notnull', (['row.cumsum_dwell_dict_shift'], {}), '(row.cumsum_dwell_dict_shift)\n', (8950, 8979), True, 'import pandas as pd\n')]
|
import copy
import importlib
import os
import numpy as np
import tensorflow as tf
import logging
tf.get_logger().setLevel(logging.ERROR)
from client import Client
from server import Server
from model import ServerModel
from baseline_constants import MAIN_PARAMS, MODEL_PARAMS
from fedbayes_helper import *
from fedbayes_tinyhelper import *
import metrics.writer as metrics_writer
STAT_METRICS_PATH = 'metrics/stat_metrics.csv'
SYS_METRICS_PATH = 'metrics/sys_metrics.csv'
#from utils.matching.pfnm import layer_group_descent as pdm_multilayer_group_descent
from utils.matching.cnn_pfnm import layerwise_sampler
from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching
def print_metrics(metrics, weights):
ordered_weights = [weights[c] for c in sorted(weights)]
metric_names = metrics_writer.get_metrics_names(metrics)
for metric in metric_names:
ordered_metric = [metrics[c][metric] for c in sorted(metrics)]
print('%s: %g, 10th percentile: %g, 90th percentile %g' \
% (metric,
np.average(ordered_metric, weights=ordered_weights),
np.percentile(ordered_metric, 10),
np.percentile(ordered_metric, 90)))
class Fedbayes_Sing_Trainer:
def __init__(self, users, groups, train_data, test_data):
# matching requires num of classes to be set during
# model_config stage, or it can cause program failure
self.users = users
self.train_data = train_data
self.test_data = test_data
self.num_classes = 0
self.shape_func = None
self.upd_collector = []
def recover_weights(self, weights, assignment, model_summary, model_meta_data):
res_weights = []
conv_varname, dense_varname, weight_varname = "conv", "dense", "kernel"
#print("checking len, model summ: {}, model meta data: {}".format(len(model_summary), len(model_meta_data)))
for var_name, o, v in zip(model_summary, model_meta_data, weights):
print("name {}, old shape is {}, new shape is {}".format(var_name, o, v.shape))
if var_name.startswith(conv_varname):
if var_name.endswith(weight_varname):
w = v.reshape(o)
w = w.transpose((2, 3, 1, 0))
else:
w = v
elif var_name.startswith("batch"):
w = np.ones(o)
elif var_name.startswith(dense_varname):
if var_name.endswith(weight_varname):
w = v.transpose()
else:
w = v
res_weights.append(w)
        # Only the last dense layer is transposed here; the reason this extra
        # transpose is needed is not fully understood yet. Revisit after January
        # and find a cleaner way to handle it.
res_weights[-2] = res_weights[-2].T
return res_weights
def train_model(self, client_model, train_data, weights, assignment, config):
        # `assignment` is a dictionary of {layer_name: [global_id]}, i.e. one
        # worker's matching assignment for every layer except the last one.
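        # Illustrative example (hypothetical layer names and global unit ids):
        #   {"conv2d/kernel": [0, 3, 7], "dense/kernel": [1, 2, 5]}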
epochs = config["epochs"]
batch_size = config["batch-size"]
client_model.set_params(weights)
client_model.train(train_data, num_epochs=epochs, batch_size=batch_size)
update = client_model.get_params()
self.upd_collector.append(update)
def model_config(self, config, dataset, my_model):
shared_model = my_model
model_path = '%s/%s.py' % (dataset, shared_model)
if not os.path.exists(model_path):
print('Please specify a valid dataset and a valid model.')
model_path = '%s.%s' % (dataset, shared_model)
print('############################## %s ##############################' % model_path)
mod = importlib.import_module(model_path)
ClientModel = getattr(mod, 'ClientModel')
self.shape_func = getattr(mod, 'get_convolution_extractor_shape')
# Suppress tf warnings
tf.logging.set_verbosity(tf.logging.WARN)
# Create 2 models
model_params = MODEL_PARAMS[model_path]
model_params_list = list(model_params)
self.num_classes = model_params[1] # setting num_class to be a member of the trainer
model_params_list.insert(0, config["seed"])
model_params_list[1] = config["lr"]
model_params = tuple(model_params_list)
tf.reset_default_graph()
client_model = ClientModel(*model_params)
# Create server
server = Server(client_model)
# Create clients
_users = self.users
groups = [[] for _ in _users]
clients = [Client(u, g, self.train_data[u], self.test_data[u], client_model) \
for u, g in zip(_users, groups)]
print('%d Clients in Total' % len(clients))
return clients, server, client_model
def begins(self, config, args):
clients, server, client_model = self.model_config(config, args.dataset, 'cnn')
num_rounds = config["num-rounds"]
eval_every = config["eval-every"]
epochs_per_round = config['epochs']
batch_size = config['batch-size']
clients_per_round = config["clients-per-round"]
state_dict = {}
# Test untrained model on all clients
# stat_metrics = server.test_model(clients)
# all_ids, all_groups, all_num_samples = server.get_clients_info(clients)
# print_metrics(stat_metrics, all_num_samples)
model_summary = client_model.get_summary()
model_meta_data = client_model.get_meta_data()
# gl_weight = client_model.get_params()
gl_weight = self.batch_BBPMAP(clients[:40], state_dict, client_model, config, args)
gl_weight = self.recover_weights(gl_weight, [], model_summary, model_meta_data)
server.model = gl_weight
stat_metrics = server.test_model(clients[:40])
all_ids, all_groups, all_num_samples = server.get_clients_info(clients[:40])
print_metrics(stat_metrics, all_num_samples)
first = True
# for i in range(num_rounds):
# print('--- Round %d of %d: Training %d Clients ---' % (i+1, num_rounds, clients_per_round))
# server.select_clients(clients, num_clients=clients_per_round)
# batch_clients = server.selected_clients
# if first:
# cw = gl_weight
# else:
# cw = self.recover_weights(gl_weight, assignment, model_summary, model_meta_data)
# for k in batch_clients:
# if first or not (k.id in state_dict):
# assignment = []
# else:
# assignment = state_dict[k.id]
# self.train_model(client_model, k.train_data, cw, assignment, config)
# gl_weight = self.batch_BBPMAP(batch_clients, state_dict, client_model, config, args)
# if (i + 1) % eval_every == 0 or (i + 1) == num_rounds:
# cw = self.recover_weights(gl_weight, assignment, model_summary, model_meta_data)
# server.model = cw
# stat_metrics = server.test_model(clients)
# print_metrics(stat_metrics, all_num_samples)
# first = False
client_model.close()
def ends(self):
print("experiment of Fedbayes finished.")
return
def batch_BBPMAP(self, batch_clients, state_dict, client_model, config, args):
model_summary = client_model.get_summary()
model_meta_data = client_model.get_meta_data()
n_classes = self.num_classes
# averaging_weights, cls_freqs = avg_cls_weights(batch_clients, args.dataset, n_classes)
averaging_weights, cls_freqs = avg_cls_weights(args.dataset, n_classes)
sigma=config["sigma"]
sigma0=config["sigma0"]
gamma=config["gamma"]
it = config["sample-iter"]
assignments_list = []
        # Parameter names:
        # C is the number of layers in the model structure, not counting bias terms
        # J is the number of clients (workers)
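        # (Both are computed just below: C = len(model_meta_data) // 2 and J = len(net_list).)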
net_list = load_files()
C = int(len(model_meta_data) / 2)
J = len(net_list)
matching_shapes = []
fc_pos = None
apply_by_j = lambda j: load_local_model_weight_func(j, model_summary)
batch_weights = list(map(apply_by_j, net_list))
batch_freqs = pdm_prepare_freq(cls_freqs, self.num_classes)
for cur_l in range(1, C):
layer_hungarian_weights, assignment, L_next = layerwise_sampler(
batch_weights=batch_weights,
layer_index=cur_l,
sigma0_layers=sigma0,
sigma_layers=sigma,
batch_frequencies=batch_freqs,
it=it,
gamma_layers=gamma,
model_meta_data=model_meta_data,
model_layer_type= model_summary,
n_layers= C,
matching_shapes=matching_shapes,
)
assignments_list.append(assignment)
for client, a_val in zip(batch_clients, assignment):
p_index = 2 * (cur_l -1)
v_name = model_summary[p_index]
if client.id in state_dict:
cdict = state_dict[client.id]
else:
cdict = {}
cdict.update({v_name: a_val})
state_dict.update({client.id : cdict})
print("Number of assignment: {}, L_next: {}, matched_weight shape: {} ".format(
len(assignment), L_next, layer_hungarian_weights[0].shape) )
matching_shapes.append(L_next)
temp_network_weg = combine_network_after_matching(batch_weights, cur_l,
model_summary, model_meta_data,
layer_hungarian_weights, L_next, assignment,
matching_shapes, self.shape_func)
old_data = client_model.get_params()
gl_weights = []
for worker in range(J):
j = worker
gl_weights.append(reconstruct_weights(temp_network_weg[j], assignment[j],
model_summary, old_data,
model_summary[2 * cur_l - 2]))
models = local_train(batch_clients, gl_weights, cur_l, config)
batch_weights = list(map(apply_by_j, models))
## we handle the last layer carefully here ...
## averaging the last layer
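        # Instead of being matched, each worker's last dense layer (kernel and bias
        # stacked together) is averaged per class, weighting worker j by
        # averaging_weights[j][i], derived from its class frequencies.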
matched_weights = []
last_layer_weights_collector = []
for worker in range(J):
# firstly we combine last layer's weight and bias
bias_shape = batch_weights[worker][-1].shape
last_layer_bias = batch_weights[worker][-1].reshape((1, bias_shape[0]))
last_layer_weights = np.concatenate((batch_weights[worker][-2].T, last_layer_bias), axis=0)
# the directed normalization doesn't work well, let's try weighted averaging
last_layer_weights_collector.append(last_layer_weights)
last_layer_weights_collector = np.array(last_layer_weights_collector)
avg_last_layer_weight = np.zeros(last_layer_weights_collector[0].shape, dtype=np.float32)
for i in range(n_classes):
avg_weight_collector = np.zeros(last_layer_weights_collector[0][:, 0].shape, dtype=np.float32)
for j in range(J):
avg_weight_collector += averaging_weights[j][i]*last_layer_weights_collector[j][:, i]
avg_last_layer_weight[:, i] = avg_weight_collector
#avg_last_layer_weight = np.mean(last_layer_weights_collector, axis=0)
for i in range(C * 2):
if i < (C * 2 - 2):
matched_weights.append(batch_weights[0][i])
matched_weights.append(avg_last_layer_weight[0:-1, :])
matched_weights.append(avg_last_layer_weight[-1, :])
self.upd_collector = []
return matched_weights
|
[
"os.path.exists",
"tensorflow.reset_default_graph",
"importlib.import_module",
"numpy.ones",
"numpy.average",
"tensorflow.logging.set_verbosity",
"metrics.writer.get_metrics_names",
"server.Server",
"numpy.array",
"numpy.zeros",
"utils.matching.cnn_pfnm.layerwise_sampler",
"utils.matching.cnn_retrain.local_train",
"tensorflow.get_logger",
"client.Client",
"utils.matching.cnn_retrain.combine_network_after_matching",
"numpy.concatenate",
"numpy.percentile",
"utils.matching.cnn_retrain.reconstruct_weights"
] |
[((836, 877), 'metrics.writer.get_metrics_names', 'metrics_writer.get_metrics_names', (['metrics'], {}), '(metrics)\n', (868, 877), True, 'import metrics.writer as metrics_writer\n'), ((98, 113), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (111, 113), True, 'import tensorflow as tf\n'), ((3894, 3929), 'importlib.import_module', 'importlib.import_module', (['model_path'], {}), '(model_path)\n', (3917, 3929), False, 'import importlib\n'), ((4093, 4134), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.WARN'], {}), '(tf.logging.WARN)\n', (4117, 4134), True, 'import tensorflow as tf\n'), ((4510, 4534), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4532, 4534), True, 'import tensorflow as tf\n'), ((4627, 4647), 'server.Server', 'Server', (['client_model'], {}), '(client_model)\n', (4633, 4647), False, 'from server import Server\n'), ((11589, 11627), 'numpy.array', 'np.array', (['last_layer_weights_collector'], {}), '(last_layer_weights_collector)\n', (11597, 11627), True, 'import numpy as np\n'), ((11661, 11726), 'numpy.zeros', 'np.zeros', (['last_layer_weights_collector[0].shape'], {'dtype': 'np.float32'}), '(last_layer_weights_collector[0].shape, dtype=np.float32)\n', (11669, 11726), True, 'import numpy as np\n'), ((3630, 3656), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (3644, 3656), False, 'import os\n'), ((4760, 4825), 'client.Client', 'Client', (['u', 'g', 'self.train_data[u]', 'self.test_data[u]', 'client_model'], {}), '(u, g, self.train_data[u], self.test_data[u], client_model)\n', (4766, 4825), False, 'from client import Client\n'), ((8814, 9106), 'utils.matching.cnn_pfnm.layerwise_sampler', 'layerwise_sampler', ([], {'batch_weights': 'batch_weights', 'layer_index': 'cur_l', 'sigma0_layers': 'sigma0', 'sigma_layers': 'sigma', 'batch_frequencies': 'batch_freqs', 'it': 'it', 'gamma_layers': 'gamma', 'model_meta_data': 'model_meta_data', 'model_layer_type': 'model_summary', 'n_layers': 'C', 'matching_shapes': 'matching_shapes'}), '(batch_weights=batch_weights, layer_index=cur_l,\n sigma0_layers=sigma0, sigma_layers=sigma, batch_frequencies=batch_freqs,\n it=it, gamma_layers=gamma, model_meta_data=model_meta_data,\n model_layer_type=model_summary, n_layers=C, matching_shapes=matching_shapes\n )\n', (8831, 9106), False, 'from utils.matching.cnn_pfnm import layerwise_sampler\n'), ((9998, 10169), 'utils.matching.cnn_retrain.combine_network_after_matching', 'combine_network_after_matching', (['batch_weights', 'cur_l', 'model_summary', 'model_meta_data', 'layer_hungarian_weights', 'L_next', 'assignment', 'matching_shapes', 'self.shape_func'], {}), '(batch_weights, cur_l, model_summary,\n model_meta_data, layer_hungarian_weights, L_next, assignment,\n matching_shapes, self.shape_func)\n', (10028, 10169), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n'), ((10768, 10821), 'utils.matching.cnn_retrain.local_train', 'local_train', (['batch_clients', 'gl_weights', 'cur_l', 'config'], {}), '(batch_clients, gl_weights, cur_l, config)\n', (10779, 10821), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n'), ((11320, 11390), 'numpy.concatenate', 'np.concatenate', (['(batch_weights[worker][-2].T, last_layer_bias)'], {'axis': '(0)'}), '((batch_weights[worker][-2].T, last_layer_bias), axis=0)\n', (11334, 11390), True, 'import numpy as np\n'), ((11798, 11869), 
'numpy.zeros', 'np.zeros', (['last_layer_weights_collector[0][:, 0].shape'], {'dtype': 'np.float32'}), '(last_layer_weights_collector[0][:, 0].shape, dtype=np.float32)\n', (11806, 11869), True, 'import numpy as np\n'), ((1089, 1140), 'numpy.average', 'np.average', (['ordered_metric'], {'weights': 'ordered_weights'}), '(ordered_metric, weights=ordered_weights)\n', (1099, 1140), True, 'import numpy as np\n'), ((1159, 1192), 'numpy.percentile', 'np.percentile', (['ordered_metric', '(10)'], {}), '(ordered_metric, 10)\n', (1172, 1192), True, 'import numpy as np\n'), ((1211, 1244), 'numpy.percentile', 'np.percentile', (['ordered_metric', '(90)'], {}), '(ordered_metric, 90)\n', (1224, 1244), True, 'import numpy as np\n'), ((2444, 2454), 'numpy.ones', 'np.ones', (['o'], {}), '(o)\n', (2451, 2454), True, 'import numpy as np\n'), ((10524, 10638), 'utils.matching.cnn_retrain.reconstruct_weights', 'reconstruct_weights', (['temp_network_weg[j]', 'assignment[j]', 'model_summary', 'old_data', 'model_summary[2 * cur_l - 2]'], {}), '(temp_network_weg[j], assignment[j], model_summary,\n old_data, model_summary[2 * cur_l - 2])\n', (10543, 10638), False, 'from utils.matching.cnn_retrain import reconstruct_weights, local_train, combine_network_after_matching\n')]
|
import numpy as np
import pyverilator
import os
from . import to_float, to_fix_point_int
import taichi as ti
from .all_python_functions import calc_next_pos_and_velocity, rectify_positions_and_velocities, \
rectify_positions_in_collision, calc_after_collision_velocity, two_ball_collides, normalize_vector
def rectify_positions_in_collision_test(sim, ball1_pos, ball2_pos, radius):
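    # Drive the Verilog rectifier: pulse reset, load the fixed-point ball
    # positions and radius, tick the clock until `done` goes high, then convert
    # the corrected positions back to floats.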
sim.io.rst = 0
sim.io.rst = 1
sim.io.rst = 0
sim.io.x0, sim.io.y0 = to_fix_point_int(ball1_pos)
sim.io.x1, sim.io.y1 = to_fix_point_int(ball2_pos)
sim.io.radius = to_fix_point_int(radius)
done = bool(sim.io.done.value)
while not done:
sim.clock.tick()
done = bool(sim.io.done.value)
rectified_ball1_pos = to_float(np.array([sim.io.new_x0.value, sim.io.new_y0.value]))
rectified_ball2_pos = to_float(np.array([sim.io.new_x1.value, sim.io.new_y1.value]))
return rectified_ball1_pos, rectified_ball2_pos
def test_rectifier():
os.chdir("./pyverilog")
sim = pyverilator.PyVerilator.build("rectify_p_in_collision.v")
ti.init(ti.cpu)
resolution = (500, 500)
fps = 60
g = 9.8
drag_coefficient = 0.001
# world space [0.0, 1.0] ^ 2
cue_ball_velocity_magnitude_wc = 1.0
ball_pixel_radius = 10
ball_radius_wc = 1.0 / resolution[0] * ball_pixel_radius
gui = ti.GUI("billiard_game_dual_ball", resolution)
gui.fps_limit = fps
delta_t = 1.0 / fps
cue_ball_velocity_wc, _ = normalize_vector(np.random.rand(2))
cue_ball_velocity_wc *= cue_ball_velocity_magnitude_wc
cue_ball_pos_wc = np.ones(2) * 0.5
ball_pos_wc = np.array([0.25, 0.25])
ball_velocity_wc = np.zeros(2)
boundary_begin = np.array([
[0.0, 0.0],
[0.0, 0.0],
[1.0, 1.0],
[1.0, 1.0]
])
boundary_end = np.array([
[1.0, 0.0],
[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]
])
virtual_bound_x = [ball_radius_wc, 1.0 - ball_radius_wc]
virtual_bound_y = [ball_radius_wc, 1.0 - ball_radius_wc]
while gui.running:
gui.lines(begin=boundary_begin, end=boundary_end, radius=2)
gui.circle(cue_ball_pos_wc, radius=ball_pixel_radius)
gui.circle(ball_pos_wc, radius=ball_pixel_radius)
gui.show()
cue_ball_pos_wc, cue_ball_velocity_wc = calc_next_pos_and_velocity(cue_ball_pos_wc, cue_ball_velocity_wc,
delta_t, drag_coefficient, g)
cue_ball_pos_wc, cue_ball_velocity_wc = rectify_positions_and_velocities(virtual_bound_x[0], virtual_bound_x[1],
virtual_bound_y[0], virtual_bound_y[1],
cue_ball_pos_wc,
cue_ball_velocity_wc)
ball_pos_wc, ball_velocity_wc = calc_next_pos_and_velocity(ball_pos_wc, ball_velocity_wc, delta_t,
drag_coefficient, g)
ball_pos_wc, ball_velocity_wc = rectify_positions_and_velocities(virtual_bound_x[0], virtual_bound_x[1],
virtual_bound_y[0], virtual_bound_y[1],
ball_pos_wc, ball_velocity_wc)
if two_ball_collides(cue_ball_pos_wc, ball_pos_wc, ball_radius_wc):
old_cue_ball_pos_wc, old_ball_pos_wc = cue_ball_pos_wc, ball_pos_wc
cue_ball_pos_wc_ref, ball_pos_wc_ref = rectify_positions_in_collision(old_cue_ball_pos_wc, old_ball_pos_wc,
ball_radius_wc)
cue_ball_pos_wc, ball_pos_wc = rectify_positions_in_collision_test(sim, old_cue_ball_pos_wc,
old_ball_pos_wc,
ball_radius_wc)
assert np.allclose(cue_ball_pos_wc_ref, cue_ball_pos_wc, atol=0.0001)
assert np.allclose(ball_pos_wc_ref, ball_pos_wc, atol=0.0001)
cue_ball_velocity_wc, ball_velocity_wc = calc_after_collision_velocity(cue_ball_pos_wc,
ball_pos_wc,
cue_ball_velocity_wc,
ball_velocity_wc)
|
[
"numpy.allclose",
"pyverilator.PyVerilator.build",
"numpy.random.rand",
"numpy.ones",
"taichi.init",
"os.chdir",
"numpy.array",
"numpy.zeros",
"taichi.GUI"
] |
[((978, 1001), 'os.chdir', 'os.chdir', (['"""./pyverilog"""'], {}), "('./pyverilog')\n", (986, 1001), False, 'import os\n'), ((1012, 1069), 'pyverilator.PyVerilator.build', 'pyverilator.PyVerilator.build', (['"""rectify_p_in_collision.v"""'], {}), "('rectify_p_in_collision.v')\n", (1041, 1069), False, 'import pyverilator\n'), ((1075, 1090), 'taichi.init', 'ti.init', (['ti.cpu'], {}), '(ti.cpu)\n', (1082, 1090), True, 'import taichi as ti\n'), ((1348, 1393), 'taichi.GUI', 'ti.GUI', (['"""billiard_game_dual_ball"""', 'resolution'], {}), "('billiard_game_dual_ball', resolution)\n", (1354, 1393), True, 'import taichi as ti\n'), ((1625, 1647), 'numpy.array', 'np.array', (['[0.25, 0.25]'], {}), '([0.25, 0.25])\n', (1633, 1647), True, 'import numpy as np\n'), ((1671, 1682), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1679, 1682), True, 'import numpy as np\n'), ((1705, 1763), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]]'], {}), '([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]])\n', (1713, 1763), True, 'import numpy as np\n'), ((1822, 1880), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n', (1830, 1880), True, 'import numpy as np\n'), ((755, 807), 'numpy.array', 'np.array', (['[sim.io.new_x0.value, sim.io.new_y0.value]'], {}), '([sim.io.new_x0.value, sim.io.new_y0.value])\n', (763, 807), True, 'import numpy as np\n'), ((844, 896), 'numpy.array', 'np.array', (['[sim.io.new_x1.value, sim.io.new_y1.value]'], {}), '([sim.io.new_x1.value, sim.io.new_y1.value])\n', (852, 896), True, 'import numpy as np\n'), ((1490, 1507), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1504, 1507), True, 'import numpy as np\n'), ((1590, 1600), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1597, 1600), True, 'import numpy as np\n'), ((4149, 4211), 'numpy.allclose', 'np.allclose', (['cue_ball_pos_wc_ref', 'cue_ball_pos_wc'], {'atol': '(0.0001)'}), '(cue_ball_pos_wc_ref, cue_ball_pos_wc, atol=0.0001)\n', (4160, 4211), True, 'import numpy as np\n'), ((4231, 4285), 'numpy.allclose', 'np.allclose', (['ball_pos_wc_ref', 'ball_pos_wc'], {'atol': '(0.0001)'}), '(ball_pos_wc_ref, ball_pos_wc, atol=0.0001)\n', (4242, 4285), True, 'import numpy as np\n')]
|
import unittest
from operator import attrgetter
from typing import Dict
import numpy as np
from PIL import Image
from _pytest._code import ExceptionInfo
from lunavl.sdk.errors.errors import ErrorInfo
from lunavl.sdk.faceengine.engine import VLFaceEngine
from lunavl.sdk.image_utils.geometry import Rect
from lunavl.sdk.image_utils.image import ColorFormat, VLImage
from tests.resources import ONE_FACE
class BaseTestClass(unittest.TestCase):
faceEngine = VLFaceEngine()
@classmethod
def setup_class(cls):
super().setUpClass()
@classmethod
def teardown_class(cls) -> None:
super().tearDownClass()
@staticmethod
def assertLunaVlError(exceptionInfo: ExceptionInfo, expectedError: ErrorInfo):
"""
Assert LunaVl Error
Args:
exceptionInfo: response from service
expectedError: expected error
"""
assert exceptionInfo.value.error.errorCode == expectedError.errorCode, exceptionInfo.value
assert exceptionInfo.value.error.description == expectedError.description, exceptionInfo.value
if expectedError.detail != "":
assert exceptionInfo.value.error.detail == expectedError.detail, exceptionInfo.value
@staticmethod
def assertReceivedAndRawExpectedErrors(receivedError: ErrorInfo, expectedErrorEmptyDetail: ErrorInfo):
"""
Assert expected and received errors as dicts
Args:
receivedError: received error
expectedErrorEmptyDetail: expected error with empty detail
"""
assert expectedErrorEmptyDetail.errorCode == receivedError.errorCode
assert expectedErrorEmptyDetail.description == receivedError.description
assert expectedErrorEmptyDetail.description == receivedError.detail
@staticmethod
def checkRectAttr(defaultRect: Rect):
"""
Validate attributes of Rect
Args:
defaultRect: rect object
"""
for rectType in ("coreRectI", "coreRectF"):
assert all(
isinstance(
getattr(defaultRect.__getattribute__(rectType), f"{coordinate}"),
float if rectType == "coreRectF" else int,
)
for coordinate in ("x", "y", "height", "width")
)
@staticmethod
def generateColorToArrayMap() -> Dict[ColorFormat, np.ndarray]:
"""
Get images as ndarrays in all available color formats.
Returns:
color format to pixel ndarray map
"""
image = Image.open(ONE_FACE)
R, G, B = np.array(image).T
X = np.ndarray(B.shape, dtype=np.uint8)
allImages = {
ColorFormat.B8G8R8: np.array((B, G, R)).T,
ColorFormat.B8G8R8X8: np.array((B, G, R, X)).T,
ColorFormat.IR_X8X8X8: np.array(image, dtype=np.uint8),
ColorFormat.R16: np.array(image.convert("L"), dtype=np.uint16),
ColorFormat.R8: np.array(image.convert("L"), dtype=np.uint8),
ColorFormat.R8G8B8: np.array(image),
ColorFormat.R8G8B8X8: np.array((R, G, B, X)).T,
}
def _checksAllFormats():
_notImplementedFormats = set(ColorFormat) - set(allImages) - {ColorFormat.Unknown}
if _notImplementedFormats:
notImplementedFormatsList = list(map(attrgetter("name"), _notImplementedFormats))
raise RuntimeError(f"Add Image for {notImplementedFormatsList} color formats")
def _checksArrayShapes():
for color, ndarray in allImages.items():
if ndarray.shape[:2] != allImages[ColorFormat.R8G8B8].shape[:2]:
msg = (
f"'{color.name}' image has incorrect shape.\n"
f"Expected:{allImages[ColorFormat.R8G8B8].shape}\n"
f"Received:{ndarray.shape}"
)
raise RuntimeError(msg)
_checksAllFormats()
_checksArrayShapes()
return allImages
@staticmethod
def getColorToImageMap() -> Dict[ColorFormat, VLImage]:
"""
Get images as vl image in all available color formats.
Returns:
color format to vl image map
"""
return {
color: VLImage.fromNumpyArray(ndarray, color)
for color, ndarray in BaseTestClass.generateColorToArrayMap().items()
}
|
[
"operator.attrgetter",
"PIL.Image.open",
"lunavl.sdk.faceengine.engine.VLFaceEngine",
"numpy.array",
"numpy.ndarray",
"lunavl.sdk.image_utils.image.VLImage.fromNumpyArray"
] |
[((463, 477), 'lunavl.sdk.faceengine.engine.VLFaceEngine', 'VLFaceEngine', ([], {}), '()\n', (475, 477), False, 'from lunavl.sdk.faceengine.engine import VLFaceEngine\n'), ((2576, 2596), 'PIL.Image.open', 'Image.open', (['ONE_FACE'], {}), '(ONE_FACE)\n', (2586, 2596), False, 'from PIL import Image\n'), ((2645, 2680), 'numpy.ndarray', 'np.ndarray', (['B.shape'], {'dtype': 'np.uint8'}), '(B.shape, dtype=np.uint8)\n', (2655, 2680), True, 'import numpy as np\n'), ((2615, 2630), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2623, 2630), True, 'import numpy as np\n'), ((2854, 2885), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (2862, 2885), True, 'import numpy as np\n'), ((3069, 3084), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3077, 3084), True, 'import numpy as np\n'), ((4323, 4361), 'lunavl.sdk.image_utils.image.VLImage.fromNumpyArray', 'VLImage.fromNumpyArray', (['ndarray', 'color'], {}), '(ndarray, color)\n', (4345, 4361), False, 'from lunavl.sdk.image_utils.image import ColorFormat, VLImage\n'), ((2736, 2755), 'numpy.array', 'np.array', (['(B, G, R)'], {}), '((B, G, R))\n', (2744, 2755), True, 'import numpy as np\n'), ((2793, 2815), 'numpy.array', 'np.array', (['(B, G, R, X)'], {}), '((B, G, R, X))\n', (2801, 2815), True, 'import numpy as np\n'), ((3120, 3142), 'numpy.array', 'np.array', (['(R, G, B, X)'], {}), '((R, G, B, X))\n', (3128, 3142), True, 'import numpy as np\n'), ((3377, 3395), 'operator.attrgetter', 'attrgetter', (['"""name"""'], {}), "('name')\n", (3387, 3395), False, 'from operator import attrgetter\n')]
|
from pygame import init, display, time, event, draw, QUIT
from numpy import arange
def grid(janela, comprimento, tamanho_linha, tamanho_quadrado):
def draw_grid(v):
draw.line(janela, (255, 255, 255),
(v * tamanho_quadrado, 0),
(v * tamanho_quadrado, comprimento))
draw.line(janela, (255, 255, 255),
(0, v * tamanho_quadrado),
(comprimento, v * tamanho_quadrado))
    # Draw the grid
for x_c in arange(comprimento // tamanho_linha):
draw_grid(x_c)
def borda(janela, comprimento, tamanho,
cor=(0, 0, 0), espaçamento=0, p_borda=12):
def borda_vertical(lado='esquerdo'):
for y in arange(comprimento // tamanho):
draw.line(janela, cor,
(y if lado == 'esquerdo' else y+comprimento-p_borda,
espaçamento),
(y if lado == 'esquerdo' else y+comprimento-p_borda,
comprimento))
def borda_horizontal(lado='cima'):
for x in arange(comprimento // tamanho):
draw.line(janela, cor,
(espaçamento,
x if lado == 'cima' else x+comprimento-p_borda),
(comprimento,
x if lado == 'cima' else x+comprimento-p_borda))
    # -------------------------- Borders
borda_vertical(lado='esquerdo')
borda_vertical(lado='direita')
borda_horizontal(lado='cima')
borda_horizontal(lado='baixo')
# ------------------ Program
init()
tela_cheia = 600, 600
janela = display.set_mode(tela_cheia)
janela.fill((0, 0, 0))
display.set_caption('Testes de grid')
FPS = 60
Fps = time.Clock()
def teste_grid(game):
while 1:
for evento in event.get():
if evento.type == QUIT:
game = False
grid(janela, 400, 42, 50)
borda(janela, 600, 30, (80, 80, 80), 0, 20)
display.flip()
Fps.tick(FPS)
if not game:
break
|
[
"pygame.display.set_caption",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.time.Clock",
"numpy.arange"
] |
[((1554, 1560), 'pygame.init', 'init', ([], {}), '()\n', (1558, 1560), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1594, 1622), 'pygame.display.set_mode', 'display.set_mode', (['tela_cheia'], {}), '(tela_cheia)\n', (1610, 1622), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1646, 1683), 'pygame.display.set_caption', 'display.set_caption', (['"""Testes de grid"""'], {}), "('Testes de grid')\n", (1665, 1683), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1700, 1712), 'pygame.time.Clock', 'time.Clock', ([], {}), '()\n', (1710, 1712), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((496, 532), 'numpy.arange', 'arange', (['(comprimento // tamanho_linha)'], {}), '(comprimento // tamanho_linha)\n', (502, 532), False, 'from numpy import arange\n'), ((179, 281), 'pygame.draw.line', 'draw.line', (['janela', '(255, 255, 255)', '(v * tamanho_quadrado, 0)', '(v * tamanho_quadrado, comprimento)'], {}), '(janela, (255, 255, 255), (v * tamanho_quadrado, 0), (v *\n tamanho_quadrado, comprimento))\n', (188, 281), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((322, 424), 'pygame.draw.line', 'draw.line', (['janela', '(255, 255, 255)', '(0, v * tamanho_quadrado)', '(comprimento, v * tamanho_quadrado)'], {}), '(janela, (255, 255, 255), (0, v * tamanho_quadrado), (comprimento,\n v * tamanho_quadrado))\n', (331, 424), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((710, 740), 'numpy.arange', 'arange', (['(comprimento // tamanho)'], {}), '(comprimento // tamanho)\n', (716, 740), False, 'from numpy import arange\n'), ((1058, 1088), 'numpy.arange', 'arange', (['(comprimento // tamanho)'], {}), '(comprimento // tamanho)\n', (1064, 1088), False, 'from numpy import arange\n'), ((1772, 1783), 'pygame.event.get', 'event.get', ([], {}), '()\n', (1781, 1783), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1946, 1960), 'pygame.display.flip', 'display.flip', ([], {}), '()\n', (1958, 1960), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((754, 926), 'pygame.draw.line', 'draw.line', (['janela', 'cor', "(y if lado == 'esquerdo' else y + comprimento - p_borda, espaçamento)", "(y if lado == 'esquerdo' else y + comprimento - p_borda, comprimento)"], {}), "(janela, cor, (y if lado == 'esquerdo' else y + comprimento -\n p_borda, espaçamento), (y if lado == 'esquerdo' else y + comprimento -\n p_borda, comprimento))\n", (763, 926), False, 'from pygame import init, display, time, event, draw, QUIT\n'), ((1102, 1266), 'pygame.draw.line', 'draw.line', (['janela', 'cor', "(espaçamento, x if lado == 'cima' else x + comprimento - p_borda)", "(comprimento, x if lado == 'cima' else x + comprimento - p_borda)"], {}), "(janela, cor, (espaçamento, x if lado == 'cima' else x +\n comprimento - p_borda), (comprimento, x if lado == 'cima' else x +\n comprimento - p_borda))\n", (1111, 1266), False, 'from pygame import init, display, time, event, draw, QUIT\n')]
|
from operator import truediv
import cv2
from time import sleep
import HandTrackingModule as htm
import os
import autopy
import numpy as np
import math
import mediapipe as mp
#import modules
#variables
frameR=20 #frame reduction
frameR_x=800
frameR_y=110
wCam,hCam=1300 ,400
pTime=0
smoothening = 5 #need to tune
plocX, plocY=0,0
clocX,clocY=0,0
##########
cap=cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
detector=htm.handDetector(maxHands=1)
wScr, hScr=autopy.screen.size()
while True:
#1. find hand landmarks
success, img = cap.read()
img= detector.findHands(img)
lmList,bbox =detector.findPosition(img)
#2. get the tip of the index and middle finger
if len(lmList)!=0:
x1,y1 =lmList[8][1:]
x2, y2=lmList[12][1:]
#print(x1,y1,x2,y2)
#3. check which finger is up
fingers=detector.fingersUp()
cv2.rectangle(img, (frameR_x, frameR_y), (wCam-frameR_x,hCam-frameR_y),(255,0,0),2)
        #4. check which mode the hand is in: index only = moving, index and middle = clicking
        #convert the coordinates to get the correct screen position
if fingers[1]==1 and fingers[2]==0:
#moving mode
x3= np.interp(x1,(frameR,wCam-frameR_x),(0,wScr))
y3= np.interp(y1,(frameR,hCam-frameR_y),(0,hScr))
#5.smoothen the values
clocX=plocX+(x3-plocX)/smoothening
clocY=plocY+(y3-plocY)/smoothening
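            #exponential smoothing: each frame the cursor covers only 1/smoothening
            #of the remaining distance to the target, which damps hand jitter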
#move the mouse
#flip all existing values on x axis
autopy.mouse.move(wScr-clocX,clocY)
cv2.circle(img,(x1,y1),10,(0,255,0),cv2.FILLED)
plocX,plocY=clocX,clocY
        #clicking mode: check whether both the index and middle fingers are up
if fingers[1]==1 and fingers[2]==1:
length, img, lineinfo=detector.findDistance(8,12,img)
#print(length)
if length<40:
cv2.circle(img, (lineinfo[4],lineinfo[5]),7,(0,200,0),cv2.FILLED)
autopy.mouse.click()
sleep(0.3)
        if fingers[1]==1 and fingers[2]==1 and fingers[3]==1:
length, img, lineinfo=detector.findDistance(8,12,img)
if length<40:
print("true")
#show image
cv2.imshow("Image",img)
cv2.waitKey(1)
|
[
"cv2.rectangle",
"autopy.mouse.click",
"autopy.screen.size",
"HandTrackingModule.handDetector",
"time.sleep",
"cv2.imshow",
"cv2.circle",
"cv2.VideoCapture",
"numpy.interp",
"autopy.mouse.move",
"cv2.waitKey"
] |
[((360, 379), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (376, 379), False, 'import cv2\n'), ((423, 451), 'HandTrackingModule.handDetector', 'htm.handDetector', ([], {'maxHands': '(1)'}), '(maxHands=1)\n', (439, 451), True, 'import HandTrackingModule as htm\n'), ((463, 483), 'autopy.screen.size', 'autopy.screen.size', ([], {}), '()\n', (481, 483), False, 'import autopy\n'), ((2237, 2261), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (2247, 2261), False, 'import cv2\n'), ((2265, 2279), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2276, 2279), False, 'import cv2\n'), ((877, 973), 'cv2.rectangle', 'cv2.rectangle', (['img', '(frameR_x, frameR_y)', '(wCam - frameR_x, hCam - frameR_y)', '(255, 0, 0)', '(2)'], {}), '(img, (frameR_x, frameR_y), (wCam - frameR_x, hCam - frameR_y),\n (255, 0, 0), 2)\n', (890, 973), False, 'import cv2\n'), ((1211, 1262), 'numpy.interp', 'np.interp', (['x1', '(frameR, wCam - frameR_x)', '(0, wScr)'], {}), '(x1, (frameR, wCam - frameR_x), (0, wScr))\n', (1220, 1262), True, 'import numpy as np\n'), ((1273, 1324), 'numpy.interp', 'np.interp', (['y1', '(frameR, hCam - frameR_y)', '(0, hScr)'], {}), '(y1, (frameR, hCam - frameR_y), (0, hScr))\n', (1282, 1324), True, 'import numpy as np\n'), ((1518, 1556), 'autopy.mouse.move', 'autopy.mouse.move', (['(wScr - clocX)', 'clocY'], {}), '(wScr - clocX, clocY)\n', (1535, 1556), False, 'import autopy\n'), ((1566, 1620), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(10)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (x1, y1), 10, (0, 255, 0), cv2.FILLED)\n', (1576, 1620), False, 'import cv2\n'), ((1899, 1970), 'cv2.circle', 'cv2.circle', (['img', '(lineinfo[4], lineinfo[5])', '(7)', '(0, 200, 0)', 'cv2.FILLED'], {}), '(img, (lineinfo[4], lineinfo[5]), 7, (0, 200, 0), cv2.FILLED)\n', (1909, 1970), False, 'import cv2\n'), ((1981, 2001), 'autopy.mouse.click', 'autopy.mouse.click', ([], {}), '()\n', (1999, 2001), False, 'import autopy\n'), ((2018, 2028), 'time.sleep', 'sleep', (['(0.3)'], {}), '(0.3)\n', (2023, 2028), False, 'from time import sleep\n')]
|
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
from heatlib.units import Time
from heatlib.boundary_conditions import Boundary_Condition
from heatlib.domains import Domain_Constant_1D, Domain_Variable_1D
from heatlib.solvers import Solver_1D
#################################################
# Models #
#################################################
class Model_1D(ABC):
@abstractmethod
def __init__(self, **kwargs):
assert isinstance(
self.bc0, Boundary_Condition
), 'Second argument must be Boundary_Condition.'
assert isinstance(
self.bc1, Boundary_Condition
), 'Third argument must be Boundary_Condition.'
self.time_unit = kwargs.get('time_unit', 's') # default plotting time units
self.orientation = kwargs.get('orientation', 'vertical') # default plotting orientation
self.figsize = kwargs.get('figsize', (9, 6)) # default figure size
self.flipy = kwargs.get('flipy', True) # plot x as negative for vertical orientation
self.T = None
self._time_abs = 0.0
@property
def time(self):
return self._time_abs / abs(Time(1, self.time_unit))
def get_T(self, x):
if self.T is not None:
return np.interp(abs(x), self.domain.x, self.T)
else:
            print('Model has no solution yet.')
return None
def __repr__(self):
if self.T is None:
            return 'No solution yet. Ready for an initial one.'
elif self._time_abs == 0.0:
return 'Model with initial solution'
else:
return f'Model with evolutionary solution for time {self.time:g}{self.time_unit}'
def info(self):
print(self.bc0)
print(self.domain.info())
print(self.bc1)
def solve(self, solver, **kwargs):
assert isinstance(
solver, Solver_1D
), 'You have to use Solver_1D instance as argument.'
solver.solve(self, **kwargs)
def plot(self):
if self.T is not None:
fig, ax = plt.subplots(figsize=self.figsize)
if self.orientation == 'vertical':
multi = -1 if self.flipy else 1
ax.plot(
self.T, multi * self.domain.x_units, label=f't={self.time:g}{self.time_unit}'
)
ax.set_xlabel('Temperature [°C]')
ax.set_ylabel(f'Depth [{self.domain.plot_unit}]')
else:
ax.plot(
self.domain.x_units, self.T, label=f't={self.time:g}{self.time_unit}'
)
ax.set_xlabel(f'Distance [{self.domain.plot_unit}]')
ax.set_ylabel('Temperature [°C]')
ax.legend(loc='best')
plt.show()
else:
            print('Model has no solution yet.')
class Model_Constant_1D(Model_1D):
def __init__(self, domain, bc0, bc1, **kwargs):
assert isinstance(
domain, Domain_Constant_1D
), 'You have to use Domain_Constant_1D instance as argument.'
self.domain = domain
self.bc0 = bc0
self.bc1 = bc1
super().__init__(**kwargs)
class Model_Variable_1D(Model_1D):
def __init__(self, domain, bc0, bc1, **kwargs):
assert isinstance(
domain, Domain_Variable_1D
), 'You have to use Domain_Variable_1D instance as argument.'
self.domain = domain
self.bc0 = bc0
self.bc1 = bc1
super().__init__(**kwargs)
@property
def Tm(self):
if self.T is not None:
return np.interp(self.domain.xm, self.domain.x, self.T)
else:
            print('Model has no solution yet.')
|
[
"heatlib.units.Time",
"matplotlib.pyplot.subplots",
"numpy.interp",
"matplotlib.pyplot.show"
] |
[((2137, 2171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'self.figsize'}), '(figsize=self.figsize)\n', (2149, 2171), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2848, 2850), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3719), 'numpy.interp', 'np.interp', (['self.domain.xm', 'self.domain.x', 'self.T'], {}), '(self.domain.xm, self.domain.x, self.T)\n', (3680, 3719), True, 'import numpy as np\n'), ((1234, 1257), 'heatlib.units.Time', 'Time', (['(1)', 'self.time_unit'], {}), '(1, self.time_unit)\n', (1238, 1257), False, 'from heatlib.units import Time\n')]
|
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parser module specific to rosetta NPZ distance predictions
"""
import numpy as np
from conkit.io._parser import BinaryDistanceFileParser
from conkit.core.distance import Distance
from conkit.core.distogram import Distogram
from conkit.core.distancefile import DistanceFile
DISTANCE_BINS = ((0, 2), (2, 2.5), (2.5, 3), (3, 4), (4, 4.5), (4.5, 5), (5, 5.5), (5.5, 6), (6, 6.5), (6.5, 7),
(7, 7.5), (7.5, 8), (8, 8.5), (8.5, 9), (9, 9.5), (9.5, 10), (10, 10.5), (10.5, 11), (11, 11.5),
(11.5, 12), (12, 12.5), (12.5, 13), (13, 13.5), (13.5, 14), (14, 14.5), (14.5, 15), (15, 15.5),
(15.5, 16), (16, 16.5), (16.5, 17), (17, 17.5), (17.5, 18), (18, 18.5), (18.5, 19), (19, 19.5),
(19.5, 20), (20, np.inf))
class RosettaNpzParser(BinaryDistanceFileParser):
"""Parser class for rosetta NPZ distance prediction file"""
def read(self, f_handle, f_id="rosettanpz"):
"""Read a distance prediction file
Parameters
----------
f_handle
Open file handle [read permissions]
f_id : str, optional
Unique contact file identifier
Returns
-------
:obj:`~conkit.core.distancefile.DistanceFile`
"""
hierarchy = DistanceFile(f_id)
hierarchy.original_file_format = "ROSETTA_NPZ"
_map = Distogram("distogram_1")
hierarchy.add(_map)
prediction = np.load(f_handle, allow_pickle=True)
probs = prediction['dist']
# Bin #0 corresponds with d>20A & bins #1 ~ #36 correspond with 2A<d<20A in increments of 0.5A
probs = probs[:, :, [x for x in range(1, 37)] + [0]]
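        # After this reordering the d>20A column (bin #0 in the NPZ) sits last,
        # so it lines up with the final (20, inf) entry of DISTANCE_BINS.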
L = probs.shape[0]
for i in range(L):
for j in range(i, L):
_distance = Distance(i + 1, j + 1, tuple(probs[i, j, :].tolist()), DISTANCE_BINS)
_map.add(_distance)
return hierarchy
def write(self, f_handle, hierarchy):
"""Write a distance file instance to a file
Raises
------
:exc:`NotImplementedError`
Write function not available
"""
raise NotImplementedError("Write function not available yet")
|
[
"numpy.load",
"conkit.core.distancefile.DistanceFile",
"conkit.core.distogram.Distogram"
] |
[((2857, 2875), 'conkit.core.distancefile.DistanceFile', 'DistanceFile', (['f_id'], {}), '(f_id)\n', (2869, 2875), False, 'from conkit.core.distancefile import DistanceFile\n'), ((2946, 2970), 'conkit.core.distogram.Distogram', 'Distogram', (['"""distogram_1"""'], {}), "('distogram_1')\n", (2955, 2970), False, 'from conkit.core.distogram import Distogram\n'), ((3021, 3057), 'numpy.load', 'np.load', (['f_handle'], {'allow_pickle': '(True)'}), '(f_handle, allow_pickle=True)\n', (3028, 3057), True, 'import numpy as np\n')]
|
from cv2 import cv2
from collections import Counter
from PIL import Image, ImageDraw, ImageFont
from scipy.fftpack import dct
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import gmpy2
import numpy as np
import time
import os
"""
Transparency
If putting the pixel with RGBA = (Ra, Ga, Ba, Aa) over the pixel with RGBA = (Rb, Gb, Bb, 100%)
we get the output color equal to (Ra*Aa + Rb*(100% - Aa), Ga*Aa + Gb*(100% - Aa), Ba*Aa + Bb*(100% - Aa), 100%)
Tested with Adobe Photoshop :) Works there
"""
GENERATED_PATH = os.path.join('icons', 'generated')
def phash(img, hash_size=8, factor=4):
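    # Perceptual hash: downscale to a small greyscale image, take its 2D DCT,
    # keep the top-left hash_size x hash_size low-frequency block and threshold
    # it against the median to build a hash_size**2-bit integer fingerprint.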
img = np.array(img, dtype=np.uint8)
img = Image.fromarray(img)
image_size = hash_size * factor
    img = img.convert('L').resize((image_size, image_size), Image.ANTIALIAS)
    img = np.asarray(img)
dct_ = dct(dct(img, axis=0), axis=1)
dct_ = dct_[:hash_size, :hash_size]
med = np.median(dct_)
diff = dct_ > med
return sum((1 << i) * int(el) for i, el in enumerate(diff.flatten()))
def cluster_icons(clusters, hash_size=8):
img_count = len(os.listdir(GENERATED_PATH))
assert img_count > clusters, f'There are not enough images in "{GENERATED_PATH}"'
X = np.zeros((img_count, hash_size * hash_size), dtype=np.uint8)
names = []
for i, filepath in enumerate(os.listdir(GENERATED_PATH)):
img = cv2.imread(os.path.join(GENERATED_PATH, filepath))
names.append(filepath)
hashed = phash(img)
X[i, :] = np.array(
[(hashed >> i) & 1 for i in range(hash_size * hash_size - 1, -1, -1)],
np.uint8)
kmeans = KMeans(n_jobs=-1, n_clusters=clusters)
X_dist = kmeans.fit_transform(X)
representative_sign_idx = np.argmin(X_dist, axis=0)
imgs = []
for idx in representative_sign_idx:
read = plt.imread(os.path.join(GENERATED_PATH, names[idx]))
img = np.zeros((read.shape[0], read.shape[1], 4))
img[:, :, :3] = read[:, :, :3]
# add transparency (make PNG out of JPEG)
if read.shape[-1] == 3:
img[:, :, 3] = 1.0
else:
img[:, :, 3] = read[:, :, 3]
imgs.append(img)
return imgs
def icon_mapping(threads):
counter = Counter(map(lambda thr: thr['number'], threads.flatten()))
clusters = cluster_icons(len(counter))
return {
number: img for (number, _), img in zip(counter.most_common(), clusters)
}
def draw_pattern(image, threads):
icons = icon_mapping(threads)
factor = 32
h, w = len(image), len(image[0])
new_h = h * factor + 2 * ((h * factor - 1) // (10 * factor))
new_w = w * factor + 2 * ((w * factor - 1) // (10 * factor))
pattern = np.zeros((new_h, new_w, 3))
t0 = time.time()
for y in range(h):
new_y = y * factor + (y//10) * 2 + 1
for x, rgb, thread in zip(range(w), image[y], threads[y]):
new_x = x * factor + (x // 10) * 2 + 1
icon = (np.copy(icons[thread['number']]) * 255).astype(np.uint8)
dark = not bool(np.mean(rgb[:3]) // 128)
if dark:
icon[:, :, :3] = 255 - icon[:, :, :3]
for y_offset in range(factor - 2):
for x_offset in range(factor - 2):
alpha = icon[y_offset + 1, x_offset + 1, 3] / 255
pattern[new_y + y_offset, new_x + x_offset] = (
alpha * icon[y_offset + 1, x_offset + 1, :3]
+ rgb * (1 - alpha))
print("\nTime spent: ", round(time.time() - t0, 2))
return pattern, icons
def draw_mapping(icons, threads):
icons_count = len(icons)
h_line = 36
h = icons_count * 36
w = 300
prj_path = os.path.dirname(os.path.dirname(__file__))
font_path = os.path.join(prj_path, 'fonts', 'arial.ttf')
font = ImageFont.truetype(font_path, size=24)
img = Image.new('RGBA', (w, h), (255, 255, 255, 255))
new_img = ImageDraw.Draw(img)
for i, (number, icon) in enumerate(icons.items()):
text = f'{number}'
icon_w, icon_h = new_img.textsize(text, font)
coords = (50, h_line * i + (h_line-icon_h) // 2)
new_img.text(coords, text, fill=(0, 0, 0), font=font)
img = np.array(img)
def find_rgb(number):
for thread in threads.flatten():
if number == thread['number']:
return thread['rgb']
raise ValueError(f'No thread with number {number}')
icon_h, icon_w = 32, 32
for i, (number, icon_) in enumerate(icons.items()):
r, g, b = find_rgb(number)
icon = np.array(Image.new('RGBA', (icon_w, icon_h), (r, g, b, 255)))
alpha = icon_[:, :, 3:4]
dark = not bool(np.mean([r, g, b]) // 128)
if dark:
icon_[:, :, :3] = 1 - icon_[:, :, :3]
icon = alpha * 255 * icon_ + (1 - alpha) * icon
icon = icon.astype(np.uint8)
coords = (w - 50 - icon_w, h_line * i + (h_line - icon_h) // 2)
img[coords[1]:coords[1]+icon_h, coords[0]:coords[0]+icon_w, :] = icon
return img
|
[
"sklearn.cluster.KMeans",
"numpy.mean",
"PIL.Image.fromarray",
"numpy.median",
"os.listdir",
"numpy.copy",
"PIL.Image.new",
"os.path.join",
"PIL.ImageFont.truetype",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"PIL.ImageDraw.Draw",
"scipy.fftpack.dct",
"os.path.dirname",
"numpy.argmin",
"time.time"
] |
[((540, 574), 'os.path.join', 'os.path.join', (['"""icons"""', '"""generated"""'], {}), "('icons', 'generated')\n", (552, 574), False, 'import os\n'), ((626, 655), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (634, 655), True, 'import numpy as np\n'), ((666, 686), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (681, 686), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((920, 935), 'numpy.median', 'np.median', (['dct_'], {}), '(dct_)\n', (929, 935), True, 'import numpy as np\n'), ((1219, 1279), 'numpy.zeros', 'np.zeros', (['(img_count, hash_size * hash_size)'], {'dtype': 'np.uint8'}), '((img_count, hash_size * hash_size), dtype=np.uint8)\n', (1227, 1279), True, 'import numpy as np\n'), ((1634, 1672), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_jobs': '(-1)', 'n_clusters': 'clusters'}), '(n_jobs=-1, n_clusters=clusters)\n', (1640, 1672), False, 'from sklearn.cluster import KMeans\n'), ((1741, 1766), 'numpy.argmin', 'np.argmin', (['X_dist'], {'axis': '(0)'}), '(X_dist, axis=0)\n', (1750, 1766), True, 'import numpy as np\n'), ((2726, 2753), 'numpy.zeros', 'np.zeros', (['(new_h, new_w, 3)'], {}), '((new_h, new_w, 3))\n', (2734, 2753), True, 'import numpy as np\n'), ((2764, 2775), 'time.time', 'time.time', ([], {}), '()\n', (2773, 2775), False, 'import time\n'), ((3809, 3853), 'os.path.join', 'os.path.join', (['prj_path', '"""fonts"""', '"""arial.ttf"""'], {}), "(prj_path, 'fonts', 'arial.ttf')\n", (3821, 3853), False, 'import os\n'), ((3865, 3903), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path'], {'size': '(24)'}), '(font_path, size=24)\n', (3883, 3903), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3915, 3962), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(w, h)', '(255, 255, 255, 255)'], {}), "('RGBA', (w, h), (255, 255, 255, 255))\n", (3924, 3962), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3977, 3996), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (3991, 3996), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4266, 4279), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4274, 4279), True, 'import numpy as np\n'), ((804, 819), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (814, 819), True, 'import numpy as np\n'), ((844, 860), 'scipy.fftpack.dct', 'dct', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (847, 860), False, 'from scipy.fftpack import dct\n'), ((1096, 1122), 'os.listdir', 'os.listdir', (['GENERATED_PATH'], {}), '(GENERATED_PATH)\n', (1106, 1122), False, 'import os\n'), ((1333, 1359), 'os.listdir', 'os.listdir', (['GENERATED_PATH'], {}), '(GENERATED_PATH)\n', (1343, 1359), False, 'import os\n'), ((1904, 1947), 'numpy.zeros', 'np.zeros', (['(read.shape[0], read.shape[1], 4)'], {}), '((read.shape[0], read.shape[1], 4))\n', (1912, 1947), True, 'import numpy as np\n'), ((3766, 3791), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3781, 3791), False, 'import os\n'), ((1387, 1425), 'os.path.join', 'os.path.join', (['GENERATED_PATH', 'filepath'], {}), '(GENERATED_PATH, filepath)\n', (1399, 1425), False, 'import os\n'), ((1848, 1888), 'os.path.join', 'os.path.join', (['GENERATED_PATH', 'names[idx]'], {}), '(GENERATED_PATH, names[idx])\n', (1860, 1888), False, 'import os\n'), ((4632, 4683), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(icon_w, icon_h)', '(r, g, b, 255)'], {}), "('RGBA', (icon_w, icon_h), (r, g, b, 255))\n", (4641, 4683), False, 'from PIL import Image, ImageDraw, ImageFont\n'), 
((3567, 3578), 'time.time', 'time.time', ([], {}), '()\n', (3576, 3578), False, 'import time\n'), ((4743, 4761), 'numpy.mean', 'np.mean', (['[r, g, b]'], {}), '([r, g, b])\n', (4750, 4761), True, 'import numpy as np\n'), ((2995, 3027), 'numpy.copy', 'np.copy', (["icons[thread['number']]"], {}), "(icons[thread['number']])\n", (3002, 3027), True, 'import numpy as np\n'), ((3080, 3096), 'numpy.mean', 'np.mean', (['rgb[:3]'], {}), '(rgb[:3])\n', (3087, 3096), True, 'import numpy as np\n')]
|
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implement the :py:class:`~.QuantumCircuitGroup` class used in GLOA.
The :py:class:`~.QuantumCircuitGroup` class represents what the `original paper\
<https://arxiv.org/abs/1004.2242>`_ calls a "group". It is a collection of
entities (in this particular case "entities" refers to "instances of
:py:class:`~.QuantumCircuit`") admitting a leader.
"""
import copy
import typing
import numpy
import qtoolkit.data_structures.quantum_circuit.quantum_circuit as qcirc
import qtoolkit.data_structures.quantum_circuit.quantum_operation as qop
import qtoolkit.maths.matrix.distances as qdists
import qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen
import qtoolkit.utils.types as qtypes
class QuantumCircuitGroup:
"""A group of :py:class:`~.QuantumCircuit`.
The instances of :py:class:`~.QuantumCircuit` are grouped into an instance
of :py:class:`~.QuantumCircuitGroup` to factorise the code.
"""
def __init__(
self,
basis: typing.Sequence[qop.QuantumOperation],
objective_unitary: qtypes.UnitaryMatrix,
length: int,
p: int,
r: numpy.ndarray,
correctness_weight: float,
circuit_cost_weight: float,
circuit_cost_func: qcirc.CircuitCostFunction,
parameters_bounds: qtypes.Bounds = None,
) -> None:
"""Initialise the :py:class:`~.QuantumCircuitGroup` instance.
A :py:class:`~.QuantumCircuitGroup` is a group composed of `p` instances
of :py:class:`~.QuantumCircuit`.
:param basis: a sequence of allowed operations. The operations can be
"abstract" (i.e. with None entries, see the documentation for the
:py:class:`~.QuantumOperation` class) or not (i.e. with specified
entries).
:param objective_unitary: unitary matrix we are trying to approximate.
:param length: length of the sequences that will be generated.
:param p: population of the group, i.e. number of gate sequences
contained in this group.
:param r: rates determining the portion of old (r[0]), leader (r[1]) and
random (r[2]) that are used to generate new candidates.
:param correctness_weight: scalar representing the importance attached
to the correctness of the generated circuit.
:param circuit_cost_weight: scalar representing the importance attached
to the cost of the generated circuit.
:param circuit_cost_func: a function that takes as input an instance of
:py:class:`~.QuantumCircuit` and returns a float representing the
cost of the given circuit.
:param parameters_bounds: a list of bounds for each operation in the
`basis`. A None value in this list means that the corresponding
operation is not parametrised. A None value for the whole list
(default value) means that no gate in `basis` is parametrised.
"""
self._qubit_number = objective_unitary.shape[0].bit_length() - 1
self._circuits = [
qc_gen.generate_random_quantum_circuit(
self._qubit_number, basis, length, parameters_bounds
)
for _ in range(p)
]
self._basis = basis
self._r = r
self._length = length
self._param_bounds = parameters_bounds
if self._param_bounds is None:
self._param_bounds = [None] * self._qubit_number
self._correctness_weight = correctness_weight
self._circuit_cost_weight = circuit_cost_weight
self._circuit_cost_func = circuit_cost_func
self._objective_unitary = objective_unitary
self._costs = numpy.zeros((p,), dtype=numpy.float)
self._update_costs()
def _update_costs(self):
"""Update the cached costs.
        This method should be called after one or more sequences of the group
        have changed, in order to update the cached costs.
"""
for i in range(len(self._circuits)):
self._costs[i] = qdists.gloa_objective_function(
self._circuits[i],
self._objective_unitary,
self._correctness_weight,
self._circuit_cost_weight,
self._circuit_cost_func,
)
def get_leader(self) -> typing.Tuple[float, qcirc.QuantumCircuit]:
"""Get the best quantum circuit of the group.
:return: the best sequence of the group along with its cost.
"""
idx: int = numpy.argmin(self._costs)
return self._costs[idx], self._circuits[idx]
def mutate_and_recombine(self) -> None:
"""Apply the mutate and recombine step of the GLOA.
See the `GLOA paper <https://arxiv.org/abs/1004.2242>`_ for more
precision on this step.
"""
# Pre-compute group leader data.
_, leader = self.get_leader()
# For each member of the group, mutate and recombine it and see if the
# newly created member is better.
for seq_idx, current in enumerate(self._circuits):
new_circuit = qcirc.QuantumCircuit(self._qubit_number, cache_matrix=True)
random = qc_gen.generate_random_quantum_circuit(
self._qubit_number, self._basis, self._length, self._param_bounds
)
for ops in zip(current.operations, leader.operations, random.operations):
new_circuit.add_operation(self._combine_operations(ops))
new_cost = qdists.gloa_objective_function(
new_circuit,
self._objective_unitary,
self._correctness_weight,
self._circuit_cost_weight,
self._circuit_cost_func,
)
if new_cost < self._costs[seq_idx]:
self._circuits[seq_idx] = new_circuit
self._costs[seq_idx] = new_cost
return
def _combine_operations(
self, operations: typing.Sequence[qop.QuantumOperation]
) -> qop.QuantumOperation:
"""Combine the 3 given operations into one operation.
The combined operation is randomly chosen from the 3 given operations
with the probability distribution `r` given at the instance construction
and then randomly mutated with characteristics of the other operations.
:param operations: A sequence of 3 :py:class:`~.QuantumOperation`.
:return: a random merge of the 3 given operations.
"""
op1, op2, op3 = operations[0], operations[1], operations[2]
new_operation = copy.copy(numpy.random.choice(operations, p=self._r))
control_number = len(new_operation.controls)
new_operation.controls = []
new_operation.target = numpy.random.choice(
[op1.target, op2.target, op3.target], p=self._r
)
while len(new_operation.controls) < control_number:
ctrl = numpy.random.randint(0, self._qubit_number)
if ctrl != new_operation.target and ctrl not in new_operation.controls:
new_operation.controls.append(ctrl)
if new_operation.is_parametrised():
raise NotImplementedError(
"Parametrised operations are not supported for the moment."
)
return new_operation
@property
def circuits(self) -> typing.List[qcirc.QuantumCircuit]:
"""Getter for the stored list of :py:class:`~.QuantumCircuit`."""
return self._circuits
@property
def costs(self):
"""Getter for the pre-computed costs."""
return self._costs
|
[
"numpy.random.choice",
"qtoolkit.data_structures.quantum_circuit.quantum_circuit.QuantumCircuit",
"qtoolkit.maths.matrix.distances.gloa_objective_function",
"numpy.zeros",
"numpy.random.randint",
"numpy.argmin",
"qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit"
] |
[((5397, 5433), 'numpy.zeros', 'numpy.zeros', (['(p,)'], {'dtype': 'numpy.float'}), '((p,), dtype=numpy.float)\n', (5408, 5433), False, 'import numpy\n'), ((6224, 6249), 'numpy.argmin', 'numpy.argmin', (['self._costs'], {}), '(self._costs)\n', (6236, 6249), False, 'import numpy\n'), ((8461, 8529), 'numpy.random.choice', 'numpy.random.choice', (['[op1.target, op2.target, op3.target]'], {'p': 'self._r'}), '([op1.target, op2.target, op3.target], p=self._r)\n', (8480, 8529), False, 'import numpy\n'), ((4772, 4868), 'qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit', 'qc_gen.generate_random_quantum_circuit', (['self._qubit_number', 'basis', 'length', 'parameters_bounds'], {}), '(self._qubit_number, basis, length,\n parameters_bounds)\n', (4810, 4868), True, 'import qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen\n'), ((5749, 5910), 'qtoolkit.maths.matrix.distances.gloa_objective_function', 'qdists.gloa_objective_function', (['self._circuits[i]', 'self._objective_unitary', 'self._correctness_weight', 'self._circuit_cost_weight', 'self._circuit_cost_func'], {}), '(self._circuits[i], self._objective_unitary,\n self._correctness_weight, self._circuit_cost_weight, self.\n _circuit_cost_func)\n', (5779, 5910), True, 'import qtoolkit.maths.matrix.distances as qdists\n'), ((6812, 6871), 'qtoolkit.data_structures.quantum_circuit.quantum_circuit.QuantumCircuit', 'qcirc.QuantumCircuit', (['self._qubit_number'], {'cache_matrix': '(True)'}), '(self._qubit_number, cache_matrix=True)\n', (6832, 6871), True, 'import qtoolkit.data_structures.quantum_circuit.quantum_circuit as qcirc\n'), ((6893, 7002), 'qtoolkit.maths.matrix.generation.quantum_circuit.generate_random_quantum_circuit', 'qc_gen.generate_random_quantum_circuit', (['self._qubit_number', 'self._basis', 'self._length', 'self._param_bounds'], {}), '(self._qubit_number, self._basis,\n self._length, self._param_bounds)\n', (6931, 7002), True, 'import qtoolkit.maths.matrix.generation.quantum_circuit as qc_gen\n'), ((7213, 7364), 'qtoolkit.maths.matrix.distances.gloa_objective_function', 'qdists.gloa_objective_function', (['new_circuit', 'self._objective_unitary', 'self._correctness_weight', 'self._circuit_cost_weight', 'self._circuit_cost_func'], {}), '(new_circuit, self._objective_unitary, self.\n _correctness_weight, self._circuit_cost_weight, self._circuit_cost_func)\n', (7243, 7364), True, 'import qtoolkit.maths.matrix.distances as qdists\n'), ((8297, 8339), 'numpy.random.choice', 'numpy.random.choice', (['operations'], {'p': 'self._r'}), '(operations, p=self._r)\n', (8316, 8339), False, 'import numpy\n'), ((8632, 8675), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'self._qubit_number'], {}), '(0, self._qubit_number)\n', (8652, 8675), False, 'import numpy\n')]
|
from optparse import OptionParser
import os
import sys
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import utils
import get_site_features
import tf_utils
np.set_printoptions(threshold=np.inf, linewidth=200)
pd.options.mode.chained_assignment = None
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--tpm_file", dest="TPM_FILE", help="tpm data")
parser.add_option("--orf_file", dest="ORF_FILE", help="ORF sequences in tsv format")
parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
parser.add_option("--mirlen", dest="MIRLEN", type=int)
parser.add_option("-w", "--outfile", dest="OUTFILE", help="location for tfrecords")
parser.add_option("--overlap_dist", dest="OVERLAP_DIST", help="minimum distance between neighboring sites", type=int)
parser.add_option("--only_canon", dest="ONLY_CANON", help="only use canonical sites", default=False, action='store_true')
(options, args) = parser.parse_args()
### READ miRNA DATA and filter for ones to keep ###
MIRNAS = pd.read_csv(options.MIR_SEQS, sep='\t')
MIRNAS = MIRNAS[MIRNAS['use_tpms']]
ALL_GUIDES = sorted(list(MIRNAS['mir'].values))
MIR_DICT = {}
for row in MIRNAS.iterrows():
guide_seq = row[1]['guide_seq']
pass_seq = row[1]['pass_seq']
MIR_DICT[row[1]['mir']] = {
'mirseq': guide_seq,
'site8': utils.rev_comp(guide_seq[1:8]) + 'A',
'one_hot': utils.one_hot_encode(guide_seq[:options.MIRLEN])
}
MIR_DICT[row[1]['mir'] + '*'] = {
'mirseq': pass_seq,
'site8': utils.rev_comp(pass_seq[1:8]) + 'A',
'one_hot': utils.one_hot_encode(pass_seq[:options.MIRLEN])
}
### READ EXPRESSION DATA ###
TPM = pd.read_csv(options.TPM_FILE, sep='\t', index_col=0).sort_index()
for mir in ALL_GUIDES:
if mir not in TPM.columns:
raise ValueError('{} given in mirseqs file but not in TPM file.'.format(mir))
num_batches = 10
TPM['batch'] = [ix % num_batches for ix in TPM['ix']]
print("Using mirs: {}".format(ALL_GUIDES))
# read in orf sequences
ORF_SEQS = pd.read_csv(options.ORF_FILE, sep='\t', header=None, index_col=0)
feature_names = ['mir', 'tpm', 'orf_guide_1hot', 'utr3_guide_1hot',
'orf_pass_1hot', 'utr3_pass_1hot']
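    # Each transcript below becomes one tf.train.SequenceExample: the context
    # carries the transcript id and batch index, and every feature list named
    # above is aligned per miRNA (guide and passenger sites are encoded as
    # integer mir/site pairs).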
with tf.python_io.TFRecordWriter(options.OUTFILE) as tfwriter:
for ix, row in enumerate(TPM.iterrows()):
# print progress
if ix % 100 == 0:
print("Processed {}/{} transcripts".format(ix, len(TPM)))
transcript = row[0]
utr3 = row[1]['sequence']
orf = ORF_SEQS.loc[transcript][2]
transcript_sequence = orf + utr3
orf_length = len(orf)
context_dict = tf.train.Features(feature={
'transcript': tf_utils._bytes_feature(transcript.encode('utf-8')),
'batch': tf_utils._int64_feature([row[1]['batch']])
})
total_transcript_sites = 0
features = [[], [], [], [], [], []]
for mir in ALL_GUIDES:
site8 = MIR_DICT[mir]['site8']
mirseq = MIR_DICT[mir]['mirseq']
site8_star = MIR_DICT[mir + '*']['site8']
mirseq_star = MIR_DICT[mir + '*']['mirseq']
features[0].append(tf_utils._bytes_feature(mir.encode('utf-8'))) # mir
features[1].append(tf_utils._float_feature([row[1][mir]])) # tpm
# get sites for guide strand
seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
num_orf_sites = len([l for l in locs if l < orf_length])
orf_sites = utils.mir_site_pair_to_ints(mirseq[:options.MIRLEN], ''.join(seqs[:num_orf_sites]))
utr3_sites = utils.mir_site_pair_to_ints(mirseq[:options.MIRLEN], ''.join(seqs[num_orf_sites:]))
features[2].append(tf_utils._int64_feature(orf_sites))
features[3].append(tf_utils._int64_feature(utr3_sites))
total_transcript_sites += len(locs)
                # get sites for passenger (star) strand
seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8_star, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
num_orf_sites = len([l for l in locs if l < orf_length])
orf_sites = utils.mir_site_pair_to_ints(mirseq_star[:options.MIRLEN], ''.join(seqs[:num_orf_sites]))
utr3_sites = utils.mir_site_pair_to_ints(mirseq_star[:options.MIRLEN], ''.join(seqs[num_orf_sites:]))
features[4].append(tf_utils._int64_feature(orf_sites))
features[5].append(tf_utils._int64_feature(utr3_sites))
total_transcript_sites += len(locs)
# features[0].append(tf_utils._bytes_feature(mir.encode('utf-8'))) # mir
# features[1].append(tf_utils._float_feature([row[1][mir]])) # tpm
# features[2].append(tf_utils._int64_feature(utils.one_hot_encode(mirseq[:options.MIRLEN]))) # mirseq
# assert len(utils.one_hot_encode(mirseq[:options.MIRLEN])) == 40
# # get sites for guide strand
# seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
# num_orf_sites = len([l for l in locs if l < orf_length])
# orf_sites = ''.join(seqs[:num_orf_sites])
# utr3_sites = ''.join(seqs[num_orf_sites:])
# features[3].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
# features[4].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
# total_transcript_sites += len(locs)
# features[5].append(tf_utils._int64_feature(utils.one_hot_encode(mirseq_star[:options.MIRLEN]))) # mirseq*
# assert len(utils.one_hot_encode(mirseq_star[:options.MIRLEN])) == 40
# # get sites for guide strand
# seqs, locs = get_site_features.get_sites_from_utr(transcript_sequence, site8_star, overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)
# num_orf_sites = len([l for l in locs if l < orf_length])
# orf_sites = ''.join(seqs[:num_orf_sites])
# utr3_sites = ''.join(seqs[num_orf_sites:])
# features[6].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
# features[7].append(tf_utils._int64_feature(utils.one_hot_encode(orf_sites)))
# total_transcript_sites += len(locs)
print(total_transcript_sites)
if total_transcript_sites > 0:
feature_dict = tf.train.FeatureLists(feature_list={
feature_names[ix]: tf.train.FeatureList(feature=features[ix]) for ix in range(len(feature_names))
})
# Create the SequenceExample
example = tf.train.SequenceExample(context=context_dict,
feature_lists=feature_dict)
tfwriter.write(example.SerializeToString())
else:
print('Skipping {} because no sites found'.format(transcript))
|
[
"tf_utils._float_feature",
"pandas.read_csv",
"tf_utils._int64_feature",
"optparse.OptionParser",
"get_site_features.get_sites_from_utr",
"utils.rev_comp",
"tensorflow.train.FeatureList",
"utils.one_hot_encode",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.SequenceExample",
"numpy.set_printoptions"
] |
[((187, 239), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': '(200)'}), '(threshold=np.inf, linewidth=200)\n', (206, 239), True, 'import numpy as np\n'), ((325, 339), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (337, 339), False, 'from optparse import OptionParser\n'), ((1103, 1142), 'pandas.read_csv', 'pd.read_csv', (['options.MIR_SEQS'], {'sep': '"""\t"""'}), "(options.MIR_SEQS, sep='\\t')\n", (1114, 1142), True, 'import pandas as pd\n'), ((2224, 2289), 'pandas.read_csv', 'pd.read_csv', (['options.ORF_FILE'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(options.ORF_FILE, sep='\\t', header=None, index_col=0)\n", (2235, 2289), True, 'import pandas as pd\n'), ((2430, 2474), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['options.OUTFILE'], {}), '(options.OUTFILE)\n', (2457, 2474), True, 'import tensorflow as tf\n'), ((1517, 1565), 'utils.one_hot_encode', 'utils.one_hot_encode', (['guide_seq[:options.MIRLEN]'], {}), '(guide_seq[:options.MIRLEN])\n', (1537, 1565), False, 'import utils\n'), ((1732, 1779), 'utils.one_hot_encode', 'utils.one_hot_encode', (['pass_seq[:options.MIRLEN]'], {}), '(pass_seq[:options.MIRLEN])\n', (1752, 1779), False, 'import utils\n'), ((1834, 1886), 'pandas.read_csv', 'pd.read_csv', (['options.TPM_FILE'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(options.TPM_FILE, sep='\\t', index_col=0)\n", (1845, 1886), True, 'import pandas as pd\n'), ((1456, 1486), 'utils.rev_comp', 'utils.rev_comp', (['guide_seq[1:8]'], {}), '(guide_seq[1:8])\n', (1470, 1486), False, 'import utils\n'), ((1672, 1701), 'utils.rev_comp', 'utils.rev_comp', (['pass_seq[1:8]'], {}), '(pass_seq[1:8])\n', (1686, 1701), False, 'import utils\n'), ((3676, 3810), 'get_site_features.get_sites_from_utr', 'get_site_features.get_sites_from_utr', (['transcript_sequence', 'site8'], {'overlap_dist': 'options.OVERLAP_DIST', 'only_canon': 'options.ONLY_CANON'}), '(transcript_sequence, site8,\n overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)\n', (3712, 3810), False, 'import get_site_features\n'), ((4375, 4514), 'get_site_features.get_sites_from_utr', 'get_site_features.get_sites_from_utr', (['transcript_sequence', 'site8_star'], {'overlap_dist': 'options.OVERLAP_DIST', 'only_canon': 'options.ONLY_CANON'}), '(transcript_sequence, site8_star,\n overlap_dist=options.OVERLAP_DIST, only_canon=options.ONLY_CANON)\n', (4411, 4514), False, 'import get_site_features\n'), ((7273, 7347), 'tensorflow.train.SequenceExample', 'tf.train.SequenceExample', ([], {'context': 'context_dict', 'feature_lists': 'feature_dict'}), '(context=context_dict, feature_lists=feature_dict)\n', (7297, 7347), True, 'import tensorflow as tf\n'), ((3554, 3592), 'tf_utils._float_feature', 'tf_utils._float_feature', (['[row[1][mir]]'], {}), '([row[1][mir]])\n', (3577, 3592), False, 'import tf_utils\n'), ((4140, 4174), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['orf_sites'], {}), '(orf_sites)\n', (4163, 4174), False, 'import tf_utils\n'), ((4211, 4246), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['utr3_sites'], {}), '(utr3_sites)\n', (4234, 4246), False, 'import tf_utils\n'), ((4854, 4888), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['orf_sites'], {}), '(orf_sites)\n', (4877, 4888), False, 'import tf_utils\n'), ((4925, 4960), 'tf_utils._int64_feature', 'tf_utils._int64_feature', (['utr3_sites'], {}), '(utr3_sites)\n', (4948, 4960), False, 'import tf_utils\n'), ((3032, 3074), 'tf_utils._int64_feature', 
'tf_utils._int64_feature', (["[row[1]['batch']]"], {}), "([row[1]['batch']])\n", (3055, 3074), False, 'import tf_utils\n'), ((7103, 7145), 'tensorflow.train.FeatureList', 'tf.train.FeatureList', ([], {'feature': 'features[ix]'}), '(feature=features[ix])\n', (7123, 7145), True, 'import tensorflow as tf\n')]
|
"""
Feature Selection
Test 3
Random Forest, heatmap
"""
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from mlxtend.plotting import heatmap
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
# Reading data
df = pd.read_csv('NewBioDegWCols.csv')
df.columns = ['SpMax_L','J_Dz','nHM','F01','F04','NssssC','nCb-','C%','nCp',
'n0','F03CN','SdssC','HyWi_B','LOC','SM6_L','F03CO','Me','Mi',
'nN-N','nArN02','nCRX3','SpPosA_B','nCIR','B01','B03','N-073',
'SpMax_A','Psi_i_1d','B04','Sd0','TI2_L','nCrt','c-026','F02',
'nHDon','SpMax_B','Psi_i_A','nN','SM6_B','nArCOOR','nX','TAR']
df['TAR'] = df['TAR'].replace(['RB', 'NRB'], [1, 0])
df.replace(to_replace='NaN', value=np.nan, regex=True, inplace=True)
# df.mean(), df.median()
df.fillna(df.mean(), inplace=True)
X = df[[i for i in list(df.columns) if i != 'TAR']]
y = df['TAR']
feat_labels = X.columns
## Random Forest Feature Selection ##
stdsc = StandardScaler()
X = stdsc.fit_transform(X)
forest = RandomForestClassifier(n_estimators=500,
random_state=1)
forest.fit(X, y)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X.shape[1]):
print("%2d) %-*s %f" % (f + 1, 30,
feat_labels[indices[f]],
importances[indices[f]]))
plt.title('Feature Importance')
plt.bar(range(X.shape[1]),
importances[indices],
align='center')
plt.xticks(range(X.shape[1]),
feat_labels[indices], rotation=90)
plt.xlim([-1, X.shape[1]])
plt.tight_layout()
plt.savefig("rf_selection.png")
plt.show()
sfm = SelectFromModel(forest, prefit=True)
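# With no explicit threshold, SelectFromModel keeps the features whose
# importance is at least the mean importance (scikit-learn's default for tree
# ensembles), which is why np.mean(importances) is reported as the threshold.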
X_selected = sfm.transform(X)
print('Number of features that meet this threshold criterion:',
X_selected.shape[1])
print("Threshold %f" % np.mean(importances))
cols = []
for f in range(X_selected.shape[1]):
cols.append(feat_labels[indices[f]])
print("%2d) %-*s %f" % (f + 1, 30,
feat_labels[indices[f]],
importances[indices[f]]))
## HEAT MAP using the above features ##
cols.append('TAR')
cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols)
plt.show()
|
[
"numpy.mean",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"sklearn.feature_selection.SelectFromModel",
"numpy.corrcoef",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"numpy.argsort",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"mlxtend.plotting.heatmap",
"matplotlib.pyplot.show"
] |
[((442, 475), 'pandas.read_csv', 'pd.read_csv', (['"""NewBioDegWCols.csv"""'], {}), "('NewBioDegWCols.csv')\n", (453, 475), True, 'import pandas as pd\n'), ((1183, 1199), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1197, 1199), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1237, 1293), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(500)', 'random_state': '(1)'}), '(n_estimators=500, random_state=1)\n', (1259, 1293), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1605, 1636), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (1614, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1823), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, X.shape[1]]'], {}), '([-1, X.shape[1]])\n', (1805, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1842), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1840, 1842), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1874), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rf_selection.png"""'], {}), "('rf_selection.png')\n", (1854, 1874), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1929), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['forest'], {'prefit': '(True)'}), '(forest, prefit=True)\n', (1908, 1929), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((2404, 2434), 'numpy.corrcoef', 'np.corrcoef', (['df[cols].values.T'], {}), '(df[cols].values.T)\n', (2415, 2434), True, 'import numpy as np\n'), ((2440, 2486), 'mlxtend.plotting.heatmap', 'heatmap', (['cm'], {'row_names': 'cols', 'column_names': 'cols'}), '(cm, row_names=cols, column_names=cols)\n', (2447, 2486), False, 'from mlxtend.plotting import heatmap\n'), ((2487, 2497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2495, 2497), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1420), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (1407, 1420), True, 'import numpy as np\n'), ((2075, 2095), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (2082, 2095), True, 'import numpy as np\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
import numpy as np
import os
style.use('ggplot')
grid_list = ['grid.168010.e', 'grid.1032.0', 'grid.7177.6', 'grid.194645.b', 'grid.6571.5']
dirname = os.getcwd()
dirname = dirname + '/Data/'
df_ARWU2018 = pd.read_csv(dirname + 'ARWU/ARWURanking_2018_grid.csv')
df_ARWU2017 = pd.read_csv(dirname + 'ARWU/ARWURanking_2017_grid.csv')
df_ARWU2016 = pd.read_csv(dirname + 'ARWU/ARWURanking_2016_grid.csv')
df_ARWU2015 = pd.read_csv(dirname + 'ARWU/ARWURanking_2015_grid.csv')
df_ARWU2014 = pd.read_csv(dirname + 'ARWU/ARWURanking_2014_grid.csv')
df_ARWU2013 = pd.read_csv(dirname + 'ARWU/ARWURanking_2013_grid.csv')
df_ARWU2012 = pd.read_csv(dirname + 'ARWU/ARWURanking_2012_grid.csv')
ARWU_DATA = ["GRID_ID", "Alumni", "Award", "HiCi", "NS", "PUB"]
df_ARWU2012 = df_ARWU2012[ARWU_DATA].dropna()
df_ARWU2013 = df_ARWU2013[ARWU_DATA].dropna()
df_ARWU2014 = df_ARWU2014[ARWU_DATA].dropna()
df_ARWU2015 = df_ARWU2015[ARWU_DATA].dropna()
df_ARWU2016 = df_ARWU2016[ARWU_DATA].dropna()
df_ARWU2017 = df_ARWU2017[ARWU_DATA].dropna()
df_ARWU2018 = df_ARWU2018[ARWU_DATA].dropna()
df_THE2012 = pd.read_csv(dirname + 'THE/THERanking2013__grid.csv')
df_THE2013 = pd.read_csv(dirname + 'THE/THERanking2014__grid.csv')
df_THE2014 = pd.read_csv(dirname + 'THE/THERanking2015__grid.csv')
df_THE2015 = pd.read_csv(dirname + 'THE/THERanking2016__grid.csv')
df_THE2016 = pd.read_csv(dirname + 'THE/THERanking2017__grid.csv')
df_THE2017 = pd.read_csv(dirname + 'THE/THERanking2018__grid.csv')
df_THE2018 = pd.read_csv(dirname + 'THE/THERanking2019__grid.csv')
THE_DATA = ["GRID_ID", "Teaching", "Rechearch", "Citations", "Industry_Income", "Internationals_Outlook"]
df_THE2012 = df_THE2012[THE_DATA].dropna()
df_THE2013 = df_THE2013[THE_DATA].dropna()
df_THE2014 = df_THE2014[THE_DATA].dropna()
df_THE2015 = df_THE2015[THE_DATA].dropna()
df_THE2016 = df_THE2016[THE_DATA].dropna()
df_THE2017 = df_THE2017[THE_DATA].dropna()
df_THE2018 = df_THE2018[THE_DATA].dropna()
df_QS2012 = pd.read_csv(dirname + 'QS/qs2013_grid.csv')
df_QS2013 = pd.read_csv(dirname + 'QS/qs2014_grid.csv')
df_QS2014 = pd.read_csv(dirname + 'QS/qs2015_grid.csv')
df_QS2015 = pd.read_csv(dirname + 'QS/qs2016_grid.csv')
df_QS2016 = pd.read_csv(dirname + 'QS/qs2017_grid.csv')
df_QS2017 = pd.read_csv(dirname + 'QS/qs2018_grid.csv')
df_QS2018 = pd.read_csv(dirname + 'QS/qs2019_grid.csv')
QS_DATA = ["GRID_ID", "Academic_reputation", "Employer_reputation", "Faculty_Student", "International_Faculty",
"International_Students", "Citations"]
df_QS2018 = df_QS2018.replace(0, np.nan)
df_QS2017 = df_QS2017.replace(0, np.nan)
df_QS2016 = df_QS2016.replace(0, np.nan)
df_QS2015 = df_QS2015.replace(0, np.nan)
df_QS2014 = df_QS2014.replace(0, np.nan)
df_QS2013 = df_QS2013.replace(0, np.nan)
df_QS2012 = df_QS2012.replace(0, np.nan)
df_QS2018 = df_QS2018[QS_DATA].dropna()
df_QS2017 = df_QS2017[QS_DATA].dropna()
df_QS2016 = df_QS2016[QS_DATA].dropna()
df_QS2015 = df_QS2015[QS_DATA].dropna()
df_QS2014 = df_QS2014[QS_DATA].dropna()
df_QS2013 = df_QS2013[QS_DATA].dropna()
df_QS2012 = df_QS2012[QS_DATA].dropna()
def create_constructs(df_ARWU, df_THE, df_QS, year):
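    # Builds two composite columns per ranking: 'Reputation' as the mean of two
    # ranking-specific indicators and 'Publication' from the publication /
    # citation indicators, each tagged with the given year.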
df_ARWU['Reputation_ARWU'] = (df_ARWU['Alumni'] + df_ARWU['Award']) / 2
df_ARWU['Publication_ARWU'] = (df_ARWU['HiCi'] + df_ARWU['NS'] + df_ARWU['PUB']) / 3
df_ARWU = df_ARWU[['GRID_ID', 'Reputation_ARWU', 'Publication_ARWU']]
df_ARWU['year'] = year
df_ARWU = df_ARWU[['GRID_ID', 'year', 'Reputation_ARWU', 'Publication_ARWU']]
df_ARWU.columns = ['GRID_ID', 'year', 'Reputation_ARWU', 'Publication_ARWU']
df_THE['Reputation_THE'] = (df_THE['Teaching'] + df_THE['Rechearch']) / 2
df_THE['Publication_THE'] = df_THE['Citations']
df_THE = df_THE[['GRID_ID', 'Reputation_THE', 'Publication_THE']]
df_THE['year'] = year
df_THE = df_THE[['GRID_ID', 'year', 'Reputation_THE', 'Publication_THE']]
df_THE.columns = ['GRID_ID', 'year', 'Reputation_THE', 'Publication_THE']
df_QS['Reputation_QS'] = (df_QS['Academic_reputation'] + df_QS['Employer_reputation']) / 2
df_QS['Publication_QS'] = df_QS['Citations']
df_QS = df_QS[['GRID_ID', 'Reputation_QS', 'Publication_QS']]
df_QS['year'] = year
df_QS = df_QS[['GRID_ID', 'year', 'Reputation_QS', 'Publication_QS']]
df_QS.columns = ['GRID_ID', 'year', 'Reputation_QS', 'Publication_QS']
return df_ARWU, df_THE, df_QS
def add_arrow(line, position=None, direction='right', size=20, color=None):
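    # Draws an arrow head on every segment of the given line so that each
    # university's year-by-year trajectory in the (Reputation, Publication)
    # plane can be read off the plot.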
if color is None:
color = line.get_color()
xdata = line.get_xdata()
ydata = line.get_ydata()
for i in range(len(xdata) -1):
position = xdata[i]
start_ind = np.argmin(np.absolute(xdata - position))
if direction == 'right':
end_ind = start_ind + 1
else:
end_ind = start_ind - 1
line.axes.annotate('',
xytext=(xdata[start_ind], ydata[start_ind]),
xy=(xdata[end_ind], ydata[end_ind]),
arrowprops=dict(arrowstyle="-|>", color=color),
size=size
)
df_ARWU2018, df_THE2018, df_QS2018 = create_constructs(df_ARWU2018, df_THE2018, df_QS2018, '2018')
df_ARWU2017, df_THE2017, df_QS2017 = create_constructs(df_ARWU2017, df_THE2017, df_QS2017, '2017')
df_ARWU2016, df_THE2016, df_QS2016 = create_constructs(df_ARWU2016, df_THE2016, df_QS2016, '2016')
df_ARWU2015, df_THE2015, df_QS2015 = create_constructs(df_ARWU2015, df_THE2015, df_QS2015, '2015')
df_ARWU2014, df_THE2014, df_QS2014 = create_constructs(df_ARWU2014, df_THE2014, df_QS2014, '2014')
df_ARWU2013, df_THE2013, df_QS2013 = create_constructs(df_ARWU2013, df_THE2013, df_QS2013, '2013')
df_ARWU2012, df_THE2012, df_QS2012 = create_constructs(df_ARWU2012, df_THE2012, df_QS2012, '2012')
df_ARWU = df_ARWU2018
listARWU = [df_ARWU2017, df_ARWU2016, df_ARWU2015, df_ARWU2014, df_ARWU2013, df_ARWU2012]
for i in listARWU:
df_ARWU = df_ARWU.append(i)
df_THE = df_THE2018
listTHE = [df_THE2017, df_THE2016, df_THE2015, df_THE2014, df_THE2013, df_THE2012]
for i in listTHE:
df_THE = df_THE.append(i)
df_QS = df_QS2018
listQS = [df_QS2017, df_QS2016, df_QS2015, df_QS2014, df_QS2013, df_QS2012]
for i in listQS:
df_QS = df_QS.append(i)
def create_uni_df(ARWU, THE, QS, GRID):
ARWU = ARWU[ARWU['GRID_ID'] == GRID]
THE = THE[THE['GRID_ID'] == GRID]
QS = QS[QS['GRID_ID'] == GRID]
return ARWU, THE, QS
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
for i in grid_list:
df_stanford_ARWU, df_stanford_THE, df_stanford_QS = create_uni_df(df_ARWU, df_THE, df_QS, i)
line = ax1.plot(df_stanford_ARWU['Reputation_ARWU'], df_stanford_ARWU['Publication_ARWU'])[0]
add_arrow(line)
line = ax2.plot(df_stanford_THE['Reputation_THE'], df_stanford_THE['Publication_THE'])[0]
add_arrow(line)
line = ax3.plot(df_stanford_QS['Reputation_QS'], df_stanford_QS['Publication_QS'])[0]
add_arrow(line)
ax1.set_title('ARWU')
ax2.set_title('THE')
ax3.set_title('QS')
fig.text(0.5, 0.04, 'Reputation', ha='center', va='center', fontsize=15)
fig.text(0.09, 0.5, 'Publication', ha='center', va='center', rotation='vertical', fontsize=15)
ax3.legend(grid_list, loc='right',
bbox_to_anchor=(1.5, 0.5), ncol=1, fontsize='large', frameon=False)
plt.show()
|
[
"pandas.read_csv",
"numpy.absolute",
"os.getcwd",
"matplotlib.style.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((115, 134), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (124, 134), True, 'import matplotlib.style as style\n'), ((239, 250), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (248, 250), False, 'import os\n'), ((295, 350), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2018_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2018_grid.csv')\n", (306, 350), True, 'import pandas as pd\n'), ((365, 420), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2017_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2017_grid.csv')\n", (376, 420), True, 'import pandas as pd\n'), ((435, 490), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2016_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2016_grid.csv')\n", (446, 490), True, 'import pandas as pd\n'), ((505, 560), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2015_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2015_grid.csv')\n", (516, 560), True, 'import pandas as pd\n'), ((575, 630), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2014_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2014_grid.csv')\n", (586, 630), True, 'import pandas as pd\n'), ((645, 700), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2013_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2013_grid.csv')\n", (656, 700), True, 'import pandas as pd\n'), ((715, 770), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2012_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2012_grid.csv')\n", (726, 770), True, 'import pandas as pd\n'), ((1173, 1226), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2013__grid.csv')"], {}), "(dirname + 'THE/THERanking2013__grid.csv')\n", (1184, 1226), True, 'import pandas as pd\n'), ((1240, 1293), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2014__grid.csv')"], {}), "(dirname + 'THE/THERanking2014__grid.csv')\n", (1251, 1293), True, 'import pandas as pd\n'), ((1307, 1360), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2015__grid.csv')"], {}), "(dirname + 'THE/THERanking2015__grid.csv')\n", (1318, 1360), True, 'import pandas as pd\n'), ((1374, 1427), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2016__grid.csv')"], {}), "(dirname + 'THE/THERanking2016__grid.csv')\n", (1385, 1427), True, 'import pandas as pd\n'), ((1441, 1494), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2017__grid.csv')"], {}), "(dirname + 'THE/THERanking2017__grid.csv')\n", (1452, 1494), True, 'import pandas as pd\n'), ((1508, 1561), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2018__grid.csv')"], {}), "(dirname + 'THE/THERanking2018__grid.csv')\n", (1519, 1561), True, 'import pandas as pd\n'), ((1575, 1628), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2019__grid.csv')"], {}), "(dirname + 'THE/THERanking2019__grid.csv')\n", (1586, 1628), True, 'import pandas as pd\n'), ((2051, 2094), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2013_grid.csv')"], {}), "(dirname + 'QS/qs2013_grid.csv')\n", (2062, 2094), True, 'import pandas as pd\n'), ((2107, 2150), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2014_grid.csv')"], {}), "(dirname + 'QS/qs2014_grid.csv')\n", (2118, 2150), True, 'import pandas as pd\n'), ((2163, 2206), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2015_grid.csv')"], {}), "(dirname + 'QS/qs2015_grid.csv')\n", (2174, 2206), True, 'import pandas as pd\n'), ((2219, 2262), 
'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2016_grid.csv')"], {}), "(dirname + 'QS/qs2016_grid.csv')\n", (2230, 2262), True, 'import pandas as pd\n'), ((2275, 2318), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2017_grid.csv')"], {}), "(dirname + 'QS/qs2017_grid.csv')\n", (2286, 2318), True, 'import pandas as pd\n'), ((2331, 2374), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2018_grid.csv')"], {}), "(dirname + 'QS/qs2018_grid.csv')\n", (2342, 2374), True, 'import pandas as pd\n'), ((2387, 2430), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2019_grid.csv')"], {}), "(dirname + 'QS/qs2019_grid.csv')\n", (2398, 2430), True, 'import pandas as pd\n'), ((6482, 6517), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (6494, 6517), True, 'import matplotlib.pyplot as plt\n'), ((7330, 7340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7338, 7340), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4765), 'numpy.absolute', 'np.absolute', (['(xdata - position)'], {}), '(xdata - position)\n', (4747, 4765), True, 'import numpy as np\n')]
|
#!/bin/python3
# encoding: utf-8
import sys
import numpy as np
from time import time
'''
x
[0, 2] => idx start 0, end 3
[3, 5] => idx start 3, end 6
[6, 8] => idx start 6, end 9
((0 + (r_idx // 3 * 3)): (3 + (r_idx // 3 * 3)), (0 + (c_idx // 3 * 3)): (3 + (c_idx // 3 * 3)))
np.random.randint(1, 10)
'''
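# Worked example of the block-slicing formula above (values chosen for
# illustration): for row_idx = 4, col_idx = 7 the containing 3x3 block is
# rows (0 + 4 // 3 * 3) : (3 + 4 // 3 * 3) = 3:6 and
# cols (0 + 7 // 3 * 3) : (3 + 7 // 3 * 3) = 6:9.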
sys.setrecursionlimit(10 ** 7)
np.random.seed(int(time() % 1000))
TRIALS = [(0, 0, 0)]
def padding(input_values, rollback=False):
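    # Depth-first backtracking: TRIALS records the (row, col, value) of every
    # digit placed so far; on a dead end the most recent trial is popped and
    # the next larger digit is attempted at that cell.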
MAX_ROW, MAX_COL = input_values.shape
# if it is rollback
if rollback:
if len(TRIALS) == 0:
raise Exception('No possible result!')
i, j, prev_val = TRIALS.pop()
valid_digit = False
for num in range(prev_val+1, 10):
input_values[i, j] = num
valid_digit = value_chk(input_values, i, j)
if valid_digit: # if value fits current position
TRIALS.append((i, j, num))
return padding(input_values)
if not valid_digit: # if not updated
# clear value
input_values[i, j] = 0
# and rollback again
return padding(input_values, True)
else:
# if new position
for i in range(MAX_ROW):
for j in range(MAX_COL):
if input_values[i, j] == 0:
valid_digit = False
for num in range(1, 10):
input_values[i, j] = num
valid_digit = value_chk(input_values, i, j)
if valid_digit: # if value fits current position
TRIALS.append((i, j, num))
return padding(input_values)
# if no digit fits, rollback
if not valid_digit:
input_values[i, j] = 0
return padding(input_values, True)
return input_values
def value_chk(val_mtx, row_idx, col_idx):
val = val_mtx[row_idx, col_idx]
return (dup_cnt(val_mtx[row_idx, :], val) == 1
and dup_cnt(val_mtx[:, col_idx], val) == 1
and dup_cnt(val_mtx[(0 + (row_idx // 3 * 3)): (3 + (row_idx // 3 * 3)), (0 + (col_idx // 3 * 3)): (3 + (col_idx // 3 * 3))].flatten(), val) == 1)
def dup_cnt(tar_arr, val):
cnt = 0
for e in tar_arr:
if e == val:
cnt += 1
return cnt
if __name__ == '__main__':
i1 = np.array([
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9]
])
print('Original input:\n', i1)
result = padding(i1)
print('Result:\n', result)
# result check
for i in range(result.shape[0]):
for j in range(result.shape[1]):
if not value_chk(result, i, j):
raise Exception("Unvalid result! ({}, {})".format(i, j))
|
[
"sys.setrecursionlimit",
"numpy.array",
"time.time"
] |
[((309, 339), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (330, 339), False, 'import sys\n'), ((2394, 2678), 'numpy.array', 'np.array', (['[[5, 3, 0, 0, 7, 0, 0, 0, 0], [6, 0, 0, 1, 9, 5, 0, 0, 0], [0, 9, 8, 0, 0, \n 0, 0, 6, 0], [8, 0, 0, 0, 6, 0, 0, 0, 3], [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6], [0, 6, 0, 0, 0, 0, 2, 8, 0], [0, 0, 0, 4, \n 1, 9, 0, 0, 5], [0, 0, 0, 0, 8, 0, 0, 7, 9]]'], {}), '([[5, 3, 0, 0, 7, 0, 0, 0, 0], [6, 0, 0, 1, 9, 5, 0, 0, 0], [0, 9, \n 8, 0, 0, 0, 0, 6, 0], [8, 0, 0, 0, 6, 0, 0, 0, 3], [4, 0, 0, 8, 0, 3, 0,\n 0, 1], [7, 0, 0, 0, 2, 0, 0, 0, 6], [0, 6, 0, 0, 0, 0, 2, 8, 0], [0, 0,\n 0, 4, 1, 9, 0, 0, 5], [0, 0, 0, 0, 8, 0, 0, 7, 9]])\n', (2402, 2678), True, 'import numpy as np\n'), ((360, 366), 'time.time', 'time', ([], {}), '()\n', (364, 366), False, 'from time import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>, <NAME>
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
TODO
"""
import os
import io
import math
import numpy as np
from core import G
import getpath
import log
from collections import OrderedDict
import makehuman
import material
import json
#
# Proxy types. Loop over simple proxy types to do all proxies.
# Some code use lowercase proxy types instead.
#
SimpleProxyTypes = ['Hair', 'Eyes', 'Eyebrows', 'Eyelashes', 'Teeth', 'Tongue']
ProxyTypes = ['Proxymeshes', 'Clothes'] + SimpleProxyTypes
SimpleProxyTypesLower = []
for name in SimpleProxyTypes:
SimpleProxyTypesLower.append(name.lower())
_A7converter = None
Unit3 = np.identity(3,float)
class Proxy:
def __init__(self, file, type, human):
log.debug("Loading proxy file: %s.", file)
import makehuman
name = os.path.splitext(os.path.basename(file))[0]
self.name = name.capitalize().replace(" ","_")
self.license = makehuman.getAssetLicense()
self.description = ""
self.type = type
self.object = None
self.human = human
if not human:
raise RuntimeError("Proxy constructor expects a valid human object.")
self.file = file
if file:
self.mtime = os.path.getmtime(file)
else:
self.mtime = None
self.uuid = None
self.basemesh = makehuman.getBasemeshVersion()
self.tags = []
self.version = 110
self.ref_vIdxs = None # (Vidx1,Vidx2,Vidx3) list with references to human vertex indices, indexed by proxy vert
self.weights = None # (w1,w2,w3) list, with weights per human vertex (mapped by ref_vIdxs), indexed by proxy vert
self.vertWeights = {} # (proxy-vert, weight) list for each parent vert (reverse mapping of self.weights, indexed by human vertex)
self.offsets = None # (x,y,z) list of vertex offsets, indexed by proxy vert
self.vertexBoneWeights = None # Explicitly defined custom vertex-to-bone weights, connecting the proxy mesh to the reference skeleton (optional)
# Not to be confused with the vertex weights assigned for mapping the proxy mesh geometry to the base mesh
self.tmatrix = TMatrix() # Offset transformation matrix. Replaces scale
self.z_depth = -1 # Render order depth for the proxy object. Also used to determine which proxy object should mask others (delete faces)
self.max_pole = None # Signifies the maximum number of faces per vertex on the mesh topology. Set to none for default.
self.special_pose = {} # Special poses that should be set on the human when this proxy is active to make it look good
self.uvLayers = {} # TODO what is this used for?
self.material = material.Material(self.name)
self._obj_file = None
self._vertexBoneWeights_file = None
self._material_file = None
self.deleteVerts = np.zeros(human.meshData.getVertexCount(), bool)
self.weightsCache = None
self.cacheSkel = None
@property
def material_file(self):
folder = os.path.dirname(self.file) if self.file else None
return _getFilePath(self._material_file, folder)
@property
def obj_file(self):
folder = os.path.dirname(self.file) if self.file else None
return _getFilePath(self._obj_file, folder, ['npz', 'obj'])
@property
def vertexBoneWeights_file(self):
folder = os.path.dirname(self.file) if self.file else None
return _getFilePath(self._vertexBoneWeights_file, folder)
def __repr__(self):
return ("<Proxy %s %s %s %s>" % (self.name, self.type, self.file, self.uuid))
def getSeedMesh(self):
for pxy in self.human.getProxies():
if self == pxy:
return pxy.object.getSeedMesh()
if self.type == "Proxymeshes":
if not self.human.proxy:
return None
return self.human.getProxyMesh()
elif self.type in ["Converter"]:
return None
else:
raise NameError("Unknown proxy type %s" % self.type)
def getMesh(self):
if not self.object:
return None
return self.object.mesh
def loadMeshAndObject(self, human):
import files3d
import guicommon
obj = False
mesh = files3d.loadMesh(self.obj_file, maxFaces = self.max_pole)
if not mesh:
log.error("Failed to load %s", self.obj_file)
else:
mesh.priority = self.z_depth # Set render order
mesh.setCameraProjection(0) # Set to model camera
obj = self.object = guicommon.Object(mesh, human.getPosition())
obj.proxy = self
obj.material = self.material
obj.setRotation(human.getRotation())
obj.setSolid(human.solid) # Set to wireframe if human is in wireframe
# TODO perhaps other properties should be copied from human to object, such as subdivision state. For other hints, and duplicate code, see guicommon Object.setProxy()
# TODO why return both obj and mesh if you can access the mesh easily through obj.mesh?
return mesh,obj
def _finalize(self, refVerts):
"""
Final step in parsing/loading a proxy file. Initializes numpy structures
for performance improvement.
"""
self.weights = np.asarray([v._weights for v in refVerts], dtype=np.float32)
self.ref_vIdxs = np.asarray([v._verts for v in refVerts], dtype=np.uint32)
self.offsets = np.asarray([v._offset for v in refVerts], dtype=np.float32)
def _reloadReverseMapping(self):
"""
Reconstruct reverse vertex (and weights) mapping
"""
self.vertWeights = {}
for pxy_vIdx in range(self.ref_vIdxs.shape[0]):
_addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 0], pxy_vIdx, self.weights[pxy_vIdx, 0])
_addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 1], pxy_vIdx, self.weights[pxy_vIdx, 1])
_addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 2], pxy_vIdx, self.weights[pxy_vIdx, 2])
def getCoords(self, fit_to_posed=False):
if fit_to_posed:
hcoord = self.human.meshData.coord
else:
hcoord = self.human.getRestposeCoordinates()
matrix = self.tmatrix.getMatrix(hcoord)
ref_vIdxs = self.ref_vIdxs
weights = self.weights
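        # Each proxy vertex is a weighted combination of (up to) three base
        # mesh vertices (ref_vIdxs weighted by weights) plus its stored offset
        # transformed by the proxy's TMatrix (scale/shear) matrix.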
coord = (
hcoord[ref_vIdxs[:,0]] * weights[:,0,None] +
hcoord[ref_vIdxs[:,1]] * weights[:,1,None] +
hcoord[ref_vIdxs[:,2]] * weights[:,2,None] +
np.dot(matrix, self.offsets.transpose()).transpose()
)
return coord
def update(self, mesh, fit_to_posed=False):
#log.debug("Updating proxy %s.", self.name)
coords = self.getCoords(fit_to_posed)
mesh.changeCoords(coords)
mesh.calcNormals()
def getUuid(self):
if self.uuid:
return self.uuid
else:
return self.name
def hasCustomVertexWeights(self):
"""
Determines whether this proxy explicitly defines its own set of vertex
to bone weights (defined on the bones of the reference skeleton).
Returns True if this proxy has custom vertex weights, False if it does
not, in which case vertex weights will be derived from the weights of
the basemesh, mapped through the vertex mapping of the proxy.
"""
return self.vertexBoneWeights is not None
def getVertexWeights(self, humanWeights, skel=None, allowCache=False):
"""
Map armature weights mapped to the human to the proxy mesh through the
proxy mapping.
humanWeights is expected to be an animation.VertexBoneWeights object.
Only when this proxy has custom weights:
Optionally remaps the weights to fit a user-selected skeleton when a
skel is supplied as argument. If no skel argument is provided, the
weights for the base skeleton are returned.
Note: these vertex weights are intended for rigging and are not to be
confused with getWeights() which returns the weights of the proxy
mapping to the basemesh.
"""
# Override proxy weights mapping behaviour if this proxy has its own
# bone weights defined explicitly.
# This requires remapping the vertex weights of the proxy, defined on
# the bones of the reference skeleton, to those of the current skeleton.
# The current skeleton is retrieved from the human object linked to this
# proxy.
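        # In the fallback path below, each bone weight wt on base vertex v is
        # pushed to every proxy vertex pv mapped to v, scaled by the proxy
        # mapping weight w (pw = w * wt) and kept only if pw exceeds
        # WEIGHT_THRESHOLD.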
import time
import log
if self.hasCustomVertexWeights():
# TODO we could introduce caching of weights here as long as the skeleton is not changed
if skel is None:
return self.human.getBaseSkeleton().getVertexWeights(self.vertexBoneWeights, force_remap=True)
else:
return skel.getVertexWeights(self.vertexBoneWeights, force_remap=True)
# Remap weights through proxy mapping
WEIGHT_THRESHOLD = 1e-4 # Threshold for including bone weight
recalculate = True
weights = OrderedDict()
if not allowCache:
pass
#print("Caching not allowed")
else:
if self.weightsCache is None:
pass
#print("There is no cache")
else:
if not skel is None:
if skel == self.cacheSkel:
recalculate = False
else:
log.debug("The skeleton is different")
if recalculate:
log.debug("remapping weights for proxy " + self.name)
start = time.perf_counter()
for bname, (indxs, wghts) in list(humanWeights.data.items()):
vgroup = []
empty = True
for (v,wt) in zip(indxs, wghts):
try:
vlist = self.vertWeights[v]
except KeyError:
vlist = []
for (pv, w) in vlist:
pw = w*wt
if (pw > WEIGHT_THRESHOLD):
vgroup.append((pv, pw))
empty = False
if not empty:
weights[bname] = vgroup
stop = time.perf_counter()
hw = humanWeights.create(weights)
if allowCache:
self.weightsCache = hw
self.cacheSkel = skel
else:
self.weightsCache = None
self.cacheSkel = None
log.debug("remapping weights for %s took %.5f seconds", self.name, stop - start)
else:
hw = self.weightsCache
return hw
doRefVerts = 1
doWeights = 2
doDeleteVerts = 3
def loadProxy(human, path, type="Clothes"):
try:
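        # Prefer the compiled .mhpxy binary; fall back to (re)parsing the
        # ascii proxy file and, when it lives under the user data path,
        # recompile the binary cache for the next load.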
npzpath = os.path.splitext(path)[0] + '.mhpxy'
asciipath = os.path.splitext(path)[0] + getAsciiFileExtension(type)
try:
if not os.path.isfile(npzpath):
log.message('compiled proxy file missing: %s', npzpath)
raise RuntimeError('compiled proxy file missing: %s', npzpath)
if os.path.isfile(asciipath) and os.path.getmtime(asciipath) > os.path.getmtime(npzpath):
log.message('compiled proxy file out of date: %s', npzpath)
raise RuntimeError('compiled file out of date: %s', npzpath)
proxy = loadBinaryProxy(npzpath, human, type)
except Exception as e:
showTrace = not isinstance(e, RuntimeError)
log.warning("Problem loading binary proxy: %s", e, exc_info=showTrace)
proxy = loadTextProxy(human, asciipath, type) # TODO perhaps proxy type should be stored in .mhclo file too
if getpath.isSubPath(npzpath, getpath.getPath()):
# Only write compiled binary proxies to user data path
try:
log.message('Compiling binary proxy file %s', npzpath)
saveBinaryProxy(proxy, npzpath)
except Exception:
log.notice('unable to save compiled proxy: %s', npzpath, exc_info=True)
if os.path.isfile(npzpath):
# Remove file again, in case an empty file is left
try:
os.remove(npzpath)
except Exception as e:
log.warning("Could not remove empty file %s that was left behind (%s).", npzpath, e)
else:
log.debug('Not writing compiled proxies to system paths (%s).', npzpath)
except:
log.error('Unable to load proxy file: %s', path, exc_info=True)
return None
return proxy
def loadTextProxy(human, filepath, type="Clothes"):
import io
try:
fp = io.open(filepath, "r", encoding="utf-8")
except IOError:
log.error("*** Cannot open %s", filepath)
return None
folder = os.path.realpath(os.path.expanduser(os.path.dirname(filepath)))
proxy = Proxy(filepath, type, human)
    proxy.max_pole = 8
refVerts = []
status = 0
vnum = 0
for line in fp:
words = line.split()
if len(words) == 0:
# Reset status on empty line
#status = 0
continue
if words[0].startswith('#'):
# Comment
# Try interpreting comment attributes as license info
proxy.license.updateFromComment(line)
continue
key = words[0]
if key == 'name':
proxy.name = " ".join(words[1:])
elif key == 'uuid':
proxy.uuid = " ".join(words[1:])
elif key == 'description':
proxy.description = " ".join(words[1:])
elif key in ['author', 'license', 'homepage']:
proxy.license.updateFromComment(words)
elif key == 'tag':
proxy.tags.append( " ".join(words[1:]).lower() )
elif key == 'version':
proxy.version = int(words[1])
elif key == 'z_depth':
proxy.z_depth = int(words[1])
elif key == 'max_pole':
proxy.max_pole = int(words[1])
elif key == 'special_pose':
proxy.special_pose[words[1]] = words[2]
elif key == 'verts':
status = doRefVerts
elif key == 'weights':
status = doWeights
if proxy.weights is None:
proxy.weights = {}
weights = []
proxy.weights[words[1]] = weights
elif key == "delete_verts":
status = doDeleteVerts
elif key == 'obj_file':
proxy._obj_file = _getFileName(folder, words[1], ".obj")
elif key == 'material':
matFile = _getFileName(folder, words[1], ".mhmat")
proxy._material_file = matFile
proxy.material.fromFile(proxy.material_file)
elif key == 'vertexboneweights_file':
from animation import VertexBoneWeights
proxy._vertexBoneWeights_file = _getFileName(folder, words[1], ".jsonw")
proxy.vertexBoneWeights = VertexBoneWeights.fromFile(proxy.vertexBoneWeights_file)
elif key == 'backface_culling':
# TODO remove in future
log.warning('Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead.')
elif key == 'transparent':
# TODO remove in future
log.warning('Deprecated parameter "transparent" used in proxy file. Set property in material file instead.')
elif key == 'uvLayer':
# TODO is this still used?
if len(words) > 2:
layer = int(words[1])
uvFile = words[2]
else:
layer = 0
uvFile = words[1]
#uvMap = material.UVMap(proxy.name+"UV"+str(layer))
#uvMap.read(proxy.mesh, _getFileName(folder, uvFile, ".mhuv"))
# Delayed load, only store path here
proxy.uvLayers[layer] = _getFileName(folder, uvFile, ".mhuv")
elif key == 'x_scale':
proxy.tmatrix.getScaleData(words, 0)
elif key == 'y_scale':
proxy.tmatrix.getScaleData(words, 1)
elif key == 'z_scale':
proxy.tmatrix.getScaleData(words, 2)
elif key == 'shear_x':
proxy.tmatrix.getShearData(words, 0, None)
elif key == 'shear_y':
proxy.tmatrix.getShearData(words, 1, None)
elif key == 'shear_z':
proxy.tmatrix.getShearData(words, 2, None)
elif key == 'l_shear_x':
proxy.tmatrix.getShearData(words, 0, 'Left')
elif key == 'l_shear_y':
proxy.tmatrix.getShearData(words, 1, 'Left')
elif key == 'l_shear_z':
proxy.tmatrix.getShearData(words, 2, 'Left')
elif key == 'r_shear_x':
proxy.tmatrix.getShearData(words, 0, 'Right')
elif key == 'r_shear_y':
proxy.tmatrix.getShearData(words, 1, 'Right')
elif key == 'r_shear_z':
proxy.tmatrix.getShearData(words, 2, 'Right')
elif key == 'basemesh':
proxy.basemesh = words[1]
elif key in ['shapekey', 'subsurf', 'shrinkwrap', 'solidify', 'objfile_layer', 'uvtex_layer', 'use_projection', 'mask_uv_layer', 'texture_uv_layer', 'delete', 'vertexgroup_file']:
log.warning('Deprecated parameter "%s" used in proxy file. Please remove.', key)
elif status == doRefVerts:
refVert = ProxyRefVert(human)
refVerts.append(refVert)
if len(words) == 1:
refVert.fromSingle(words, vnum, proxy.vertWeights)
else:
refVert.fromTriple(words, vnum, proxy.vertWeights)
vnum += 1
elif status == doWeights:
v = int(words[0])
w = float(words[1])
weights.append((v,w))
elif status == doDeleteVerts:
sequence = False
for v in words:
if v == "-":
sequence = True
else:
v1 = int(v)
if sequence:
for vn in range(v0,v1+1):
proxy.deleteVerts[vn] = True
sequence = False
else:
proxy.deleteVerts[v1] = True
v0 = v1
else:
log.warning('Unknown keyword %s found in proxy file %s', key, filepath)
if proxy.z_depth == -1:
log.warning('Proxy file %s does not specify a Z depth. Using 50.', filepath)
proxy.z_depth = 50
# since max-pole is used for the calculation of neighboring planes we have to double it initially
proxy.max_pole *= 2
proxy._finalize(refVerts)
return proxy
def saveBinaryProxy(proxy, path):
def _properPath(path):
return getpath.getJailedPath(path, folder)
fp = io.open(path, 'wb')
tagStr, tagIdx = _packStringList(proxy.tags)
uvStr,uvIdx = _packStringList([ _properPath(proxy.uvLayers[k]) for k in sorted(proxy.uvLayers.keys()) ])
licStr, licIdx = proxy.license.toNumpyString()
folder = os.path.dirname(path)
vars_ = dict(
#proxyType = np.fromstring(proxy.type, dtype='S1'), # TODO store proxy type?
name = np.fromstring(proxy.name, dtype='S1'),
uuid = np.fromstring(proxy.uuid, dtype='S1'),
description = np.fromstring(proxy.description, dtype='S1'),
basemesh = np.fromstring(proxy.basemesh, dtype='S1'),
tags_str = tagStr,
tags_idx = tagIdx,
lic_str = licStr,
lic_idx = licIdx,
uvLayers_str = uvStr,
uvLayers_idx = uvIdx,
obj_file = np.fromstring(_properPath(proxy.obj_file), dtype='S1'),
version = np.asarray(proxy.version, dtype=np.int32)
)
if proxy.material_file:
vars_["material_file"] = np.fromstring(_properPath(proxy.material_file), dtype='S1')
if np.any(proxy.deleteVerts):
vars_["deleteVerts"] = proxy.deleteVerts
if proxy.z_depth is not None and proxy.z_depth != -1:
vars_["z_depth"] = np.asarray(proxy.z_depth, dtype=np.int32)
if proxy.max_pole:
vars_["max_pole"] = np.asarray(proxy.max_pole, dtype=np.uint32)
proxy.tmatrix.toNumpyStruct(vars_)
special_poses = []
for posetype, posename in list(proxy.special_pose.items()):
special_poses.append(posetype)
special_poses.append(posename)
specialposeStr, specialposeIdx = _packStringList(special_poses)
vars_["special_pose_str"] = specialposeStr
vars_["special_pose_idx"] = specialposeIdx
if proxy.weights[:,1:].any():
# 3 ref verts used in this proxy
num_refverts = 3
vars_["ref_vIdxs"] = proxy.ref_vIdxs
vars_["offsets"] = proxy.offsets
vars_["weights"] = proxy.weights
else:
# Proxy uses exact fitting exclusively: store npz file more compactly
num_refverts = 1
vars_["ref_vIdxs"] = proxy.ref_vIdxs[:,0]
vars_["weights"] = proxy.weights[:,0]
vars_['num_refverts'] = np.asarray(num_refverts, dtype=np.int32)
if proxy.vertexBoneWeights_file:
vars_['vertexBoneWeights_file'] = np.fromstring(_properPath(proxy.vertexBoneWeights_file), dtype='S1')
np.savez_compressed(fp, **vars_)
fp.close()
os.utime(path, None) # Ensure modification time is updated
def loadBinaryProxy(path, human, type):
log.debug("Loading binary proxy %s.", path)
npzfile = np.load(path)
#if type is None:
# proxyType = npzfile['proxyType'].tostring()
#else:
proxyType = type
proxy = Proxy(path, proxyType, human)
proxy.name = str(npzfile['name'].tostring(), 'utf8')
proxy.uuid = str(npzfile['uuid'].tostring(), 'utf8')
proxy.basemesh = str(npzfile['basemesh'].tostring(), 'utf8')
if 'description' in npzfile:
proxy.description = str(npzfile['description'].tostring(), 'utf8')
if 'version' in npzfile:
proxy.version = int(npzfile['version'])
if 'lic_str' in npzfile and 'lic_idx' in npzfile:
proxy.license.fromNumpyString(npzfile['lic_str'], npzfile['lic_idx'])
proxy.tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))
if 'z_depth' in npzfile:
proxy.z_depth = int(npzfile['z_depth'])
if 'max_pole' in npzfile:
proxy.max_pole = int(npzfile['max_pole'])
if 'special_pose_str' in npzfile:
special_poses = _unpackStringList(npzfile['special_pose_str'], npzfile['special_pose_idx'])
for idx in range(0, len(special_poses), 2):
proxy.special_pose[special_poses[idx]] = special_poses[idx+1]
num_refverts = int(npzfile['num_refverts'])
if num_refverts == 3:
proxy.ref_vIdxs = npzfile['ref_vIdxs']
proxy.offsets = npzfile['offsets']
proxy.weights = npzfile['weights']
else:
num_refs = npzfile['ref_vIdxs'].shape[0]
proxy.ref_vIdxs = np.zeros((num_refs,3), dtype=np.uint32)
proxy.ref_vIdxs[:,0] = npzfile['ref_vIdxs']
proxy.offsets = np.zeros((num_refs,3), dtype=np.float32)
proxy.weights = np.zeros((num_refs,3), dtype=np.float32)
proxy.weights[:,0] = npzfile['weights']
if "deleteVerts" in npzfile:
proxy.deleteVerts = npzfile['deleteVerts']
# Reconstruct reverse vertex (and weights) mapping
proxy._reloadReverseMapping()
proxy.tmatrix.fromNumpyStruct(npzfile)
proxy.uvLayers = {}
for uvIdx, uvName in enumerate(_unpackStringList(npzfile['uvLayers_str'], npzfile['uvLayers_idx'])):
proxy.uvLayers[uvIdx] = uvName
proxy.material = material.Material(proxy.name)
if 'material_file' in npzfile:
proxy._material_file = str(npzfile['material_file'].tostring(), 'utf8')
if proxy.material_file:
proxy.material.fromFile(proxy.material_file)
proxy._obj_file = str(npzfile['obj_file'].tostring(), 'utf8')
if 'vertexBoneWeights_file' in npzfile:
proxy._vertexBoneWeights_file = str(npzfile['vertexBoneWeights_file'].tostring(), 'utf8')
if proxy.vertexBoneWeights_file:
from animation import VertexBoneWeights
proxy.vertexBoneWeights = VertexBoneWeights.fromFile(proxy.vertexBoneWeights_file)
if proxy.z_depth == -1:
log.warning('Proxy file %s does not specify a Z depth. Using 50.', path)
proxy.z_depth = 50
return proxy
#
# class ProxyRefVert:
#
class ProxyRefVert:
def __init__(self, human):
self.human = human
def fromSingle(self, words, vnum, vertWeights):
# TODO store the number of reference verts in proxy so that we can efficiently save and load them.
v0 = int(words[0])
self._verts = (v0,0,1)
self._weights = (1.0,0.0,0.0)
self._offset = np.zeros(3, float)
_addProxyVertWeight(vertWeights, v0, vnum, 1)
return self
def fromTriple(self, words, vnum, vertWeights):
v0 = int(words[0])
v1 = int(words[1])
v2 = int(words[2])
w0 = float(words[3])
w1 = float(words[4])
w2 = float(words[5])
if len(words) > 6:
d0 = float(words[6])
d1 = float(words[7])
d2 = float(words[8])
else:
(d0,d1,d2) = (0,0,0)
self._verts = (v0,v1,v2)
self._weights = (w0,w1,w2)
self._offset = np.array((d0,d1,d2), float)
_addProxyVertWeight(vertWeights, v0, vnum, w0)
_addProxyVertWeight(vertWeights, v1, vnum, w1)
_addProxyVertWeight(vertWeights, v2, vnum, w2)
return self
def getWeights(self):
return self._weights
def getCoord(self, matrix):
hcoord = self.human.getRestposeCoordinates()
return (
np.dot(hcoord[self._verts], self._weights) +
np.dot(matrix, self._offset)
)
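# Note added for clarity (comment only, not in the original module): getCoord()
# above evaluates the barycentric mapping of a single proxy vertex,
#     coord = w0*r0 + w1*r1 + w2*r2 + M . offset
# where r0, r1, r2 are the rest-pose coordinates of the three referenced base-mesh
# vertices, (w0, w1, w2) the weights parsed from the proxy file, and M the TMatrix
# scale/shear matrix that lets the stored offset follow the body's proportions.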
def _addProxyVertWeight(vertWeights, v, pv, w):
try:
vertWeights[v].append((pv, w))
except KeyError:
vertWeights[v] = [(pv,w)]
return
#
# class TMatrix:
# Transformation matrix. Replaces previous scale
#
class TMatrix:
def __init__(self):
self.scaleData = None
self.shearData = None
self.lShearData = None
self.rShearData = None
def toNumpyStruct(self, npzfile, prefix=""):
"""Serialize TMatrix in npz file"""
def _nan_array(size):
return np.repeat(float('nan'), size).astype(np.float32)
def _pack_scales(scaleData):
scales = list()
vidxs = list()
for e_idx, entry in enumerate(scaleData):
# Should be 3 entries
if entry is None:
scales.append(float('nan'))
vidxs.extend([0, 0])
else:
vidx1, vidx2, scale = entry
scales.append(scale)
vidxs.extend([vidx1, vidx2])
return (np.asarray(scales, dtype=np.float32),
np.asarray(vidxs, dtype=np.uint32))
def _pack_shears(shearData):
shears = list()
vidxs = list()
for e_idx, entry in enumerate(shearData):
# Should be 3 entries
if entry is None:
shears.extend([float('nan'), float('nan')])
vidxs.extend([0, 0])
else:
vidx1, vidx2, shear1, shear2 = entry
shears.extend([shear1, shear2])
vidxs.extend([vidx1, vidx2])
return (np.asarray(shears, dtype=np.float32),
np.asarray(vidxs, dtype=np.uint32))
if prefix:
prefix += "_"
if self.scaleData:
scales, vidxs = _pack_scales(self.scaleData)
npzfile[prefix+"tmat_scale"] = scales
npzfile[prefix+"tmat_scale_idx"] = vidxs
if self.shearData:
shears, vidxs = _pack_shears(self.shearData)
npzfile[prefix+"tmat_shear"] = shears
npzfile[prefix+"tmat_shear_idx"] = vidxs
if self.lShearData:
shears, vidxs = _pack_shears(self.lShearData)
npzfile[prefix+"tmat_lshear"] = shears
npzfile[prefix+"tmat_lshear_idx"] = vidxs
if self.rShearData:
shears, vidxs = _pack_shears(self.rShearData)
npzfile[prefix+"tmat_rshear"] = shears
npzfile[prefix+"tmat_rshear_idx"] = vidxs
def fromNumpyStruct(self, npzfile, prefix=""):
"""Deserialize TMatrix from npz file"""
def _unpack_scales(scales, vidxs):
scaleData = [None, None, None]
for i in range(3):
if i >= min(len(scales), len(vidxs)//2):
break
scale = scales[i]
if not math.isnan(scale):
vidx1, vidx2 = vidxs[i*2], vidxs[i*2+1]
scaleData[i] = (int(vidx1), int(vidx2), float(scale))
return scaleData
def _unpack_shears(shears, vidxs):
shearData = [None, None, None]
for i in range(3):
if i >= min(len(shears)//2, len(vidxs)//2):
break
shear1, shear2 = shears[i*2], shears[i*2+1]
vidx1, vidx2 = vidxs[i*2], vidxs[i*2+1]
shearData[i] = (int(vidx1), int(vidx2), float(shear1), float(shear2))
return shearData
if prefix:
prefix += "_"
if prefix+'tmat_scale' in npzfile and prefix+'tmat_scale_idx' in npzfile:
scales = npzfile[prefix+'tmat_scale']
vidxs = npzfile[prefix+'tmat_scale_idx']
self.scaleData = _unpack_scales(scales, vidxs)
if prefix+'tmat_shear' in npzfile and prefix+'tmat_shear_idx' in npzfile:
shears = npzfile[prefix+'tmat_shear']
vidxs = npzfile[prefix+'tmat_shear_idx']
self.shearData = _unpack_shears(shears, vidxs)
if prefix+'tmat_lshear' in npzfile and prefix+'tmat_lshear_idx' in npzfile:
shears = npzfile[prefix+'tmat_lshear']
vidxs = npzfile[prefix+'tmat_lshear_idx']
self.lShearData = _unpack_shears(shears, vidxs)
if prefix+'tmat_rshear' in npzfile and prefix+'tmat_rshear_idx' in npzfile:
shears = npzfile[prefix+'tmat_rshear']
vidxs = npzfile[prefix+'tmat_rshear_idx']
self.rShearData = _unpack_shears(shears, vidxs)
def getScaleData(self, words, idx):
vn1 = int(words[1])
vn2 = int(words[2])
den = float(words[3])
if not self.scaleData:
self.scaleData = [None, None, None]
self.scaleData[idx] = (vn1, vn2, den)
def getShearData(self, words, idx, side):
vn1 = int(words[1])
vn2 = int(words[2])
x1 = float(words[3])
x2 = float(words[4])
bbdata = (vn1, vn2, x1, x2)
if side == "Left":
if not self.lShearData:
self.lShearData = [None, None, None]
self.lShearData[idx] = bbdata
elif side == "Right":
if not self.rShearData:
self.rShearData = [None, None, None]
self.rShearData[idx] = bbdata
else:
if not self.shearData:
self.shearData = [None, None, None]
self.shearData[idx] = bbdata
def getMatrix(self, hcoord):
if self.scaleData:
matrix = np.identity(3, float)
for n in range(3):
(vn1, vn2, den) = self.scaleData[n]
co1 = hcoord[vn1]
co2 = hcoord[vn2]
num = abs(co1[n] - co2[n])
matrix[n][n] = (num/den)
return matrix
elif self.shearData:
return self.matrixFromShear(self.shearData, hcoord)
elif self.lShearData:
return self.matrixFromShear(self.lShearData, hcoord)
elif self.rShearData:
return self.matrixFromShear(self.rShearData, hcoord)
else:
return Unit3
def matrixFromShear(self, shear, hcoord):
from transformations import affine_matrix_from_points
# sfaces and tfaces are the face coordinates
sfaces = np.zeros((3,2), float)
tfaces = np.zeros((3,2), float)
for n in range(3):
(vn1, vn2, sfaces[n,0], sfaces[n,1]) = shear[n]
tfaces[n,0] = hcoord[vn1][n]
tfaces[n,1] = hcoord[vn2][n]
# sverts and tverts are the vertex coordinates
sverts = []
tverts = []
for i in [0,1]:
for j,k in [(0,0),(0,1),(1,1),(1,0)]:
sverts.append( np.array((sfaces[0,i], sfaces[1,j], sfaces[2,k])) )
tverts.append( np.array((tfaces[0,i], tfaces[1,j], tfaces[2,k])) )
sbox = vertsToNumpy(sverts)
tbox = vertsToNumpy(tverts)
mat = affine_matrix_from_points(sbox, tbox)
return mat[:3,:3]
def vertsToNumpy(verts):
result = np.asarray(verts)
return np.asarray([result[:,0], result[:,1], result[:,2]], dtype=np.float32)
def _getFileName(folder, file, suffix):
(name, ext) = os.path.split(file)
if ext:
return os.path.join(folder, file)
else:
return os.path.join(folder, file+suffix)
def transferVertexMaskToProxy(vertsMask, proxy):
"""
Transfer a vertex mask defined on the parent mesh to a proxy using the
proxy mapping to this parent mesh.
    A vertex mask defines for each vertex whether it should be hidden; only
    faces that have all of their vertices hidden will be hidden.
    True in the vertex mask means: show the vertex, False means: hide it (masked).
"""
# Convert basemesh vertex mask to local mask for proxy vertices
proxyVertMask = np.ones(len(proxy.ref_vIdxs), dtype=bool)
# Proxy verts that use exact mapping
exact_mask = ~np.any(proxy.weights[:,1:], axis=1)
# Faster numpy implementation of the above:
unmasked_row_col = np.nonzero(vertsMask[proxy.ref_vIdxs])
unmasked_rows = unmasked_row_col[0]
if len(unmasked_rows) > 0:
unmasked_count = np.bincount(unmasked_rows) # count number of unmasked verts per row
# only hide/mask a vertex if at least two referenced body verts are hidden/masked
masked_idxs = np.nonzero(unmasked_count < 2)
proxyVertMask[masked_idxs] = False
else:
# All verts are masked
proxyVertMask[:] = False
# Directly map exactly mapped proxy verts
proxyVertMask[exact_mask] = vertsMask[proxy.ref_vIdxs[exact_mask,0]]
return proxyVertMask
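# Illustrative sketch (not part of the original module): transferVertexMaskToProxy()
# is meant to be called with a boolean mask over the base-mesh vertices, e.g.
#
#     verts_mask = np.ones(base_vertex_count, dtype=bool)   # base_vertex_count is hypothetical
#     verts_mask[hidden_vertex_indices] = False              # hypothetical index array
#     proxy_mask = transferVertexMaskToProxy(verts_mask, proxy)
#
# proxy_mask then holds one boolean per proxy vertex; the helper only hides a fitted
# vertex when at least two of its referenced base-mesh vertices are hidden, while
# exactly mapped vertices copy the mask value directly.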
def getAsciiFileExtension(proxyType):
"""
The file extension used for ASCII (non-compiled) proxy source files
    for proxies of the specified type.
"""
return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo'
def peekMetadata(proxyFilePath, proxyType=None):
"""
    Read UUID and tags from a proxy file, and return as soon as the vertex data
    begins. Only the necessary header lines are read from disk, so the entire
    proxy file is never loaded into memory.
"""
#import zipfile
#if zipfile.is_zipfile(proxyFilePath):
# Using the filename extension is faster (and will have to do):
if os.path.splitext(proxyFilePath)[1][1:].lower() == 'mhpxy':
try:
if proxyType is not None:
asciipath = os.path.splitext(proxyFilePath)[0] + getAsciiFileExtension(proxyType)
if os.path.isfile(asciipath) and os.path.getmtime(asciipath) > os.path.getmtime(proxyFilePath):
_npzpath = proxyFilePath
proxyFilePath = asciipath
raise RuntimeError('compiled file out of date: %s', _npzpath)
# Binary proxy file
npzfile = np.load(proxyFilePath)
uuid = str(npzfile['uuid'].tostring(), 'utf8')
tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))
return (uuid, tags)
except Exception as e:
showTrace = not isinstance(e, RuntimeError)
log.warning("Problem loading metadata from binary proxy, trying ASCII file: %s", e, exc_info=showTrace)
# ASCII proxy file
import io
fp = io.open(proxyFilePath, 'r', encoding="utf-8")
uuid = None
tags = set()
for line in fp:
words = line.split()
if len(words) == 0:
pass
elif words[0] == 'uuid':
uuid = words[1]
elif words[0] == 'tag':
tags.add(" ".join(words[1:]).lower())
elif words[0] == 'verts':
break
fp.close()
return (uuid, tags)
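# Illustrative sketch (not part of the original module): peekMetadata() is intended
# for building a library index without fully parsing each proxy, e.g.
#
#     uuid, tags = peekMetadata('clothes/shirt/shirt.mhclo', proxyType='Clothes')
#
# The path and proxy type above are hypothetical example values.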
def _packStringList(strings):
text = ''
index = []
for string in strings:
asbytes = bytearray(text,'utf-8')
index.append(len(asbytes))
text += string
text = np.fromstring(text, dtype='S1')
index = np.array(index, dtype=np.uint32)
return text, index
def _unpackStringList(text, index):
strings = []
last = None
for i in index:
if last is not None:
name = str(text[last:i].tostring(), 'utf8')
strings.append(name)
last = i
if last is not None:
name = str(text[last:].tostring(), 'utf8')
strings.append(name)
return strings
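# Illustrative round trip (comment only, not in the original module): the two helpers
# above store a list of strings as one flat byte array plus start offsets, which is
# the form np.savez_compressed() can serialize directly:
#
#     text, index = _packStringList(['hair', 'female', 'young'])
#     _unpackStringList(text, index)   # -> ['hair', 'female', 'young']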
def _getFilePath(filename, folder = None, altExtensions=None):
import getpath
if altExtensions is not None:
# Search for existing path with alternative file extension
for aExt in altExtensions:
if aExt.startswith('.'):
aExt = aExt[1:]
aFile = os.path.splitext(filename)[0]+'.'+aExt
aPath = _getFilePath(aFile, folder, altExtensions=None)
if os.path.isfile(aPath):
# Path found, return result with original extension
orgExt = os.path.splitext(filename)[1]
path = os.path.splitext(aPath)[0]+orgExt
return getpath.formatPath(path)
if not filename or not isinstance(filename, str):
return filename
searchPaths = []
# Search within current folder
if folder:
searchPaths.append(folder)
return getpath.thoroughFindFile(filename, searchPaths)
|
[
"getpath.thoroughFindFile",
"transformations.affine_matrix_from_points",
"material.Material",
"io.open",
"numpy.array",
"makehuman.getAssetLicense",
"getpath.formatPath",
"animation.VertexBoneWeights.fromFile",
"os.remove",
"log.notice",
"numpy.asarray",
"time.perf_counter",
"os.path.split",
"math.isnan",
"numpy.dot",
"getpath.getPath",
"numpy.savez_compressed",
"numpy.fromstring",
"log.message",
"numpy.identity",
"getpath.getJailedPath",
"collections.OrderedDict",
"log.warning",
"log.error",
"os.path.splitext",
"numpy.any",
"os.path.isfile",
"os.path.dirname",
"numpy.nonzero",
"os.path.getmtime",
"numpy.bincount",
"makehuman.getBasemeshVersion",
"os.path.join",
"os.utime",
"log.debug",
"numpy.zeros",
"os.path.basename",
"numpy.load",
"files3d.loadMesh"
] |
[((1684, 1705), 'numpy.identity', 'np.identity', (['(3)', 'float'], {}), '(3, float)\n', (1695, 1705), True, 'import numpy as np\n'), ((20405, 20424), 'io.open', 'io.open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (20412, 20424), False, 'import io\n'), ((20649, 20670), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (20664, 20670), False, 'import os\n'), ((21455, 21480), 'numpy.any', 'np.any', (['proxy.deleteVerts'], {}), '(proxy.deleteVerts)\n', (21461, 21480), True, 'import numpy as np\n'), ((22588, 22628), 'numpy.asarray', 'np.asarray', (['num_refverts'], {'dtype': 'np.int32'}), '(num_refverts, dtype=np.int32)\n', (22598, 22628), True, 'import numpy as np\n'), ((22783, 22815), 'numpy.savez_compressed', 'np.savez_compressed', (['fp'], {}), '(fp, **vars_)\n', (22802, 22815), True, 'import numpy as np\n'), ((22835, 22855), 'os.utime', 'os.utime', (['path', 'None'], {}), '(path, None)\n', (22843, 22855), False, 'import os\n'), ((22940, 22983), 'log.debug', 'log.debug', (['"""Loading binary proxy %s."""', 'path'], {}), "('Loading binary proxy %s.', path)\n", (22949, 22983), False, 'import log\n'), ((22999, 23012), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (23006, 23012), True, 'import numpy as np\n'), ((25144, 25173), 'material.Material', 'material.Material', (['proxy.name'], {}), '(proxy.name)\n', (25161, 25173), False, 'import material\n'), ((34523, 34540), 'numpy.asarray', 'np.asarray', (['verts'], {}), '(verts)\n', (34533, 34540), True, 'import numpy as np\n'), ((34552, 34624), 'numpy.asarray', 'np.asarray', (['[result[:, 0], result[:, 1], result[:, 2]]'], {'dtype': 'np.float32'}), '([result[:, 0], result[:, 1], result[:, 2]], dtype=np.float32)\n', (34562, 34624), True, 'import numpy as np\n'), ((34683, 34702), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (34696, 34702), False, 'import os\n'), ((35492, 35530), 'numpy.nonzero', 'np.nonzero', (['vertsMask[proxy.ref_vIdxs]'], {}), '(vertsMask[proxy.ref_vIdxs])\n', (35502, 35530), True, 'import numpy as np\n'), ((37731, 37776), 'io.open', 'io.open', (['proxyFilePath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(proxyFilePath, 'r', encoding='utf-8')\n", (37738, 37776), False, 'import io\n'), ((38337, 38368), 'numpy.fromstring', 'np.fromstring', (['text'], {'dtype': '"""S1"""'}), "(text, dtype='S1')\n", (38350, 38368), True, 'import numpy as np\n'), ((38381, 38413), 'numpy.array', 'np.array', (['index'], {'dtype': 'np.uint32'}), '(index, dtype=np.uint32)\n', (38389, 38413), True, 'import numpy as np\n'), ((39667, 39714), 'getpath.thoroughFindFile', 'getpath.thoroughFindFile', (['filename', 'searchPaths'], {}), '(filename, searchPaths)\n', (39691, 39714), False, 'import getpath\n'), ((1771, 1813), 'log.debug', 'log.debug', (['"""Loading proxy file: %s."""', 'file'], {}), "('Loading proxy file: %s.', file)\n", (1780, 1813), False, 'import log\n'), ((1977, 2004), 'makehuman.getAssetLicense', 'makehuman.getAssetLicense', ([], {}), '()\n', (2002, 2004), False, 'import makehuman\n'), ((2401, 2431), 'makehuman.getBasemeshVersion', 'makehuman.getBasemeshVersion', ([], {}), '()\n', (2429, 2431), False, 'import makehuman\n'), ((3856, 3884), 'material.Material', 'material.Material', (['self.name'], {}), '(self.name)\n', (3873, 3884), False, 'import material\n'), ((5452, 5507), 'files3d.loadMesh', 'files3d.loadMesh', (['self.obj_file'], {'maxFaces': 'self.max_pole'}), '(self.obj_file, maxFaces=self.max_pole)\n', (5468, 5507), False, 'import files3d\n'), ((6538, 6598), 'numpy.asarray', 'np.asarray', 
(['[v._weights for v in refVerts]'], {'dtype': 'np.float32'}), '([v._weights for v in refVerts], dtype=np.float32)\n', (6548, 6598), True, 'import numpy as np\n'), ((6624, 6681), 'numpy.asarray', 'np.asarray', (['[v._verts for v in refVerts]'], {'dtype': 'np.uint32'}), '([v._verts for v in refVerts], dtype=np.uint32)\n', (6634, 6681), True, 'import numpy as np\n'), ((6705, 6764), 'numpy.asarray', 'np.asarray', (['[v._offset for v in refVerts]'], {'dtype': 'np.float32'}), '([v._offset for v in refVerts], dtype=np.float32)\n', (6715, 6764), True, 'import numpy as np\n'), ((10438, 10451), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10449, 10451), False, 'from collections import OrderedDict\n'), ((14240, 14280), 'io.open', 'io.open', (['filepath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(filepath, 'r', encoding='utf-8')\n", (14247, 14280), False, 'import io\n'), ((20002, 20078), 'log.warning', 'log.warning', (['"""Proxy file %s does not specify a Z depth. Using 50."""', 'filepath'], {}), "('Proxy file %s does not specify a Z depth. Using 50.', filepath)\n", (20013, 20078), False, 'import log\n'), ((20359, 20394), 'getpath.getJailedPath', 'getpath.getJailedPath', (['path', 'folder'], {}), '(path, folder)\n', (20380, 20394), False, 'import getpath\n'), ((21617, 21658), 'numpy.asarray', 'np.asarray', (['proxy.z_depth'], {'dtype': 'np.int32'}), '(proxy.z_depth, dtype=np.int32)\n', (21627, 21658), True, 'import numpy as np\n'), ((21711, 21754), 'numpy.asarray', 'np.asarray', (['proxy.max_pole'], {'dtype': 'np.uint32'}), '(proxy.max_pole, dtype=np.uint32)\n', (21721, 21754), True, 'import numpy as np\n'), ((24464, 24504), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.uint32'}), '((num_refs, 3), dtype=np.uint32)\n', (24472, 24504), True, 'import numpy as np\n'), ((24580, 24621), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.float32'}), '((num_refs, 3), dtype=np.float32)\n', (24588, 24621), True, 'import numpy as np\n'), ((24645, 24686), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.float32'}), '((num_refs, 3), dtype=np.float32)\n', (24653, 24686), True, 'import numpy as np\n'), ((25806, 25878), 'log.warning', 'log.warning', (['"""Proxy file %s does not specify a Z depth. Using 50."""', 'path'], {}), "('Proxy file %s does not specify a Z depth. 
Using 50.', path)\n", (25817, 25878), False, 'import log\n'), ((26313, 26331), 'numpy.zeros', 'np.zeros', (['(3)', 'float'], {}), '(3, float)\n', (26321, 26331), True, 'import numpy as np\n'), ((26892, 26921), 'numpy.array', 'np.array', (['(d0, d1, d2)', 'float'], {}), '((d0, d1, d2), float)\n', (26900, 26921), True, 'import numpy as np\n'), ((33764, 33787), 'numpy.zeros', 'np.zeros', (['(3, 2)', 'float'], {}), '((3, 2), float)\n', (33772, 33787), True, 'import numpy as np\n'), ((33804, 33827), 'numpy.zeros', 'np.zeros', (['(3, 2)', 'float'], {}), '((3, 2), float)\n', (33812, 33827), True, 'import numpy as np\n'), ((34419, 34456), 'transformations.affine_matrix_from_points', 'affine_matrix_from_points', (['sbox', 'tbox'], {}), '(sbox, tbox)\n', (34444, 34456), False, 'from transformations import affine_matrix_from_points\n'), ((34730, 34756), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (34742, 34756), False, 'import os\n'), ((34782, 34817), 'os.path.join', 'os.path.join', (['folder', '(file + suffix)'], {}), '(folder, file + suffix)\n', (34794, 34817), False, 'import os\n'), ((35384, 35420), 'numpy.any', 'np.any', (['proxy.weights[:, 1:]'], {'axis': '(1)'}), '(proxy.weights[:, 1:], axis=1)\n', (35390, 35420), True, 'import numpy as np\n'), ((35627, 35653), 'numpy.bincount', 'np.bincount', (['unmasked_rows'], {}), '(unmasked_rows)\n', (35638, 35653), True, 'import numpy as np\n'), ((35807, 35837), 'numpy.nonzero', 'np.nonzero', (['(unmasked_count < 2)'], {}), '(unmasked_count < 2)\n', (35817, 35837), True, 'import numpy as np\n'), ((2285, 2307), 'os.path.getmtime', 'os.path.getmtime', (['file'], {}), '(file)\n', (2301, 2307), False, 'import os\n'), ((4196, 4222), 'os.path.dirname', 'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4211, 4222), False, 'import os\n'), ((4359, 4385), 'os.path.dirname', 'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4374, 4385), False, 'import os\n'), ((4547, 4573), 'os.path.dirname', 'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4562, 4573), False, 'import os\n'), ((5543, 5588), 'log.error', 'log.error', (['"""Failed to load %s"""', 'self.obj_file'], {}), "('Failed to load %s', self.obj_file)\n", (5552, 5588), False, 'import log\n'), ((10932, 10985), 'log.debug', 'log.debug', (["('remapping weights for proxy ' + self.name)"], {}), "('remapping weights for proxy ' + self.name)\n", (10941, 10985), False, 'import log\n'), ((11006, 11025), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11023, 11025), False, 'import time\n'), ((11670, 11689), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11687, 11689), False, 'import time\n'), ((11951, 12036), 'log.debug', 'log.debug', (['"""remapping weights for %s took %.5f seconds"""', 'self.name', '(stop - start)'], {}), "('remapping weights for %s took %.5f seconds', self.name, stop - start\n )\n", (11960, 12036), False, 'import log\n'), ((14049, 14112), 'log.error', 'log.error', (['"""Unable to load proxy file: %s"""', 'path'], {'exc_info': '(True)'}), "('Unable to load proxy file: %s', path, exc_info=True)\n", (14058, 14112), False, 'import log\n'), ((14309, 14350), 'log.error', 'log.error', (['"""*** Cannot open %s"""', 'filepath'], {}), "('*** Cannot open %s', filepath)\n", (14318, 14350), False, 'import log\n'), ((14421, 14446), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (14436, 14446), False, 'import os\n'), ((20795, 20832), 'numpy.fromstring', 'np.fromstring', (['proxy.name'], {'dtype': 
'"""S1"""'}), "(proxy.name, dtype='S1')\n", (20808, 20832), True, 'import numpy as np\n'), ((20849, 20886), 'numpy.fromstring', 'np.fromstring', (['proxy.uuid'], {'dtype': '"""S1"""'}), "(proxy.uuid, dtype='S1')\n", (20862, 20886), True, 'import numpy as np\n'), ((20910, 20954), 'numpy.fromstring', 'np.fromstring', (['proxy.description'], {'dtype': '"""S1"""'}), "(proxy.description, dtype='S1')\n", (20923, 20954), True, 'import numpy as np\n'), ((20975, 21016), 'numpy.fromstring', 'np.fromstring', (['proxy.basemesh'], {'dtype': '"""S1"""'}), "(proxy.basemesh, dtype='S1')\n", (20988, 21016), True, 'import numpy as np\n'), ((21277, 21318), 'numpy.asarray', 'np.asarray', (['proxy.version'], {'dtype': 'np.int32'}), '(proxy.version, dtype=np.int32)\n', (21287, 21318), True, 'import numpy as np\n'), ((25711, 25767), 'animation.VertexBoneWeights.fromFile', 'VertexBoneWeights.fromFile', (['proxy.vertexBoneWeights_file'], {}), '(proxy.vertexBoneWeights_file)\n', (25737, 25767), False, 'from animation import VertexBoneWeights\n'), ((27277, 27319), 'numpy.dot', 'np.dot', (['hcoord[self._verts]', 'self._weights'], {}), '(hcoord[self._verts], self._weights)\n', (27283, 27319), True, 'import numpy as np\n'), ((27334, 27362), 'numpy.dot', 'np.dot', (['matrix', 'self._offset'], {}), '(matrix, self._offset)\n', (27340, 27362), True, 'import numpy as np\n'), ((32977, 32998), 'numpy.identity', 'np.identity', (['(3)', 'float'], {}), '(3, float)\n', (32988, 32998), True, 'import numpy as np\n'), ((37282, 37304), 'numpy.load', 'np.load', (['proxyFilePath'], {}), '(proxyFilePath)\n', (37289, 37304), True, 'import numpy as np\n'), ((39217, 39238), 'os.path.isfile', 'os.path.isfile', (['aPath'], {}), '(aPath)\n', (39231, 39238), False, 'import os\n'), ((1872, 1894), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (1888, 1894), False, 'import os\n'), ((12229, 12251), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (12245, 12251), False, 'import os\n'), ((12286, 12308), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (12302, 12308), False, 'import os\n'), ((12374, 12397), 'os.path.isfile', 'os.path.isfile', (['npzpath'], {}), '(npzpath)\n', (12388, 12397), False, 'import os\n'), ((12415, 12470), 'log.message', 'log.message', (['"""compiled proxy file missing: %s"""', 'npzpath'], {}), "('compiled proxy file missing: %s', npzpath)\n", (12426, 12470), False, 'import log\n'), ((12565, 12590), 'os.path.isfile', 'os.path.isfile', (['asciipath'], {}), '(asciipath)\n', (12579, 12590), False, 'import os\n'), ((12668, 12727), 'log.message', 'log.message', (['"""compiled proxy file out of date: %s"""', 'npzpath'], {}), "('compiled proxy file out of date: %s', npzpath)\n", (12679, 12727), False, 'import log\n'), ((12962, 13032), 'log.warning', 'log.warning', (['"""Problem loading binary proxy: %s"""', 'e'], {'exc_info': 'showTrace'}), "('Problem loading binary proxy: %s', e, exc_info=showTrace)\n", (12973, 13032), False, 'import log\n'), ((28458, 28494), 'numpy.asarray', 'np.asarray', (['scales'], {'dtype': 'np.float32'}), '(scales, dtype=np.float32)\n', (28468, 28494), True, 'import numpy as np\n'), ((28517, 28551), 'numpy.asarray', 'np.asarray', (['vidxs'], {'dtype': 'np.uint32'}), '(vidxs, dtype=np.uint32)\n', (28527, 28551), True, 'import numpy as np\n'), ((29077, 29113), 'numpy.asarray', 'np.asarray', (['shears'], {'dtype': 'np.float32'}), '(shears, dtype=np.float32)\n', (29087, 29113), True, 'import numpy as np\n'), ((29136, 29170), 'numpy.asarray', 'np.asarray', 
(['vidxs'], {'dtype': 'np.uint32'}), '(vidxs, dtype=np.uint32)\n', (29146, 29170), True, 'import numpy as np\n'), ((37580, 37688), 'log.warning', 'log.warning', (['"""Problem loading metadata from binary proxy, trying ASCII file: %s"""', 'e'], {'exc_info': 'showTrace'}), "('Problem loading metadata from binary proxy, trying ASCII file: %s'\n , e, exc_info=showTrace)\n", (37591, 37688), False, 'import log\n'), ((39443, 39467), 'getpath.formatPath', 'getpath.formatPath', (['path'], {}), '(path)\n', (39461, 39467), False, 'import getpath\n'), ((12595, 12622), 'os.path.getmtime', 'os.path.getmtime', (['asciipath'], {}), '(asciipath)\n', (12611, 12622), False, 'import os\n'), ((12625, 12650), 'os.path.getmtime', 'os.path.getmtime', (['npzpath'], {}), '(npzpath)\n', (12641, 12650), False, 'import os\n'), ((13198, 13215), 'getpath.getPath', 'getpath.getPath', ([], {}), '()\n', (13213, 13215), False, 'import getpath\n'), ((13956, 14028), 'log.debug', 'log.debug', (['"""Not writing compiled proxies to system paths (%s)."""', 'npzpath'], {}), "('Not writing compiled proxies to system paths (%s).', npzpath)\n", (13965, 14028), False, 'import log\n'), ((30332, 30349), 'math.isnan', 'math.isnan', (['scale'], {}), '(scale)\n', (30342, 30349), False, 'import math\n'), ((34197, 34249), 'numpy.array', 'np.array', (['(sfaces[0, i], sfaces[1, j], sfaces[2, k])'], {}), '((sfaces[0, i], sfaces[1, j], sfaces[2, k]))\n', (34205, 34249), True, 'import numpy as np\n'), ((34280, 34332), 'numpy.array', 'np.array', (['(tfaces[0, i], tfaces[1, j], tfaces[2, k])'], {}), '((tfaces[0, i], tfaces[1, j], tfaces[2, k]))\n', (34288, 34332), True, 'import numpy as np\n'), ((36961, 36986), 'os.path.isfile', 'os.path.isfile', (['asciipath'], {}), '(asciipath)\n', (36975, 36986), False, 'import os\n'), ((39333, 39359), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (39349, 39359), False, 'import os\n'), ((10856, 10894), 'log.debug', 'log.debug', (['"""The skeleton is different"""'], {}), "('The skeleton is different')\n", (10865, 10894), False, 'import log\n'), ((13330, 13384), 'log.message', 'log.message', (['"""Compiling binary proxy file %s"""', 'npzpath'], {}), "('Compiling binary proxy file %s', npzpath)\n", (13341, 13384), False, 'import log\n'), ((36734, 36765), 'os.path.splitext', 'os.path.splitext', (['proxyFilePath'], {}), '(proxyFilePath)\n', (36750, 36765), False, 'import os\n'), ((36872, 36903), 'os.path.splitext', 'os.path.splitext', (['proxyFilePath'], {}), '(proxyFilePath)\n', (36888, 36903), False, 'import os\n'), ((36991, 37018), 'os.path.getmtime', 'os.path.getmtime', (['asciipath'], {}), '(asciipath)\n', (37007, 37018), False, 'import os\n'), ((37021, 37052), 'os.path.getmtime', 'os.path.getmtime', (['proxyFilePath'], {}), '(proxyFilePath)\n', (37037, 37052), False, 'import os\n'), ((39095, 39121), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (39111, 39121), False, 'import os\n'), ((39386, 39409), 'os.path.splitext', 'os.path.splitext', (['aPath'], {}), '(aPath)\n', (39402, 39409), False, 'import os\n'), ((13491, 13562), 'log.notice', 'log.notice', (['"""unable to save compiled proxy: %s"""', 'npzpath'], {'exc_info': '(True)'}), "('unable to save compiled proxy: %s', npzpath, exc_info=True)\n", (13501, 13562), False, 'import log\n'), ((13586, 13609), 'os.path.isfile', 'os.path.isfile', (['npzpath'], {}), '(npzpath)\n', (13600, 13609), False, 'import os\n'), ((13743, 13761), 'os.remove', 'os.remove', (['npzpath'], {}), '(npzpath)\n', (13752, 13761), 
False, 'import os\n'), ((13837, 13925), 'log.warning', 'log.warning', (['"""Could not remove empty file %s that was left behind (%s)."""', 'npzpath', 'e'], {}), "('Could not remove empty file %s that was left behind (%s).',\n npzpath, e)\n", (13848, 13925), False, 'import log\n'), ((16540, 16596), 'animation.VertexBoneWeights.fromFile', 'VertexBoneWeights.fromFile', (['proxy.vertexBoneWeights_file'], {}), '(proxy.vertexBoneWeights_file)\n', (16566, 16596), False, 'from animation import VertexBoneWeights\n'), ((16686, 16817), 'log.warning', 'log.warning', (['"""Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead."""'], {}), '(\n \'Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead.\'\n )\n', (16697, 16817), False, 'import log\n'), ((16891, 17009), 'log.warning', 'log.warning', (['"""Deprecated parameter "transparent" used in proxy file. Set property in material file instead."""'], {}), '(\n \'Deprecated parameter "transparent" used in proxy file. Set property in material file instead.\'\n )\n', (16902, 17009), False, 'import log\n'), ((18829, 18914), 'log.warning', 'log.warning', (['"""Deprecated parameter "%s" used in proxy file. Please remove."""', 'key'], {}), '(\'Deprecated parameter "%s" used in proxy file. Please remove.\', key\n )\n', (18840, 18914), False, 'import log\n'), ((19893, 19964), 'log.warning', 'log.warning', (['"""Unknown keyword %s found in proxy file %s"""', 'key', 'filepath'], {}), "('Unknown keyword %s found in proxy file %s', key, filepath)\n", (19904, 19964), False, 'import log\n')]
|
import numpy as np
import unittest
import pytest
from pysph.base.particle_array import ParticleArray
import pysph.tools.mesh_tools as G
from pysph.base.utils import get_particle_array
# Data of a unit length cube
def cube_data():
points = np.array([[0., 0., 0.],
[0., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]])
x_cube, y_cube, z_cube = points.T
cells = np.array([[0, 1, 2],
[0, 2, 3],
[0, 4, 5],
[0, 5, 1],
[0, 3, 6],
[0, 6, 4],
[4, 6, 7],
[4, 7, 5],
[3, 2, 7],
[3, 7, 6],
[1, 5, 7],
[1, 7, 2]])
normals = np.array([[0., 0., -1.],
[0., 0., -1.],
[-1., 0., 0.],
[-1., 0., 0.],
[0., -1., 0.],
[0., -1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 1., 0.]])
vectors = np.zeros((len(cells), 3, 3))
for i, cell in enumerate(cells):
idx1, idx2, idx3 = cell
vector = np.array([[x_cube[idx1], y_cube[idx1], z_cube[idx1]],
[x_cube[idx2], y_cube[idx2], z_cube[idx2]],
[x_cube[idx3], y_cube[idx3], z_cube[idx3]]])
vectors[i] = vector
return x_cube, y_cube, z_cube, cells, normals, vectors
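# Illustrative sketch (not part of the original tests): the fixture above feeds the
# mesh tools directly; sampling a point cloud on the cube surface with spacing h
# looks like
#
#     x, y, z, cells, normals, vectors = cube_data()
#     xs, ys, zs = G.surface_points(x, y, z, cells, 0.1)
#
# which is what TestGeometry.test_get_surface_points below verifies.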
class TestGeometry(unittest.TestCase):
def test_in_triangle(self):
assert(G._in_triangle(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5) is True)
assert(G._in_triangle(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0) is False)
def test_interp_2d(self):
# Check interpolation between two points on line y=x
dx = 0.1
r = G._interp_2d(np.array([0., 0.]), np.array([1., 1.]), dx)
# Check if all points satisfy y=x
np.testing.assert_array_almost_equal(
r[:, 0] - r[:, 1], np.zeros(r.shape[0]))
# Check if distance between consecutive points is lesser than dx
np.testing.assert_array_less(np.linalg.norm(r[1:] - r[0:-1], axis=1),
np.ones(r.shape[0] - 1) * dx)
def test_fill_triangle(self):
triangle = np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]])
dx_triangle = 0.1
x, y, z = G._fill_triangle(triangle, dx_triangle)
EPS = np.finfo(float).eps
np.testing.assert_array_less(-x, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-y, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-(x + y), np.ones(x.shape[0]) + EPS)
np.testing.assert_almost_equal(z, np.zeros(x.shape[0]))
def test_fill_triangle_throws_zero_area_triangle_exception(self):
self.assertRaises(G.ZeroAreaTriangleException, G._fill_triangle,
np.zeros((3, 3)), 0.5)
def test_fill_triangle_throws_polygon_mesh_error(self):
self.assertRaises(G.PolygonMeshError, G._fill_triangle,
np.zeros((4, 3)), 0.5)
def test_get_points_from_mgrid(self):
"""Find neighbouring particles around a unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z, x_list, y_list, z_list, vectors = \
G._get_surface_mesh(x_cube, y_cube, z_cube, cells, h, uniform=True)
pa_mesh = ParticleArray(name='mesh', x=x, y=y, z=z, h=h)
offset = h
x_grid, y_grid, z_grid = np.meshgrid(
np.arange(x.min() - offset, x.max() + offset, h),
np.arange(y.min() - offset, y.max() + offset, h),
np.arange(z.min() - offset, z.max() + offset, h))
pa_grid = ParticleArray(name='grid', x=x_grid, y=y_grid, z=z_grid, h=h)
x_grid, y_grid, z_grid = G.get_points_from_mgrid(
pa_grid, pa_mesh, x_list, y_list, z_list, 1, h, vectors, normals
)
for i in range(x.shape[0]):
assert((x[i] ** 2 + y[i] ** 2 + z[i] ** 2) <= 4)
def _cube_assert(self, x, y, z, h):
"""Check if x,y,z lie within surface of thickness `h` of a unit cube"""
def surface1(x, y, z): return min(abs(x), abs(1 - x)) < h and \
y > -h and y < 1 + h and z > -h and z < 1 + h
def on_surface(x, y, z): return surface1(x, y, z) or \
surface1(y, x, z) or surface1(z, x, y)
for i in range(x.shape[0]):
assert on_surface(x[i], y[i], z[i])
def test_get_surface_mesh(self):
"""Check if mesh is generated correctly for unit cube"""
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G._get_surface_mesh(x_cube, y_cube, z_cube, cells, 0.1)
h = np.finfo(float).eps
self._cube_assert(x, y, z, h)
def test_get_surface_points(self):
"""Check if surface is generated correctly for unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G.surface_points(x_cube, y_cube, z_cube, cells, h)
self._cube_assert(x, y, z, h)
def test_get_surface_points_uniform(self):
"""Check if uniform surface is generated correctly for unit cube"""
h = 0.1
x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
x, y, z = G.surf_points_uniform(x_cube, y_cube, z_cube,
cells, normals, 1.0, 1.0)
self._cube_assert(x, y, z, h)
def test_prism(self):
tri_normal = np.array([0, -1, 0])
tri_points = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 1]])
h = 1/1.5
prism_normals, prism_points, prism_face_centres = \
G.prism(tri_normal, tri_points, h)
assert np.array([-1, 0, 0]) in prism_normals
assert np.array([0, 1, 0]) in prism_points
assert np.array([0.5, 0.5, 0]) in prism_face_centres
if __name__ == "__main__":
unittest.main()
|
[
"pysph.tools.mesh_tools.get_points_from_mgrid",
"numpy.ones",
"pysph.tools.mesh_tools.prism",
"pysph.tools.mesh_tools.surface_points",
"pysph.base.particle_array.ParticleArray",
"pysph.tools.mesh_tools.surf_points_uniform",
"pysph.tools.mesh_tools._get_surface_mesh",
"pysph.tools.mesh_tools._in_triangle",
"numpy.array",
"numpy.zeros",
"pysph.tools.mesh_tools._fill_triangle",
"numpy.linalg.norm",
"unittest.main",
"numpy.finfo"
] |
[((245, 396), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, \n 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0\n ], [0.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])\n', (253, 396), True, 'import numpy as np\n'), ((581, 727), 'numpy.array', 'np.array', (['[[0, 1, 2], [0, 2, 3], [0, 4, 5], [0, 5, 1], [0, 3, 6], [0, 6, 4], [4, 6, 7\n ], [4, 7, 5], [3, 2, 7], [3, 7, 6], [1, 5, 7], [1, 7, 2]]'], {}), '([[0, 1, 2], [0, 2, 3], [0, 4, 5], [0, 5, 1], [0, 3, 6], [0, 6, 4],\n [4, 6, 7], [4, 7, 5], [3, 2, 7], [3, 7, 6], [1, 5, 7], [1, 7, 2]])\n', (589, 727), True, 'import numpy as np\n'), ((981, 1210), 'numpy.array', 'np.array', (['[[0.0, 0.0, -1.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [\n 0.0, -1.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [\n 1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, -1.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [-1.0, 0.0,\n 0.0], [0.0, -1.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, \n 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]])\n', (989, 1210), True, 'import numpy as np\n'), ((6425, 6440), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6438, 6440), False, 'import unittest\n'), ((1581, 1728), 'numpy.array', 'np.array', (['[[x_cube[idx1], y_cube[idx1], z_cube[idx1]], [x_cube[idx2], y_cube[idx2],\n z_cube[idx2]], [x_cube[idx3], y_cube[idx3], z_cube[idx3]]]'], {}), '([[x_cube[idx1], y_cube[idx1], z_cube[idx1]], [x_cube[idx2], y_cube\n [idx2], z_cube[idx2]], [x_cube[idx3], y_cube[idx3], z_cube[idx3]]])\n', (1589, 1728), True, 'import numpy as np\n'), ((2689, 2750), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\n', (2697, 2750), True, 'import numpy as np\n'), ((2844, 2883), 'pysph.tools.mesh_tools._fill_triangle', 'G._fill_triangle', (['triangle', 'dx_triangle'], {}), '(triangle, dx_triangle)\n', (2860, 2883), True, 'import pysph.tools.mesh_tools as G\n'), ((3813, 3880), 'pysph.tools.mesh_tools._get_surface_mesh', 'G._get_surface_mesh', (['x_cube', 'y_cube', 'z_cube', 'cells', 'h'], {'uniform': '(True)'}), '(x_cube, y_cube, z_cube, cells, h, uniform=True)\n', (3832, 3880), True, 'import pysph.tools.mesh_tools as G\n'), ((3899, 3945), 'pysph.base.particle_array.ParticleArray', 'ParticleArray', ([], {'name': '"""mesh"""', 'x': 'x', 'y': 'y', 'z': 'z', 'h': 'h'}), "(name='mesh', x=x, y=y, z=z, h=h)\n", (3912, 3945), False, 'from pysph.base.particle_array import ParticleArray\n'), ((4215, 4276), 'pysph.base.particle_array.ParticleArray', 'ParticleArray', ([], {'name': '"""grid"""', 'x': 'x_grid', 'y': 'y_grid', 'z': 'z_grid', 'h': 'h'}), "(name='grid', x=x_grid, y=y_grid, z=z_grid, h=h)\n", (4228, 4276), False, 'from pysph.base.particle_array import ParticleArray\n'), ((4310, 4403), 'pysph.tools.mesh_tools.get_points_from_mgrid', 'G.get_points_from_mgrid', (['pa_grid', 'pa_mesh', 'x_list', 'y_list', 'z_list', '(1)', 'h', 'vectors', 'normals'], {}), '(pa_grid, pa_mesh, x_list, y_list, z_list, 1, h,\n vectors, normals)\n', (4333, 4403), True, 'import pysph.tools.mesh_tools as G\n'), ((5163, 5218), 'pysph.tools.mesh_tools._get_surface_mesh', 'G._get_surface_mesh', (['x_cube', 'y_cube', 'z_cube', 'cells', '(0.1)'], {}), '(x_cube, y_cube, z_cube, cells, 0.1)\n', (5182, 5218), True, 'import 
pysph.tools.mesh_tools as G\n'), ((5501, 5551), 'pysph.tools.mesh_tools.surface_points', 'G.surface_points', (['x_cube', 'y_cube', 'z_cube', 'cells', 'h'], {}), '(x_cube, y_cube, z_cube, cells, h)\n', (5517, 5551), True, 'import pysph.tools.mesh_tools as G\n'), ((5818, 5889), 'pysph.tools.mesh_tools.surf_points_uniform', 'G.surf_points_uniform', (['x_cube', 'y_cube', 'z_cube', 'cells', 'normals', '(1.0)', '(1.0)'], {}), '(x_cube, y_cube, z_cube, cells, normals, 1.0, 1.0)\n', (5839, 5889), True, 'import pysph.tools.mesh_tools as G\n'), ((6016, 6036), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (6024, 6036), True, 'import numpy as np\n'), ((6058, 6101), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 0, 1]])\n', (6066, 6101), True, 'import numpy as np\n'), ((6192, 6226), 'pysph.tools.mesh_tools.prism', 'G.prism', (['tri_normal', 'tri_points', 'h'], {}), '(tri_normal, tri_points, h)\n', (6199, 6226), True, 'import pysph.tools.mesh_tools as G\n'), ((1954, 2008), 'pysph.tools.mesh_tools._in_triangle', 'G._in_triangle', (['(0.5)', '(0.5)', '(0.0)', '(0.0)', '(1.5)', '(0.0)', '(0.0)', '(1.5)'], {}), '(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5)\n', (1968, 2008), True, 'import pysph.tools.mesh_tools as G\n'), ((2033, 2087), 'pysph.tools.mesh_tools._in_triangle', 'G._in_triangle', (['(1.0)', '(1.0)', '(0.0)', '(0.0)', '(1.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0)\n', (2047, 2087), True, 'import pysph.tools.mesh_tools as G\n'), ((2232, 2252), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2240, 2252), True, 'import numpy as np\n'), ((2252, 2272), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2260, 2272), True, 'import numpy as np\n'), ((2395, 2415), 'numpy.zeros', 'np.zeros', (['r.shape[0]'], {}), '(r.shape[0])\n', (2403, 2415), True, 'import numpy as np\n'), ((2527, 2566), 'numpy.linalg.norm', 'np.linalg.norm', (['(r[1:] - r[0:-1])'], {'axis': '(1)'}), '(r[1:] - r[0:-1], axis=1)\n', (2541, 2566), True, 'import numpy as np\n'), ((2898, 2913), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2906, 2913), True, 'import numpy as np\n'), ((3172, 3192), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3180, 3192), True, 'import numpy as np\n'), ((3364, 3380), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3372, 3380), True, 'import numpy as np\n'), ((3538, 3554), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (3546, 3554), True, 'import numpy as np\n'), ((5231, 5246), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5239, 5246), True, 'import numpy as np\n'), ((6242, 6262), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (6250, 6262), True, 'import numpy as np\n'), ((6295, 6314), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (6303, 6314), True, 'import numpy as np\n'), ((6346, 6369), 'numpy.array', 'np.array', (['[0.5, 0.5, 0]'], {}), '([0.5, 0.5, 0])\n', (6354, 6369), True, 'import numpy as np\n'), ((2605, 2628), 'numpy.ones', 'np.ones', (['(r.shape[0] - 1)'], {}), '(r.shape[0] - 1)\n', (2612, 2628), True, 'import numpy as np\n'), ((2959, 2979), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (2967, 2979), True, 'import numpy as np\n'), ((3028, 3048), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3036, 3048), True, 'import numpy as np\n'), ((3103, 3122), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), 
'(x.shape[0])\n', (3110, 3122), True, 'import numpy as np\n')]
|
import xarray as xr
import pandas as pd
import cartopy
import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import numpy as np
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import shapely.geometry as sgeom
import cartopy.feature as cfeature
from cartopy.util import add_cyclic_point  # needed by the plotting functions below
from copy import copy
# Define functions for plotting
def find_side(ls, side):
"""
Given a shapely LineString which is assumed to be rectangular, return the
line corresponding to a given side of the rectangle.
"""
minx, miny, maxx, maxy = ls.bounds
points = {'left': [(minx, miny), (minx, maxy)],
'right': [(maxx, miny), (maxx, maxy)],
'bottom': [(minx, miny), (maxx, miny)],
'top': [(minx, maxy), (maxx, maxy)],}
return sgeom.LineString(points[side])
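# Illustrative sketch (not part of the original script): given the rectangular
# outline of a map axes as a LineString, find_side() picks out one edge, e.g.
#
#     outline = sgeom.LineString([(0, 0), (4, 0), (4, 3), (0, 3), (0, 0)])
#     bottom = find_side(outline, 'bottom')   # LineString from (0, 0) to (4, 0)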
def lambert_xticks(ax, ticks):
"""
Draw ticks on the bottom x-axis of a Lambert Conformal projection.
"""
te = lambda xy: xy[0]
lc = lambda t, n, b: np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T
xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', lc, te)
ax.xaxis.tick_bottom()
ax.set_xticks(xticks)
ax.set_xticklabels([ax.xaxis.get_major_formatter()(xtick) for xtick in xticklabels])
def lambert_yticks(ax, ticks):
"""
Draw ticks on the left y-axis of a Lambert Conformal projection.
"""
te = lambda xy: xy[1]
lc = lambda t, n, b: np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T
yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', lc, te)
ax.yaxis.tick_left()
ax.set_yticks(yticks)
ax.set_yticklabels([ax.yaxis.get_major_formatter()(ytick) for ytick in yticklabels])
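# Illustrative sketch (not part of the original script): these helpers exist because
# matplotlib cannot place lon/lat ticks on non-rectangular projections such as
# Lambert Conformal by itself. A typical call sequence (extent and tick values are
# examples only):
#
#     fig = plt.figure()
#     ax = plt.axes(projection=ccrs.LambertConformal())
#     ax.set_extent([-100, -60, -40, 10], crs=ccrs.PlateCarree())
#     fig.canvas.draw()                 # draw first so the axes outline exists
#     lambert_xticks(ax, list(np.arange(-100, -50, 10)))
#     lambert_yticks(ax, list(np.arange(-40, 20, 10)))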
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
"""
Get the tick locations and labels for an axis of a Lambert Conformal projection.
"""
outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
axis = find_side(outline_patch, tick_location)
n_steps = 30
extent = ax.get_extent(ccrs.PlateCarree())
_ticks = []
for t in ticks:
xy = line_constructor(t, n_steps, extent)
proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
xyt = proj_xyz[..., :2]
ls = sgeom.LineString(xyt.tolist())
locs = axis.intersection(ls)
if not locs:
tick = [None]
else:
tick = tick_extractor(locs.xy)
_ticks.append(tick[0])
# Remove ticks that aren't visible:
ticklabels = copy(ticks)
while True:
try:
index = _ticks.index(None)
except ValueError:
break
_ticks.pop(index)
ticklabels.pop(index)
return _ticks, ticklabels
def plot_250hPa_winds(lon, lat, u, v, wspd, mode):
"""
    Plot filled contours overlaid with vectors
Input
-------
lon = lon values extracted from xarray dataset (1-D)
lat = lat values extracted from xarray dataset (1-D)
u = U-wind at 250 hPa, shape = lon X lat
v = V-wind at 250 hPa, shape = lon X lat
wspd = Wind speed at 250 hPa, shape = lon X lat
mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days
Output
--------
    matplotlib figure with filled contours of wind speed overlaid with wind vectors
"""
# change data and lon to cyclic coordinates
u, lon_new = add_cyclic_point(u.values, coord = lon.values)
v, lon_new = add_cyclic_point(v.values, coord = lon.values)
wspd, lon = add_cyclic_point(wspd.values, coord = lon.values)
# Create a figure
fig = plt.figure(figsize = (10, 5))
# Set the GeoAxes to the PlateCarree projection
ax = plt.axes(projection = ccrs.PlateCarree())
# Add coastlines
ax.coastlines('50m', linewidth = 0.8)
# Assign data for filled contour
data = wspd
if mode == 'EM' or mode == 'LM':
# Plot filled contours
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
cmap = get_cmap("viridis"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('m/s', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 20., label = '20 m/s',
coordinates='axes', labelpos='E')
elif mode == 'A':
# Plot filled contours
maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
normmax = np.amax([maxval, minval])
norm = mpl.colors.Normalize(vmin = -normmax, vmax = normmax)
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
norm = norm, cmap = get_cmap("RdBu_r"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('m/s', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 3., label = '3 m/s',
coordinates = 'axes', labelpos = 'E')
# *must* call draw in order to get the axis boundary used to add ticks:
fig.canvas.draw()
# Add the tick marks
xticks = np.arange(0., 360., 30.)
yticks = np.arange(-90., 100., 15.)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
lambert_xticks(ax, xticks)
lambert_yticks(ax, yticks)
# Set title and figure name
if mode == 'LM':
plt.title('250 hPa Winds'+'\n'+'long term mean', fontsize=18)
pname = 'p250_longterm.png'
elif mode == 'EM':
plt.title('250 hPa Winds'+'\n'+'extreme precipitation days', fontsize=18)
pname = 'p250_extreme.png'
elif mode == 'A':
plt.title('250 hPa Winds'+'\n'+'anomaly fields', fontsize=18)
pname = 'p250_anom.png'
ax.set_global(); ax.gridlines();
plt.tight_layout()
#plot_dir = '/mnt/a/u/sciteam/chug/Laplata_tracers/plots/dipole_assessment/'
#pname = plot_dir + name + '.png'
plt.savefig(pname, bbox_inches = 'tight')
plt.show()
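# Illustrative usage (not part of the original script): the plotting functions in this
# file take 1-D lon/lat coordinates plus 2-D (lat x lon) DataArrays, e.g. from a
# reanalysis file; the file name and variable names below are hypothetical:
#
#     ds = xr.open_dataset('era_250hPa_composite.nc')
#     plot_250hPa_winds(ds['lon'], ds['lat'], ds['u'], ds['v'], ds['wspd'], mode='LM')
#
# plot_500hPa_winds_geopot(), plot_850hPa() and plot_sfc_winds_skt() below follow the
# same pattern with their respective fields.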
def plot_500hPa_winds_geopot(lon, lat, u, v, z, mode):
"""
    Plot filled contours overlaid with vectors
Input
-------
lon = lon values extracted from xarray dataset (1-D)
lat = lat values extracted from xarray dataset (1-D)
u = U-wind at 500 hPa, shape = lon X lat
v = V-wind at 500 hPa, shape = lon X lat
z = Geopotential height at 500 hPa, shape = lon X lat
mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days
Output
--------
    matplotlib figure with filled contours of geopotential height overlaid with wind vectors
"""
# change data and lon to cyclic coordinates
u, lon_new = add_cyclic_point(u.values, coord = lon.values)
v, lon_new = add_cyclic_point(v.values, coord = lon.values)
z, lon = add_cyclic_point(z.values, coord = lon.values)
# Create a figure
fig = plt.figure(figsize = (10, 5))
# Set the GeoAxes to the PlateCarree projection
ax = plt.axes(projection = ccrs.PlateCarree())
# Add coastlines
ax.coastlines('50m', linewidth=0.8)
# Assign data for filled contour
data = z
if mode == 'EM' or mode == 'LM':
# Plot filled contours
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
cmap = get_cmap("viridis"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('m', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 10., label = '10 m/s',
coordinates = 'axes', labelpos = 'E')
elif mode == 'A':
# Plot filled contours
maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
normmax = np.amax([maxval, minval])
norm = mpl.colors.Normalize(vmin = -normmax, vmax = normmax)
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
norm = norm, cmap = get_cmap("RdBu_r"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('m', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 3., label = '3 m/s',
coordinates = 'axes', labelpos = 'E')
# *must* call draw in order to get the axis boundary used to add ticks:
fig.canvas.draw()
# Add the tick marks
xticks = np.arange(0., 360., 30.)
yticks = np.arange(-90., 100., 15.)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
lambert_xticks(ax, xticks)
lambert_yticks(ax, yticks)
#Set title and figure name
if mode == 'LM':
plt.title('500 hPa Winds, GPH'+'\n'+'long term mean', fontsize=18)
pname = 'p500_longterm.png'
elif mode == 'EM':
plt.title('500 hPa Winds, GPH'+'\n'+'extreme precipitation days', fontsize=18)
pname = 'p500_extreme.png'
elif mode == 'A':
plt.title('500 hPa Winds, GPH'+'\n'+'anomaly fields', fontsize=18)
pname = 'p500_anom.png'
ax.set_global(); ax.gridlines();
plt.tight_layout()
#plot_dir = '/mnt/a/u/sciteam/chug/Laplata_tracers/plots/dipole_assessment/'
#pname = plot_dir + name + '.png'
plt.savefig(pname, bbox_inches = 'tight')
plt.show()
def plot_850hPa(lon, lat, u, v, t, q, mode):
"""
    Plot filled contours overlaid with contours and vectors
Input
-------
lon = lon values extracted from xarray dataset (1-D)
lat = lat values extracted from xarray dataset (1-D)
u = U-wind at 850 hPa, shape = lon X lat
v = V-wind at 850 hPa, shape = lon X lat
t = Temperature at 850 hPa, shape = lon X lat
q = Specific humidity at 850 hPa, shape = lon X lat
mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days
Output
--------
    matplotlib figure with filled contours of temperature overlaid with contours of specific humidity and wind vectors
"""
# change data and lon to cyclic coordinates
u, lon_new = add_cyclic_point(u.values, coord = lon.values)
v, lon_new = add_cyclic_point(v.values, coord = lon.values)
q, lon_new = add_cyclic_point(q.values, coord = lon.values)
t, lon = add_cyclic_point(t.values, coord = lon.values)
# Create a figure
fig = plt.figure(figsize = (10, 5))
# Set the GeoAxes to the PlateCarree projection
ax = plt.axes(projection = ccrs.PlateCarree())
# Add coastlines
ax.coastlines('50m', linewidth = 0.8)
# Assign data for filled contour
data = t
if mode == 'EM' or mode == 'LM':
# Plot filled contours
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
cmap = get_cmap("viridis"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$^{o}C$', fontsize = 18)
# Plot contours
plt.contour(lon, lat, q, transform = ccrs.PlateCarree(), colors = 'w')
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 8., label = '8 m/s',
coordinates = 'axes', labelpos = 'E')
elif mode == 'A':
# Plot filled contours
maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
normmax = np.amax([maxval, minval])
norm = mpl.colors.Normalize(vmin = -normmax, vmax = normmax)
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
norm = norm, cmap = get_cmap("RdBu_r"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$^{o}C$', fontsize = 18)
# Plot contours
plt.contour(lon, lat, q, transform = ccrs.PlateCarree())
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 3., label = '3 m/s',
coordinates = 'axes', labelpos = 'E')
# *must* call draw in order to get the axis boundary used to add ticks:
fig.canvas.draw()
# Add the tick marks
xticks = np.arange(0., 360., 30.)
yticks = np.arange(-90., 100., 15.)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
lambert_xticks(ax, xticks)
lambert_yticks(ax, yticks)
# Set title and figure name
if mode == 'LM':
plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'long term mean', fontsize = 18)
pname = 'p850_longterm.png'
elif mode == 'EM':
plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'extreme precipitation days', fontsize = 18)
pname = 'p850_extreme.png'
elif mode == 'A':
plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'anomaly fields', fontsize = 18)
pname = 'p850_anom.png'
    ax.set_global()
    ax.gridlines()
plt.tight_layout()
#plot_dir = '/mnt/a/u/sciteam/chug/Laplata_tracers/plots/dipole_assessment/'
#pname = plot_dir + name + '.png'
plt.savefig(pname, bbox_inches = 'tight')
plt.show()
def plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, mode):
"""
    Plot filled contours overlaid with wind vectors
Input
-------
lonu = lon values extracted from wind dataset (1-D)
latu = lat values extracted from wind dataset (1-D)
u = U-wind at surface, shape = lonu X latu
v = V-wind at surface, shape = lonu X latu
lont = lon values extracted from skin temperature dataset (1-D)
latt = lat values extracted from skin temperature dataset (1-D)
t = Skin temperature, shape = lont X latt
mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days
Output
--------
    matplotlib figure with filled contours of skin temperature overlaid with wind vectors
"""
# change data and lon to cyclic coordinates
u, lonu_new = add_cyclic_point(u.values, coord = lonu.values)
v, lonu = add_cyclic_point(v.values, coord = lonu.values)
t, lont = add_cyclic_point(t.values, coord = lont.values)
# Create a figure
fig = plt.figure(figsize=(10, 5))
# Set the GeoAxes to the PlateCarree projection
ax = plt.axes(projection = ccrs.PlateCarree())
# Add coastlines
ax.coastlines('50m', linewidth = 0.8)
# Assign data for filled contour
data = t
if mode == 'EM' or mode == 'LM':
# Plot filled contours
plt.contourf(lont, latt, data, 20, transform = ccrs.PlateCarree(),
cmap = get_cmap("viridis"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$^{o}C$', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lonu[::rd], latu[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 5., label = '5 m/s',
coordinates = 'axes', labelpos = 'E')
elif mode == 'A':
# Plot filled contours
maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
normmax = np.amax([maxval, minval])
norm = mpl.colors.Normalize(vmin = -normmax, vmax = normmax)
plt.contourf(lont, latt, data, 20, transform = ccrs.PlateCarree(),
norm = norm, cmap = get_cmap("RdBu_r"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$^{o}C$', fontsize = 18)
# Plot the vectors and reference vector
rd = 5 #regrid_delta
quiver = plt.quiver(lonu[::rd], latu[::rd], u[::rd, ::rd], v[::rd, ::rd],
transform = ccrs.PlateCarree(), headwidth = 5., headlength = 5.)
ax.quiverkey(quiver, X = 0.9, Y = 1.03, U = 3., label = '3 m/s',
coordinates = 'axes', labelpos = 'E')
# *must* call draw in order to get the axis boundary used to add ticks:
fig.canvas.draw()
# Add the tick marks
xticks = np.arange(0., 360., 30.)
yticks = np.arange(-80., 80., 20.)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
lambert_xticks(ax, xticks)
lambert_yticks(ax, yticks)
# Set title and figure name
if mode == 'LM':
plt.title('Surface Winds, Skin temp'+'\n'+'long term mean', fontsize = 18)
pname = 'sfc_longterm.png'
elif mode == 'EM':
plt.title('Surface Winds, Skin temp'+'\n'+'extreme precipitation days', fontsize = 18)
pname = 'sfc_extreme.png'
elif mode == 'A':
plt.title('Surface Winds, Skin temp'+'\n'+'anomaly fields', fontsize = 18)
pname = 'sfc_anom.png'
    ax.set_global()
    ax.gridlines()
plt.tight_layout()
#plot_dir = '/mnt/a/u/sciteam/chug/Laplata_tracers/plots/dipole_assessment/'
#pname = plot_dir + name + '.png'
plt.savefig(pname, bbox_inches = 'tight')
plt.show()
def plot_TCWV(lon, lat, q, mode):
"""
Plot filled contours of total column water vapor
Input
-------
lon = lon values extracted from xarray dataset (1-D)
lat = lat values extracted from xarray dataset (1-D)
q = Total column water vapor, shape = lon X lat
mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days
Output
--------
matplotlib figure with filled contours of total column water vapor
"""
# change data and lon to cyclic coordinates
q, lon = add_cyclic_point(q.values, coord = lon.values)
# Create a figure
fig = plt.figure(figsize=(10, 5))
# Set the GeoAxes to the PlateCarree projection
ax = plt.axes(projection = ccrs.PlateCarree())
# Add coastlines
ax.coastlines('50m', linewidth = 0.8)
# Assign data for filled contour
data = q
if mode == 'EM' or mode == 'LM':
data[data > 80.] = 80.
# Plot filled contours
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
cmap = get_cmap("viridis"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$mm$', fontsize = 18)
elif mode == 'A':
maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
normmax = np.amax([maxval, minval])
norm = mpl.colors.Normalize(vmin = -normmax, vmax = normmax)
plt.contourf(lon, lat, data, 20, transform = ccrs.PlateCarree(),
norm = norm, cmap = get_cmap("RdBu_r"))
# Add a color bar
cbar = plt.colorbar(ax = ax, shrink = .75)
cbar.ax.set_ylabel('$mm$', fontsize = 18)
# *must* call draw in order to get the axis boundary used to add ticks:
fig.canvas.draw()
# Add the tick marks
xticks = np.arange(0., 360., 30.)
yticks = np.arange(-80., 80., 20.)
# Label the end-points of the gridlines using the custom tick makers:
ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
lambert_xticks(ax, xticks)
lambert_yticks(ax, yticks)
# Set title and figure name
if mode == 'LM':
plt.title('Total column water vapor'+'\n'+'long term mean', fontsize = 18)
pname = 'tcwv_longterm.png'
elif mode == 'EM':
plt.title('Total column water vapor'+'\n'+'extreme precipitation days',fontsize = 18)
pname = 'tcwv_extreme.png'
elif mode == 'A':
plt.title('Total column water vapor'+'\n'+'anomaly field',fontsize = 18)
pname = 'tcwv_anom.png'
    ax.set_global()
    ax.gridlines()
plt.tight_layout()
#plot_dir = '/mnt/a/u/sciteam/chug/Laplata_tracers/plots/dipole_assessment/'
#pname = plot_dir + name + '.png'
plt.savefig(pname, bbox_inches = 'tight')
plt.show()
###############################
# Open datasets and plot data #
# Set path to netcdf files
path = 'atms597_proj3/data/'
# First let's plot the anomalies
# 250 hPa anomalies
xrdata = xr.open_dataset(path+'pressure_anomaly.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
xrdata = xr.open_dataset(path + 'pressure_anomaly_new.nc')
wspd = xrdata['wind_spd_250']
plot_250hPa_winds(lon, lat, u, v, wspd, 'A')
# 500 hPa anomalies
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'A')
# 850 hPa anomalies
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'A')
# Next we move to surface anomalies
xrdata = xr.open_dataset(path+'surface_anomaly.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
xrdata = xr.open_dataset(path+'surface_gauss_anomaly.nc')
t = xrdata['skin_temp_surface'] - 273  # convert to Celsius
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'A')
# TCWV anomalies
xrdata = xr.open_dataset(path+'total_column_anomaly.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'A')
# Next we plot the long term means
# 250 hPa long term means
xrdata = xr.open_dataset(path+'pressure_long_term_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
wspd = np.sqrt(np.multiply(u, u) + np.multiply(v, v))
plot_250hPa_winds(lon, lat, u, v, wspd, 'LM')
# 500 hPa long term means
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'LM')
# 850 hPa long term means
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'LM')
# surface long term means
xrdata = xr.open_dataset(path+'surface_long_term_mean.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
xrdata = xr.open_dataset(path+'surface_gauss_long_term_mean.nc')
t = xrdata['skin_temp_surface']
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'LM')
# TCWV long term means
xrdata = xr.open_dataset(path+'total_column_long_term_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'LM')
# Finally we plot the mean of extreme precipitation days
# 250 hPa extreme means
xrdata = xr.open_dataset(path+'pressure_extreme_precip_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
wspd = np.sqrt(np.multiply(u, u) + np.multiply(v, v))
plot_250hPa_winds(lon, lat, u, v, wspd, 'EM')
# 500 hPa extreme means
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'EM')
# 850 hPa extreme means
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'EM')
# surface extreme means
xrdata = xr.open_dataset(path+'surface_extreme_precip_mean.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
xrdata = xr.open_dataset(path+'surface_gauss_extreme_precip_mean.nc')
t = xrdata['skin_temp_surface']-273
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'EM')
# TCWV extreme means
xrdata = xr.open_dataset(path+'total_column_extreme_precip_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'EM')
|
[
"copy.copy",
"numpy.arange",
"numpy.multiply",
"numpy.linspace",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.savefig",
"numpy.amin",
"cartopy.crs.PlateCarree",
"shapely.geometry.LineString",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.title",
"xarray.open_dataset",
"cartopy.crs.Geodetic",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"numpy.amax"
] |
[((22322, 22367), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_anomaly.nc')"], {}), "(path + 'pressure_anomaly.nc')\n", (22337, 22367), True, 'import xarray as xr\n'), ((22465, 22526), 'xarray.open_dataset', 'xr.open_dataset', (['"""atms597_proj3/data/pressure_anomaly_new.nc"""'], {}), "('atms597_proj3/data/pressure_anomaly_new.nc')\n", (22480, 22526), True, 'import xarray as xr\n'), ((22946, 22990), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_anomaly.nc')"], {}), "(path + 'surface_anomaly.nc')\n", (22961, 22990), True, 'import xarray as xr\n'), ((23106, 23156), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_anomaly.nc')"], {}), "(path + 'surface_gauss_anomaly.nc')\n", (23121, 23156), True, 'import xarray as xr\n'), ((23337, 23386), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_anomaly.nc')"], {}), "(path + 'total_column_anomaly.nc')\n", (23352, 23386), True, 'import xarray as xr\n'), ((23554, 23606), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_long_term_mean.nc')"], {}), "(path + 'pressure_long_term_mean.nc')\n", (23569, 23606), True, 'import xarray as xr\n'), ((24143, 24194), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_long_term_mean.nc')"], {}), "(path + 'surface_long_term_mean.nc')\n", (24158, 24194), True, 'import xarray as xr\n'), ((24310, 24367), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_long_term_mean.nc')"], {}), "(path + 'surface_gauss_long_term_mean.nc')\n", (24325, 24367), True, 'import xarray as xr\n'), ((24531, 24587), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_long_term_mean.nc')"], {}), "(path + 'total_column_long_term_mean.nc')\n", (24546, 24587), True, 'import xarray as xr\n'), ((24776, 24833), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_extreme_precip_mean.nc')"], {}), "(path + 'pressure_extreme_precip_mean.nc')\n", (24791, 24833), True, 'import xarray as xr\n'), ((25364, 25420), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_extreme_precip_mean.nc')"], {}), "(path + 'surface_extreme_precip_mean.nc')\n", (25379, 25420), True, 'import xarray as xr\n'), ((25536, 25598), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_extreme_precip_mean.nc')"], {}), "(path + 'surface_gauss_extreme_precip_mean.nc')\n", (25551, 25598), True, 'import xarray as xr\n'), ((25764, 25825), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_extreme_precip_mean.nc')"], {}), "(path + 'total_column_extreme_precip_mean.nc')\n", (25779, 25825), True, 'import xarray as xr\n'), ((833, 863), 'shapely.geometry.LineString', 'sgeom.LineString', (['points[side]'], {}), '(points[side])\n', (849, 863), True, 'import shapely.geometry as sgeom\n'), ((2624, 2635), 'copy.copy', 'copy', (['ticks'], {}), '(ticks)\n', (2628, 2635), False, 'from copy import copy\n'), ((3740, 3767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (3750, 3767), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5820), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (5802, 5820), True, 'import numpy as np\n'), ((5831, 5860), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (5840, 5860), True, 'import numpy as np\n'), ((6585, 6603), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6601, 6603), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6766), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (6738, 6766), True, 'import matplotlib.pyplot as plt\n'), ((6773, 6783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6781, 6783), True, 'import matplotlib.pyplot as plt\n'), ((7707, 7734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7717, 7734), True, 'import matplotlib.pyplot as plt\n'), ((9738, 9765), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (9747, 9765), True, 'import numpy as np\n'), ((9776, 9805), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (9785, 9805), True, 'import numpy as np\n'), ((10544, 10562), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10560, 10562), True, 'import matplotlib.pyplot as plt\n'), ((10686, 10725), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (10697, 10725), True, 'import matplotlib.pyplot as plt\n'), ((10732, 10742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10740, 10742), True, 'import matplotlib.pyplot as plt\n'), ((11803, 11830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (11813, 11830), True, 'import matplotlib.pyplot as plt\n'), ((14056, 14083), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (14065, 14083), True, 'import numpy as np\n'), ((14094, 14123), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (14103, 14123), True, 'import numpy as np\n'), ((14902, 14920), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14918, 14920), True, 'import matplotlib.pyplot as plt\n'), ((15044, 15083), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (15055, 15083), True, 'import matplotlib.pyplot as plt\n'), ((15090, 15100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15098, 15100), True, 'import matplotlib.pyplot as plt\n'), ((16171, 16198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (16181, 16198), True, 'import matplotlib.pyplot as plt\n'), ((18203, 18230), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (18212, 18230), True, 'import numpy as np\n'), ((18241, 18269), 'numpy.arange', 'np.arange', (['(-80.0)', '(80.0)', '(20.0)'], {}), '(-80.0, 80.0, 20.0)\n', (18250, 18269), True, 'import numpy as np\n'), ((19030, 19048), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19046, 19048), True, 'import matplotlib.pyplot as plt\n'), ((19172, 19211), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (19183, 19211), True, 'import matplotlib.pyplot as plt\n'), ((19218, 19228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19226, 19228), True, 'import matplotlib.pyplot as plt\n'), ((19869, 19896), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (19879, 19896), True, 'import matplotlib.pyplot as plt\n'), ((21109, 21136), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (21118, 21136), True, 'import numpy as np\n'), ((21147, 21175), 
'numpy.arange', 'np.arange', (['(-80.0)', '(80.0)', '(20.0)'], {}), '(-80.0, 80.0, 20.0)\n', (21156, 21175), True, 'import numpy as np\n'), ((21936, 21954), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21952, 21954), True, 'import matplotlib.pyplot as plt\n'), ((22078, 22117), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (22089, 22117), True, 'import matplotlib.pyplot as plt\n'), ((22124, 22134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22132, 22134), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2140), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2138, 2140), True, 'import cartopy.crs as ccrs\n'), ((4263, 4295), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (4275, 4295), True, 'import matplotlib.pyplot as plt\n'), ((6173, 6238), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'long term mean', fontsize=18)\n", (6182, 6238), True, 'import matplotlib.pyplot as plt\n'), ((8216, 8248), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (8228, 8248), True, 'import matplotlib.pyplot as plt\n'), ((10117, 10187), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'long term mean', fontsize=18)\n", (10126, 10187), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12346), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (12326, 12346), True, 'import matplotlib.pyplot as plt\n'), ((14436, 14521), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' + 'long term mean',\n fontsize=18)\n", (14445, 14521), True, 'import matplotlib.pyplot as plt\n'), ((16673, 16705), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (16685, 16705), True, 'import matplotlib.pyplot as plt\n'), ((18582, 18658), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'long term mean', fontsize=18)\n", (18591, 18658), True, 'import matplotlib.pyplot as plt\n'), ((20400, 20432), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (20412, 20432), True, 'import matplotlib.pyplot as plt\n'), ((21488, 21564), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water vapor' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'long term mean', fontsize=18)\n", (21497, 21564), True, 'import matplotlib.pyplot as plt\n'), ((23710, 23727), 'numpy.multiply', 'np.multiply', (['u', 'u'], {}), '(u, u)\n', (23721, 23727), True, 'import numpy as np\n'), ((23730, 23747), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (23741, 23747), True, 'import numpy as np\n'), ((24937, 24954), 'numpy.multiply', 'np.multiply', (['u', 'u'], {}), '(u, u)\n', (24948, 24954), True, 'import numpy as np\n'), ((24957, 24974), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (24968, 24974), True, 'import numpy as np\n'), 
((2278, 2293), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (2291, 2293), True, 'import cartopy.crs as ccrs\n'), ((3858, 3876), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3874, 3876), True, 'import cartopy.crs as ccrs\n'), ((4893, 4918), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (4900, 4918), True, 'import numpy as np\n'), ((4934, 4983), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (4954, 4983), True, 'import matplotlib as mpl\n'), ((5173, 5205), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (5185, 5205), True, 'import matplotlib.pyplot as plt\n'), ((6302, 6379), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'extreme precipitation days', fontsize=18)\n", (6311, 6379), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7843), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7841, 7843), True, 'import cartopy.crs as ccrs\n'), ((8840, 8865), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (8847, 8865), True, 'import numpy as np\n'), ((8881, 8930), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (8901, 8930), True, 'import matplotlib as mpl\n'), ((9120, 9152), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (9132, 9152), True, 'import matplotlib.pyplot as plt\n'), ((10251, 10337), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (10260, 10337), True, 'import matplotlib.pyplot as plt\n'), ((11921, 11939), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11937, 11939), True, 'import cartopy.crs as ccrs\n'), ((13054, 13079), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (13061, 13079), True, 'import numpy as np\n'), ((13095, 13144), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (13115, 13144), True, 'import matplotlib as mpl\n'), ((13334, 13366), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (13346, 13366), True, 'import matplotlib.pyplot as plt\n'), ((14583, 14680), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' +\n 'extreme precipitation days', fontsize=18)\n", (14592, 14680), True, 'import matplotlib.pyplot as plt\n'), ((16287, 16305), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16303, 16305), True, 'import cartopy.crs as ccrs\n'), ((17303, 17328), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (17310, 17328), True, 'import numpy as np\n'), ((17344, 17393), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (17364, 17393), True, 'import matplotlib as mpl\n'), ((17585, 17617), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], 
{'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (17597, 17617), True, 'import matplotlib.pyplot as plt\n'), ((18723, 18815), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (18732, 18815), True, 'import matplotlib.pyplot as plt\n'), ((19985, 20003), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20001, 20003), True, 'import cartopy.crs as ccrs\n'), ((20601, 20626), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (20608, 20626), True, 'import numpy as np\n'), ((20642, 20691), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (20662, 20691), True, 'import matplotlib as mpl\n'), ((20881, 20913), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (20893, 20913), True, 'import matplotlib.pyplot as plt\n'), ((21630, 21722), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water vapor' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (21639, 21722), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4161), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4159, 4161), True, 'import cartopy.crs as ccrs\n'), ((4192, 4211), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (4200, 4211), False, 'from matplotlib.cm import get_cmap\n'), ((4555, 4573), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4571, 4573), True, 'import cartopy.crs as ccrs\n'), ((6441, 6506), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'anomaly fields', fontsize=18)\n", (6450, 6506), True, 'import matplotlib.pyplot as plt\n'), ((8096, 8114), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8112, 8114), True, 'import cartopy.crs as ccrs\n'), ((8145, 8164), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (8153, 8164), False, 'from matplotlib.cm import get_cmap\n'), ((8506, 8524), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8522, 8524), True, 'import cartopy.crs as ccrs\n'), ((10395, 10465), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'anomaly fields', fontsize=18)\n", (10404, 10465), True, 'import matplotlib.pyplot as plt\n'), ((12194, 12212), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12210, 12212), True, 'import cartopy.crs as ccrs\n'), ((12243, 12262), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (12251, 12262), False, 'from matplotlib.cm import get_cmap\n'), ((12481, 12499), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12497, 12499), True, 'import cartopy.crs as ccrs\n'), ((12722, 12740), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12738, 12740), True, 'import cartopy.crs as ccrs\n'), ((14740, 14825), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' + 'anomaly fields',\n 
fontsize=18)\n", (14749, 14825), True, 'import matplotlib.pyplot as plt\n'), ((16562, 16580), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16578, 16580), True, 'import cartopy.crs as ccrs\n'), ((16611, 16630), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (16619, 16630), False, 'from matplotlib.cm import get_cmap\n'), ((16971, 16989), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16987, 16989), True, 'import cartopy.crs as ccrs\n'), ((18874, 18950), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'anomaly fields', fontsize=18)\n", (18883, 18950), True, 'import matplotlib.pyplot as plt\n'), ((20289, 20307), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20305, 20307), True, 'import cartopy.crs as ccrs\n'), ((20338, 20357), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (20346, 20357), False, 'from matplotlib.cm import get_cmap\n'), ((21781, 21856), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water vapor' + '\\n' + 'anomaly field')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'anomaly field', fontsize=18)\n", (21790, 21856), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1094), 'numpy.linspace', 'np.linspace', (['b[2]', 'b[3]', 'n'], {}), '(b[2], b[3], n)\n', (1079, 1094), True, 'import numpy as np\n'), ((1500, 1526), 'numpy.linspace', 'np.linspace', (['b[0]', 'b[1]', 'n'], {}), '(b[0], b[1], n)\n', (1511, 1526), True, 'import numpy as np\n'), ((4837, 4850), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (4844, 4850), True, 'import numpy as np\n'), ((4860, 4873), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (4867, 4873), True, 'import numpy as np\n'), ((5041, 5059), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5057, 5059), True, 'import cartopy.crs as ccrs\n'), ((5103, 5121), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (5111, 5121), False, 'from matplotlib.cm import get_cmap\n'), ((5465, 5483), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5481, 5483), True, 'import cartopy.crs as ccrs\n'), ((8784, 8797), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (8791, 8797), True, 'import numpy as np\n'), ((8807, 8820), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (8814, 8820), True, 'import numpy as np\n'), ((8988, 9006), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9004, 9006), True, 'import cartopy.crs as ccrs\n'), ((9050, 9068), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (9058, 9068), False, 'from matplotlib.cm import get_cmap\n'), ((9410, 9428), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9426, 9428), True, 'import cartopy.crs as ccrs\n'), ((12998, 13011), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (13005, 13011), True, 'import numpy as np\n'), ((13021, 13034), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (13028, 13034), True, 'import numpy as np\n'), ((13202, 13220), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13218, 13220), True, 'import cartopy.crs as ccrs\n'), ((13264, 13282), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (13272, 13282), False, 'from matplotlib.cm import get_cmap\n'), ((13501, 13519), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', 
(13517, 13519), True, 'import cartopy.crs as ccrs\n'), ((13728, 13746), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13744, 13746), True, 'import cartopy.crs as ccrs\n'), ((17247, 17260), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (17254, 17260), True, 'import numpy as np\n'), ((17270, 17283), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (17277, 17283), True, 'import numpy as np\n'), ((17453, 17471), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17469, 17471), True, 'import cartopy.crs as ccrs\n'), ((17515, 17533), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (17523, 17533), False, 'from matplotlib.cm import get_cmap\n'), ((17875, 17893), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17891, 17893), True, 'import cartopy.crs as ccrs\n'), ((20545, 20558), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (20552, 20558), True, 'import numpy as np\n'), ((20568, 20581), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (20575, 20581), True, 'import numpy as np\n'), ((20749, 20767), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20765, 20767), True, 'import cartopy.crs as ccrs\n'), ((20811, 20829), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (20819, 20829), False, 'from matplotlib.cm import get_cmap\n'), ((1051, 1062), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1059, 1062), True, 'import numpy as np\n'), ((1528, 1539), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1536, 1539), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# coding: utf-8
import numpy as np
from .facet import Facet
class PosRegion():
"""
Implement the convex polytope
"""
def __init__(self, pos_samples):
"""
Params:
pos_samples (np.array): dim+1 positive samples to create the
(dim)-polytope.
"""
self.dim = pos_samples.shape[1]
if (self.dim+1) != pos_samples.shape[0]:
raise ValueError("Wrong number of samples")
self.vertices = pos_samples
self.facets = []
self.create_facets()
def create_facets(self):
"""
Create the facets of the polytope
"""
# For each sample in the set of vertices, create the facet that does
# not contain this sample
for sample_id in range(self.vertices.shape[0]):
facet_points = np.delete(self.vertices, (sample_id), axis=0)
self.facets.append(
Facet(facet_points, self.vertices[sample_id, :])
)
def contain(self, point):
"""
Check if a point is inside the positive region.
A point is inside the positive region if it is not visible by any of
the facets of the positive region.
Params:
point (np.array): point to check.
Return:
(boolean): True if point is inside the positive region.
"""
contain = True
for facet in self.facets:
if facet.is_visible(point):
contain = False
break
return contain
def add_vertex(self, point):
"""
Add a new vertex on the positive region.
Params:
point (np.array): point to add to the positive region.
"""
# Step 1: Find visible facets
visible_facets = []
for facet in self.facets:
if facet.is_visible(point):
visible_facets.append(facet)
        # If there are no visible facets, the point is inside the positive
        # region, so don't do anything.
if not visible_facets:
return None
# Step 2: find ridges that connect a visible facet and a hidden facet.
# They are also the ridges that only occurs once in the set of visible
# facets.
        horizon_ridges = []
        # Use hashes first to skip array-comparison issues
        hash_horizon_ridges = []
for facet in visible_facets:
self.facets.remove(facet)
for ridge in facet.get_ridges():
if hash(ridge.tostring()) in hash_horizon_ridges:
hash_horizon_ridges.remove(hash(ridge.tostring()))
else:
hash_horizon_ridges.append(hash(ridge.tostring()))
        # Then collect the ridge arrays whose hashes remained (each occurs exactly once)
for facet in visible_facets:
for ridge in facet.get_ridges():
if hash(ridge.tostring()) in hash_horizon_ridges:
horizon_ridges.append(ridge)
# Step 3: Add facets with the new points and horizon ridges
for ridge in horizon_ridges:
for point_id in range(self.vertices.shape[0]):
if self.vertices[point_id, :] not in ridge:
ref = self.vertices[point_id, :]
break
self.facets.append(
Facet(np.vstack((ridge, point)), ref)
)
# Finally, update the vertices of this region
self.vertices = np.vstack((
self.vertices,
point.reshape((1, -1)),
))
self.clean_vertices()
def clean_vertices(self):
"""
Remove vertices that are not on a facet
"""
to_remove = []
for vertex_id in range(self.vertices.shape[0]):
current_vertex = self.vertices[vertex_id, :]
is_useful = False
for facet in self.facets:
if current_vertex in facet.vertices:
is_useful = True
if not is_useful:
to_remove.append(vertex_id)
self.vertices = np.delete(self.vertices, to_remove, 0)
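# A minimal usage sketch (hypothetical data; it assumes the sibling `Facet`
# implementation behaves as documented and, because of the relative import
# above, is meant to be run in the package context, e.g. via `python -m ...`).
if __name__ == "__main__":
    # dim = 2, so dim+1 = 3 samples seed the region
    triangle = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    region = PosRegion(triangle)
    print(region.contain(np.array([0.25, 0.25])))  # expected True: inside the triangle
    region.add_vertex(np.array([1.0, 1.0]))         # outside, so the hull is extended
    print(region.contain(np.array([0.8, 0.8])))    # expected True after the extension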
|
[
"numpy.delete",
"numpy.vstack"
] |
[((4221, 4259), 'numpy.delete', 'np.delete', (['self.vertices', 'to_remove', '(0)'], {}), '(self.vertices, to_remove, 0)\n', (4230, 4259), True, 'import numpy as np\n'), ((862, 905), 'numpy.delete', 'np.delete', (['self.vertices', 'sample_id'], {'axis': '(0)'}), '(self.vertices, sample_id, axis=0)\n', (871, 905), True, 'import numpy as np\n'), ((3462, 3487), 'numpy.vstack', 'np.vstack', (['(ridge, point)'], {}), '((ridge, point))\n', (3471, 3487), True, 'import numpy as np\n')]
|
import gym
import gym_sokoban
import torch
import numpy as np
import random
import time
from utilities.channelConverter import hwc2chw
from experts.utils import get_distance
from external_actions import get_astar_action
import warnings
warnings.simplefilter("ignore", UserWarning)
def test_the_agent(agent, data_path, USE_CUDA, eval_num, args=None, display=False, deter=False, Variable=None):
solved = []
rewards = []
    # Specify the environment to use; v0 samples sub-cases randomly, while v1 samples only the targeted sub-cases.
#env = gym.make('Curriculum-Sokoban-v2', data_path = data_path, seed=random.randint(0,100))
env = gym.make('Curriculum-Sokoban-v2', data_path = data_path)
solved_maps = []
unsolved_maps = []
for i in range(eval_num):
episode_reward = 0
state = env.reset()
if display:
print('#### Start ####')
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
state = hwc2chw(state, test=True)
if USE_CUDA:
state = state.cuda()
action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
next_state, reward, done, _ = env.step(action.item())
episode_reward += reward
next_state = hwc2chw(next_state, test=True)
if display:
print('#### action taken ####')
print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
        steps = 1
while not done:
state = next_state
if USE_CUDA:
state = state.cuda()
with torch.no_grad():
action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
if display:
print('#### action taken ####')
print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
next_state, reward, done, _ = env.step(action.item())
if get_distance(env.room_state) == -1:
if display:
print('The game is unsolvable now')
time.sleep(2)
break
episode_reward += reward
next_state = hwc2chw(next_state, test=True)
            steps += 1
        if steps < env.max_steps and get_distance(env.room_state) != -1:
solved.append(1)
solved_maps.append(env.selected_map)
else:
unsolved_maps.append(env.selected_map)
rewards.append(episode_reward)
return np.sum(solved)/eval_num
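# Illustrative invocation sketch (the loader and paths below are hypothetical,
# not from this repository); any policy object exposing
# `select_action(state, test, determinisitc)` can be evaluated this way:
#   agent = load_trained_agent('checkpoints/sokoban.pt')
#   success_rate = test_the_agent(agent, 'data/curriculum_maps/', USE_CUDA=False,
#                                 eval_num=100, deter=True)
#   print('solved fraction:', success_rate)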
|
[
"external_actions.get_astar_action",
"experts.utils.get_distance",
"time.sleep",
"numpy.sum",
"utilities.channelConverter.hwc2chw",
"warnings.simplefilter",
"torch.no_grad",
"gym.make"
] |
[((239, 283), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (260, 283), False, 'import warnings\n'), ((660, 714), 'gym.make', 'gym.make', (['"""Curriculum-Sokoban-v2"""'], {'data_path': 'data_path'}), "('Curriculum-Sokoban-v2', data_path=data_path)\n", (668, 714), False, 'import gym\n'), ((1072, 1097), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['state'], {'test': '(True)'}), '(state, test=True)\n', (1079, 1097), False, 'from utilities.channelConverter import hwc2chw\n'), ((1354, 1384), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['next_state'], {'test': '(True)'}), '(next_state, test=True)\n', (1361, 1384), False, 'from utilities.channelConverter import hwc2chw\n'), ((2971, 2985), 'numpy.sum', 'np.sum', (['solved'], {}), '(solved)\n', (2977, 2985), True, 'import numpy as np\n'), ((1042, 1055), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1052, 1055), False, 'import time\n'), ((1702, 1715), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1712, 1715), False, 'import time\n'), ((2648, 2678), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['next_state'], {'test': '(True)'}), '(next_state, test=True)\n', (2655, 2678), False, 'from utilities.channelConverter import hwc2chw\n'), ((1866, 1881), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1879, 1881), False, 'import torch\n'), ((2318, 2331), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2328, 2331), False, 'import time\n'), ((2413, 2441), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2425, 2441), False, 'from experts.utils import get_distance\n'), ((2549, 2562), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2559, 2562), False, 'import time\n'), ((2732, 2760), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2744, 2760), False, 'from experts.utils import get_distance\n'), ((999, 1027), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (1011, 1027), False, 'from experts.utils import get_distance\n'), ((1531, 1563), 'external_actions.get_astar_action', 'get_astar_action', (['env.room_state'], {}), '(env.room_state)\n', (1547, 1563), False, 'from external_actions import get_astar_action\n'), ((1659, 1687), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (1671, 1687), False, 'from experts.utils import get_distance\n'), ((2135, 2167), 'external_actions.get_astar_action', 'get_astar_action', (['env.room_state'], {}), '(env.room_state)\n', (2151, 2167), False, 'from external_actions import get_astar_action\n'), ((2271, 2299), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2283, 2299), False, 'from experts.utils import get_distance\n')]
|
"""
Test `sinethesizer.effects.equalizer` module.
Author: <NAME>
"""
from typing import Any, Dict, List
import numpy as np
import pytest
from scipy.signal import spectrogram
from sinethesizer.effects.equalizer import apply_equalizer
from sinethesizer.synth.core import Event
from sinethesizer.oscillators import generate_mono_wave
@pytest.mark.parametrize(
"frequencies, frame_rate, kind, kwargs, spectrogram_params, expected",
[
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [300, 700],
'gains': [0.2, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291,
0.2081294, 0.3571571, 0.5181565, 0.55258, 0.557289,
0.5601418, 0.5615491, 0.5621033, 0.5622196, 0.5619461,
0.5608991, 0.5583538, 0.5535695, 0.5462548, 0.536942
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [0, 500, 1200, 1900],
'gains': [0, 1.0, 0.1, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [0, 500, 1200, 1900, 5000],
'gains': [0, 1.0, 0.1, 1.0, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'relative',
# `kwargs`
{
'breakpoint_frequencies_ratios': [0, 5, 12, 19, 50],
'gains': [0, 1.0, 0.1, 1.0, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
]
)
def test_apply_equalizer(
frequencies: List[float], frame_rate: int, kind: str,
kwargs: Dict[str, Any], spectrogram_params: Dict[str, Any],
expected: np.ndarray
) -> None:
"""Test `apply_equalizer` function."""
waves = [
generate_mono_wave(
'sine', frequency, np.ones(frame_rate), frame_rate
)
for frequency in frequencies
]
sound = sum(waves)
sound = np.vstack((sound, sound))
event = Event(
instrument='any_instrument',
start_time=0,
duration=1,
frequency=min(frequencies),
velocity=1,
effects='',
frame_rate=frame_rate
)
sound = apply_equalizer(sound, event, kind, **kwargs)
spc = spectrogram(sound[0], frame_rate, **spectrogram_params)[2]
result = spc.sum(axis=1)[:len(expected)]
np.testing.assert_almost_equal(result, expected)
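# Side note (illustrative, not executed by the test): the `expected` vectors in
# the parametrization above can be regenerated by printing the rounded
# per-frequency power sums computed in the body, e.g.
#   print(np.round(spc.sum(axis=1)[:len(expected)], 7))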
|
[
"sinethesizer.effects.equalizer.apply_equalizer",
"numpy.ones",
"scipy.signal.spectrogram",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.vstack"
] |
[((4672, 4697), 'numpy.vstack', 'np.vstack', (['(sound, sound)'], {}), '((sound, sound))\n', (4681, 4697), True, 'import numpy as np\n'), ((4920, 4965), 'sinethesizer.effects.equalizer.apply_equalizer', 'apply_equalizer', (['sound', 'event', 'kind'], {}), '(sound, event, kind, **kwargs)\n', (4935, 4965), False, 'from sinethesizer.effects.equalizer import apply_equalizer\n'), ((5084, 5132), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (5114, 5132), True, 'import numpy as np\n'), ((4976, 5031), 'scipy.signal.spectrogram', 'spectrogram', (['sound[0]', 'frame_rate'], {}), '(sound[0], frame_rate, **spectrogram_params)\n', (4987, 5031), False, 'from scipy.signal import spectrogram\n'), ((4552, 4571), 'numpy.ones', 'np.ones', (['frame_rate'], {}), '(frame_rate)\n', (4559, 4571), True, 'import numpy as np\n'), ((1007, 1247), 'numpy.array', 'np.array', (['[0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291, 0.2081294, \n 0.3571571, 0.5181565, 0.55258, 0.557289, 0.5601418, 0.5615491, \n 0.5621033, 0.5622196, 0.5619461, 0.5608991, 0.5583538, 0.5535695, \n 0.5462548, 0.536942]'], {}), '([0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291, 0.2081294,\n 0.3571571, 0.5181565, 0.55258, 0.557289, 0.5601418, 0.5615491, \n 0.5621033, 0.5622196, 0.5619461, 0.5608991, 0.5583538, 0.5535695, \n 0.5462548, 0.536942])\n', (1015, 1247), True, 'import numpy as np\n'), ((1953, 2193), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (1961, 2193), True, 'import numpy as np\n'), ((2910, 3150), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (2918, 3150), True, 'import numpy as np\n'), ((3866, 4106), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (3874, 4106), True, 'import numpy as np\n')]
|
from __future__ import annotations
import datetime
import json
from abc import ABC, abstractmethod
from collections import defaultdict, deque, namedtuple
from typing import (
Any,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
TextIO,
Tuple,
)
import numpy as np
import scipy.sparse
from . import labeler, ontology, timeline, utils
ColumnValue = namedtuple("ColumnValue", ["column", "value"])
"""A value for a particular column
.. py:attribute:: column
The index for the column
.. py:attribute:: value
The value for that column
"""
class FeaturizerList:
"""
Featurizer list consists of a list of featurizers to be used to featurize data.
It enables training, featurization, column name extraction and serialization/deserialization.
"""
def __init__(self, featurizers: List[Featurizer]):
"""Create the FeaturizerList from a sequence of featurizers.
Args:
featurizers (list of :class:`Featurizer`): The featurizers to use for
transforming the patients.
"""
self.featurizers = featurizers
def train_featurizers(
self,
timelines: timeline.TimelineReader,
labeler: labeler.Labeler,
end_date: Optional[datetime.date] = None,
) -> None:
"""
Train a list of featurizers on the provided patients using the given labeler.
Args:
timelines (:class:`stride_ml.timeline.TimelineReader`): The timelines to read from.
labeler (:class:`stride_ml.labeler.Labeler`): The labeler to train with.
end_date (datetime.date): An optional date used to filter data off the end of the timeline.
"""
any_needs_training = any(
featurizer.needs_training() for featurizer in self.featurizers
)
if not any_needs_training:
return
all_patients = labeler.get_all_patient_ids()
for patient_id in timelines.get_patient_ids():
if all_patients is not None and patient_id not in all_patients:
continue
patient = timelines.get_patient(patient_id, end_date=end_date)
labels = labeler.label(patient)
if len(labels) == 0:
continue
label_indices = {label.day_index for label in labels}
for featurizer in self.featurizers:
if featurizer.needs_training():
featurizer.train(patient, label_indices)
for featurizer in self.featurizers:
featurizer.finalize_training()
def featurize(
self,
timelines: timeline.TimelineReader,
labeler: labeler.Labeler,
end_date: Optional[datetime.date] = None,
) -> Tuple[Any, Any, Any, Any]:
"""
Apply a list of featurizers to obtain a feature matrix and label vector for the given patients.
Args:
timelines (:class:`stride_ml.timeline.TimelineReader`): The timelines to read from.
labeler (:class:`stride_ml.labeler.Labeler`): The labeler to compute labels with.
end_date (datetime.date): An optional date used to filter data off the end of the timeline.
Returns:
This returns a tuple (data_matrix, labels, patient_ids, patient_day_indices).
data_matrix is a sparse matrix of all the features of all the featurizers.
labels is a list of boolean values representing the labels for each row in the matrix.
patient_ids is a list of the patient ids for each row.
patient_day_indices is a list of the day indices for each row.
"""
data = []
indices: List[int] = []
indptr = []
result_labels = []
patient_ids = []
patient_day_indices = []
all_patients = labeler.get_all_patient_ids()
for patient_id in timelines.get_patient_ids():
if all_patients is not None and patient_id not in all_patients:
continue
patient = timelines.get_patient(patient_id, end_date=end_date)
labels = labeler.label(patient)
if len(labels) == 0:
continue
label_indices = set()
for label in labels:
if label.day_index in label_indices:
raise ValueError(
"The provided labeler is invalid as it contains multiple labels "
f"for patient {patient.patient_id} at day index {label.day_index}"
)
label_indices.add(label.day_index)
columns_by_featurizer = []
for featurizer in self.featurizers:
columns = featurizer.transform(patient, label_indices)
assert len(columns) == len(label_indices), (
f"The featurizer {featurizer} didn't provide enough rows for {labeler}"
" on patient {patient_id} ({len(columns)} != {len(label_indices)})"
)
columns_by_featurizer.append(columns)
for i, label in enumerate(labels):
indptr.append(len(indices))
result_labels.append(label.is_positive)
patient_ids.append(patient.patient_id)
patient_day_indices.append(label.day_index)
column_offset = 0
for j, feature_columns in enumerate(columns_by_featurizer):
for column, value in feature_columns[i]:
assert (
0 <= column < self.featurizers[j].num_columns()
), (
f"The featurizer {self.featurizers[j]} provided an out of bounds column for "
f"{labeler} on patient {patient.patient_id} ({column} should be between 0 and "
f"{self.featurizers[j].num_columns()})"
)
indices.append(column_offset + column)
data.append(value)
column_offset += self.featurizers[j].num_columns()
total_columns = sum(
featurizer.num_columns() for featurizer in self.featurizers
)
indptr.append(len(indices))
data = np.array(data, dtype=np.float32)
indices = np.array(indices, dtype=np.int32)
indptr = np.array(indptr, dtype=np.int32)
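        # Illustrative note, not used by the code: (data, indices, indptr) is the
        # standard CSR triplet. For example
        #   data    = [1.0, 2.0, 3.0]
        #   indices = [0,   2,   1]
        #   indptr  = [0, 2, 3]
        # encodes the 2 x 3 matrix [[1.0, 0.0, 2.0],
        #                           [0.0, 3.0, 0.0]].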
data_matrix = scipy.sparse.csr_matrix(
(data, indices, indptr), shape=(len(result_labels), total_columns)
)
return (
data_matrix,
np.array(result_labels, dtype=np.float32),
np.array(patient_ids, dtype=np.int32),
np.array(patient_day_indices, dtype=np.int32),
)
def get_column_name(self, column_index: int) -> str:
offset = 0
for featurizer in self.featurizers:
if offset <= column_index < (offset + featurizer.num_columns()):
return f"Featurizer {featurizer}, {featurizer.get_column_name(column_index - offset)}"
offset += featurizer.num_columns()
assert False, "This should never happen"
def save(self, fp: TextIO) -> None:
json.dump([featurizer.to_dict() for featurizer in self.featurizers], fp)
def load(self, fp: TextIO) -> None:
for data_for_featurizer, featurizer in zip(
json.load(fp), self.featurizers
):
featurizer.from_dict(data_for_featurizer)
class Featurizer(ABC):
def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
"""
Train the featurizer on the given patients and label indices.
This should do nothing if the featurizer doesn't need training.
Args:
patient: A patient to train on.
label_indices (:obj:set: of int): The set of indices for that patient.
"""
pass
def finalize_training(self) -> None:
"""
Finish the featurizer at the end of training. This is not needed for every
featurizer, but does become necessary for things like verifying counts, etc.
"""
pass # The default version does nothing
@abstractmethod
def num_columns(self) -> int:
"""
Returns: The number of columns that this featurizer creates.
"""
@abstractmethod
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
"""
Transform a patient into a series of rows using the specified timepoints.
Args:
patient: The patient to train on.
label_indices (:obj:set of int): The indices which will be labeled.
Returns:
A list of rows. Each row in turn is a list of :class:`ColumnValues<ColumnValue>` for the
values in each column.
"""
def to_dict(self) -> Dict[str, Any]:
"""
Serialize the featurizer to a JSON compatible dict
Returns:
A JSON compatible dict.
"""
return {}
def from_dict(self, data: Mapping[str, Any]) -> None:
"""
Restore the state of the featurizer from a JSON compatible dict.
Args:
data: A JSON compatible dict from to_dict
"""
pass
def get_column_name(self, column_index: int) -> str:
"""
An optional method that enables the user to get the name of a column.
Args:
column_index: The index of the column
"""
return "no name"
def needs_training(self) -> bool:
return False
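# Added illustration (not part of the original module): a minimal Featurizer subclass showing
# the transform() contract -- one list of ColumnValue rows per labeled day index, matching the
# behaviour that the FeaturizerList.featurize() assertions expect.
class _ExampleDayIndexFeaturizer(Featurizer):
    """Emits the day index itself as a single-column feature (illustration only)."""
    def num_columns(self) -> int:
        return 1
    def transform(self, patient, label_indices):
        # One row (a list of ColumnValue) per labeled day, in day order.
        return [
            [ColumnValue(0, float(i))]
            for i, _day in enumerate(patient.days)
            if i in label_indices
        ]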
###########################################
# Useful featurizers
###########################################
class AgeFeaturizer(Featurizer):
"""
Produces the (possibly normalized) age at the prediction timepoint.
"""
def __init__(self, normalize: bool = True):
self.normalize = normalize
self.age_statistics = utils.OnlineStatistics()
def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
if self.normalize:
for i, day in enumerate(patient.days):
if i in label_indices:
self.age_statistics.add(day.age)
def num_columns(self) -> int:
return 1
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
all_columns = []
for i, day in enumerate(patient.days):
if i in label_indices:
if self.normalize:
standardized_age = (
day.age - self.age_statistics.mean()
) / self.age_statistics.standard_deviation()
all_columns.append([ColumnValue(0, standardized_age)])
else:
all_columns.append([ColumnValue(0, day.age)])
return all_columns
def to_dict(self) -> Dict[str, Any]:
return {"age_statistics": self.age_statistics.to_dict()}
def from_dict(self, data: Mapping[str, Any]) -> None:
self.age_statistics = utils.OnlineStatistics(data["age_statistics"])
def needs_training(self) -> bool:
return self.normalize
class IsIcd10Era(Featurizer):
"""
Produces a binary indicator of whether the prediction timepoint falls in the ICD-10 era (i.e. year >= 2016).
"""
def num_columns(self) -> int:
return 1
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
all_columns = []
for i, day in enumerate(patient.days):
if i in label_indices:
all_columns.append([ColumnValue(0, day.date.year >= 2016)])
return all_columns
class CountFeaturizer(Featurizer):
"""
Produces one column for each diagnosis code, procedure code, or prescription code.
The value in each column is the count of how many times that code appears in the patient record
up until the prediction time.
Note: time_bins should be a list of integers, optionally ending with None.
Each integer in time_bins is the maximum age, in days, of the codes counted in that bin;
a trailing None adds a final bin that accepts codes from any point in history.
"""
def __init__(
self,
timelines: timeline.TimelineReader,
ontologies: ontology.OntologyReader,
rollup: bool = False,
exclusion_codes: List[int] = [],
time_bins: Optional[List[Optional[int]]] = None,
):
self.patient_codes: utils.Dictionary[int] = utils.Dictionary()
self.recorded_date_codes = set(ontologies.get_recorded_date_codes())
self.exclusion_codes = set(exclusion_codes)
self.time_bins = time_bins
self.ontologies = ontologies
self.rollup = rollup
def get_codes(self, day: timeline.PatientDay) -> Iterator[int]:
for code in day.observations:
if (code in self.recorded_date_codes) and (
code not in self.exclusion_codes
):
if self.rollup:
for subcode in self.ontologies.get_subwords(code):
yield subcode
else:
yield code
def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
for day in patient.days:
for code in self.get_codes(day):
self.patient_codes.add(code)
def num_columns(self) -> int:
if self.time_bins is None:
return len(self.patient_codes)
else:
return len(self.time_bins) * len(self.patient_codes)
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
all_columns = []
if self.time_bins is None:
current_codes: Dict[int, int] = defaultdict(int)
for i, day in enumerate(patient.days):
for code in self.get_codes(day):
if code in self.patient_codes:
current_codes[self.patient_codes.transform(code)] += 1
if i in label_indices:
all_columns.append(
[
ColumnValue(column, count)
for column, count in current_codes.items()
]
)
else:
codes_per_bin: Dict[int, Deque[Tuple[int, datetime.date]]] = {
i: deque() for i in range(len(self.time_bins))
}
code_counts_per_bin: Dict[int, Dict[int, int]] = {
i: defaultdict(int) for i in range(len(self.time_bins))
}
for day_index, day in enumerate(patient.days):
python_date = datetime.date(
day.date.year, day.date.month, day.date.day
)
for code in self.get_codes(day):
if code in self.patient_codes:
codes_per_bin[0].append((code, python_date))
code_counts_per_bin[0][code] += 1
for i, max_time in enumerate(self.time_bins):
if max_time is None:
# This means that this bin accepts everything
continue
while len(codes_per_bin[i]) > 0:
next_code, next_date = codes_per_bin[i][0]
if (python_date - next_date).days <= max_time:
break
else:
codes_per_bin[i + 1].append(
codes_per_bin[i].popleft()
)
code_counts_per_bin[i][next_code] -= 1
if code_counts_per_bin[i][next_code] == 0:
del code_counts_per_bin[i][next_code]
code_counts_per_bin[i + 1][next_code] += 1
if day_index in label_indices:
all_columns.append(
[
ColumnValue(
self.patient_codes.transform(code)
+ i * len(self.patient_codes),
count,
)
for i in range(len(self.time_bins))
for code, count in code_counts_per_bin[i].items()
]
)
return all_columns
def to_dict(self) -> Dict[str, Any]:
return {"patient_codes": self.patient_codes.to_dict()}
def from_dict(self, data: Mapping[str, Any]) -> None:
self.patient_codes = utils.Dictionary(data["patient_codes"])
def needs_training(self) -> bool:
return True
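# Added note (hypothetical configuration): with time_bins=[90, 365, None] the featurizer keeps
# three copies of the code vocabulary -- codes seen in the last 90 days, codes between 90 and
# 365 days old, and codes from any earlier point -- so num_columns() == 3 * len(patient_codes)
# and a code's column is patient_codes.transform(code) + bin_index * len(patient_codes), exactly
# as computed in transform() above. A sketch of constructing it, where `timelines` and
# `ontologies` are whatever readers the surrounding pipeline already provides:
#
#   count_featurizer = CountFeaturizer(timelines, ontologies, rollup=True, time_bins=[90, 365, None])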
class BinaryFeaturizer(CountFeaturizer):
"""
Behaves like CountFeaturizer except all non-zero counts receive a value of 1.
"""
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
all_columns = []
current_codes = defaultdict(int)
for i, day in enumerate(patient.days):
for code in self.get_codes(day):
if code in self.patient_codes:
current_codes[self.patient_codes.transform(code)] = 1
if i in label_indices:
all_columns.append(
[
ColumnValue(column, count)
for column, count in current_codes.items()
]
)
return all_columns
class LabelerDerivedFeaturizer(Featurizer):
def __init__(self, label: labeler.Labeler):
self.label = label
def num_columns(self) -> int:
return 1
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
result = []
my_labels = self.label.label(patient)
label_dict = {
my_label.day_index: my_label.is_positive for my_label in my_labels
}
for i, day in enumerate(patient.days):
if i in label_indices:
feature = label_dict[i]
result.append([ColumnValue(0, feature)])
return result
class ConstantValueFeaturizer(Featurizer):
"""
This featurizer returns a constant value for each item.
It has only one column.
"""
def __init__(self, value: float):
self.value = value
def num_columns(self) -> int:
return 1
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
result = []
for i, day in enumerate(patient.days):
if i in label_indices:
result.append([ColumnValue(0, self.value)])
return result
class PreprocessedFeaturizer(Featurizer):
def __init__(self, value_map: Mapping[Tuple[int, int], float]):
self.value_map = value_map
def num_columns(self) -> int:
return 1
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
result = []
for i, day in enumerate(patient.days):
if i in label_indices:
value = self.value_map[(patient.patient_id, i)]
result.append([ColumnValue(0, value)])
return result
class NumericObservationWithValueFeaturizer(Featurizer):
"""
This featurizer transforms numeric lab values into binned counts.
The basic idea is that we do a pass over the training data to compute percentiles for the values and then
we use those percentiles to create bins for each lab.
"""
def __init__(
self,
timelines: timeline.TimelineReader,
ontologies: ontology.OntologyReader,
min_labs_per_bin: int = 1,
num_bins: int = 10,
):
self.recorded_date_codes = set(ontologies.get_recorded_date_codes())
self.observedNumericValues: Dict[int, List[float]] = defaultdict(list)
self.min_labs_per_bin = min_labs_per_bin
self.num_bins = num_bins
def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
for day in patient.days:
for codeWithValue in day.observations_with_values:
if codeWithValue.code in self.recorded_date_codes:
if not codeWithValue.is_text:
self.observedNumericValues[codeWithValue.code].append(
codeWithValue.numeric_value
)
def needs_training(self) -> bool:
return True
def get_percentile(self, item: float, percentiles: List[float]) -> int:
"""Get the index for the given percentiles.
Note: There is one bin for each value in percentiles that starts at that value
"""
for i, p in enumerate(percentiles):
if item < p:
return i - 1
return len(percentiles) - 1
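# Worked example (added for clarity, hypothetical numbers): with
# percentiles == [-inf, 1.0, 3.5] the bins are [-inf, 1.0), [1.0, 3.5) and [3.5, +inf), so
#   get_percentile(0.2, percentiles) -> 0
#   get_percentile(2.0, percentiles) -> 1
#   get_percentile(7.3, percentiles) -> 2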
def finalize_training(self) -> None:
self.code_numeric_dictionary = {}
self.next_index = 0
for code, values in self.observedNumericValues.items():
values.sort()
percentiles = [-float("inf")]
for i in range(self.num_bins - 1):
next_value = values[
min(
round((len(values) - 1) * (i + 1) / self.num_bins),
len(values) - 1,
)
]
percentiles.append(next_value)
counts = [0 for _ in range(len(percentiles))]
for item in values:
counts[self.get_percentile(item, percentiles)] += 1
filtered_percentiles = []
current_low: Optional[float] = None
for i, p in enumerate(percentiles):
if counts[i] >= self.min_labs_per_bin:
if current_low is not None:
filtered_percentiles.append(current_low)
current_low = None
else:
filtered_percentiles.append(p)
elif counts[i] < self.min_labs_per_bin:
# We are skipping this one as there are too few counts
if current_low is None:
current_low = p
if (i + 1) < len(percentiles):
counts[i + 1] += counts[i]
if len(filtered_percentiles) == 0:
continue
indices_for_percentiles = list(
range(
self.next_index, self.next_index + len(filtered_percentiles)
)
)
self.next_index += len(filtered_percentiles)
self.code_numeric_dictionary[code] = (
filtered_percentiles,
indices_for_percentiles,
)
def num_columns(self) -> int:
return self.next_index
def transform(
self, patient: timeline.Patient, label_indices: Set[int]
) -> List[List[ColumnValue]]:
all_columns = []
current_codes: Dict[int, int] = defaultdict(int)
for i, day in enumerate(patient.days):
for codeWithValue in day.observations_with_values:
if codeWithValue.code in self.code_numeric_dictionary:
if not codeWithValue.is_text:
(
percentiles,
indices_for_percentiles,
) = self.code_numeric_dictionary[codeWithValue.code]
offset = self.get_percentile(
codeWithValue.numeric_value, percentiles
)
current_codes[indices_for_percentiles[offset]] += 1
if i in label_indices:
all_columns.append(
[
ColumnValue(column, count)
for column, count in current_codes.items()
]
)
return all_columns
def to_dict(self) -> Dict[str, Any]:
return {
"next_index": self.next_index,
"code_numeric_dictionary": list(
self.code_numeric_dictionary.items()
),
}
def from_dict(self, data: Mapping[str, Any]) -> None:
self.next_index = data["next_index"]
self.code_numeric_dictionary = {
code: values for code, values in data["code_numeric_dictionary"]
}
|
[
"collections.namedtuple",
"collections.deque",
"numpy.array",
"collections.defaultdict",
"datetime.date",
"json.load"
] |
[((396, 442), 'collections.namedtuple', 'namedtuple', (['"""ColumnValue"""', "['column', 'value']"], {}), "('ColumnValue', ['column', 'value'])\n", (406, 442), False, 'from collections import defaultdict, deque, namedtuple\n'), ((6342, 6374), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (6350, 6374), True, 'import numpy as np\n'), ((6393, 6426), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int32'}), '(indices, dtype=np.int32)\n', (6401, 6426), True, 'import numpy as np\n'), ((6444, 6476), 'numpy.array', 'np.array', (['indptr'], {'dtype': 'np.int32'}), '(indptr, dtype=np.int32)\n', (6452, 6476), True, 'import numpy as np\n'), ((17250, 17266), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (17261, 17266), False, 'from collections import defaultdict, deque, namedtuple\n'), ((20214, 20231), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20225, 20231), False, 'from collections import defaultdict, deque, namedtuple\n'), ((23364, 23380), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23375, 23380), False, 'from collections import defaultdict, deque, namedtuple\n'), ((6669, 6710), 'numpy.array', 'np.array', (['result_labels'], {'dtype': 'np.float32'}), '(result_labels, dtype=np.float32)\n', (6677, 6710), True, 'import numpy as np\n'), ((6724, 6761), 'numpy.array', 'np.array', (['patient_ids'], {'dtype': 'np.int32'}), '(patient_ids, dtype=np.int32)\n', (6732, 6761), True, 'import numpy as np\n'), ((6775, 6820), 'numpy.array', 'np.array', (['patient_day_indices'], {'dtype': 'np.int32'}), '(patient_day_indices, dtype=np.int32)\n', (6783, 6820), True, 'import numpy as np\n'), ((7459, 7472), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (7468, 7472), False, 'import json\n'), ((13923, 13939), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13934, 13939), False, 'from collections import defaultdict, deque, namedtuple\n'), ((14559, 14566), 'collections.deque', 'deque', ([], {}), '()\n', (14564, 14566), False, 'from collections import defaultdict, deque, namedtuple\n'), ((14700, 14716), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14711, 14716), False, 'from collections import defaultdict, deque, namedtuple\n'), ((14857, 14915), 'datetime.date', 'datetime.date', (['day.date.year', 'day.date.month', 'day.date.day'], {}), '(day.date.year, day.date.month, day.date.day)\n', (14870, 14915), False, 'import datetime\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
alpha = np.random.rand(7)
alpha /= np.linalg.norm(alpha, 1)
n = 40
def index_to_position(index):
p = 0
a, b, c, d, e, f = index
index = [a, d, b, e, c, f]
for i in index:
p = p * n + i
return p
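# Worked example (added for clarity): for n = 40, index_to_position([1, 2, 3, 1, 2, 3]) first
# reorders the index to [a, d, b, e, c, f] = [1, 1, 2, 2, 3, 3] and then flattens it in base n:
# ((((1*40 + 1)*40 + 2)*40 + 2)*40 + 3)*40 + 3 = 105091323.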
if __name__ == "__main__":
with open("fdm.tsv", "w") as f:
for i in range(n):
for j in range(n):
for k in range(n):
alpha = np.random.rand(7)
alpha /= np.linalg.norm(alpha, 1)
p = index_to_position([i, j, k, i, j, k])
print("{}\t{}".format(p, alpha[0]), file=f)
if i - 1 >= 0:
p = index_to_position([i, j, k, i - 1, j, k])
print("{}\t{}".format(p, alpha[1]), file=f)
if i + 1 < n:
p = index_to_position([i, j, k, i + 1, j, k])
print("{}\t{}".format(p, alpha[2]), file=f)
if j - 1 >= 0:
p = index_to_position([i, j, k, i, j - 1, k])
print("{}\t{}".format(p, alpha[3]), file=f)
if j + 1 < n:
p = index_to_position([i, j, k, i, j + 1, k])
print("{}\t{}".format(p, alpha[4]), file=f)
if k - 1 >= 0:
p = index_to_position([i, j, k, i, j, k - 1])
print("{}\t{}".format(p, alpha[5]), file=f)
if k + 1 < n:
p = index_to_position([i, j, k, i, j, k + 1])
print("{}\t{}".format(p, alpha[6]), file=f)
|
[
"numpy.random.rand",
"numpy.linalg.norm"
] |
[((76, 93), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (90, 93), True, 'import numpy as np\n'), ((103, 127), 'numpy.linalg.norm', 'np.linalg.norm', (['alpha', '(1)'], {}), '(alpha, 1)\n', (117, 127), True, 'import numpy as np\n'), ((476, 493), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (490, 493), True, 'import numpy as np\n'), ((523, 547), 'numpy.linalg.norm', 'np.linalg.norm', (['alpha', '(1)'], {}), '(alpha, 1)\n', (537, 547), True, 'import numpy as np\n')]
|
import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange
import trimesh
from skimage import measure
import warnings
import time
from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense
from pipelines.utils.postprocess_utils import remove_backface
class Generator3D(object):
''' Generator class for Local implicit grid Networks.
It provides functions to generate the final mesh as well as refining options.
Args:
model (nn.Module): trained Local implicit grid model
optimizer (object): optimization utility class for optimizing latent grid
part_size (float): size of a part
num_optim_samples (int): number of points to sample at each optimization step
res_per_part (int): marching-cubes evaluation resolution per part (0 means derive it from part_size)
overlap (bool): whether we use overlapping grids
device (device): pytorch device
points_batch (int): number of points we evaluate sdf values each time
conservative (bool): if True, evaluate a grid cell when any of its 8 neighbors contains points; otherwise require all of them
postprocess (bool): whether to use post process to remove back faces
'''
def __init__(self,
model,
optimizer,
part_size=0.25,
num_optim_samples=2048,
res_per_part=0,
overlap=True,
device=None,
points_batch=20000,
conservative=False,
postprocess=True):
self.model = model.to(device)
self.optimizer = optimizer
self.part_size = part_size
self.num_optim_samples = num_optim_samples
if res_per_part == 0:
self.res_per_part = int(64 * self.part_size)
else:
self.res_per_part = res_per_part
self.overlap = overlap
self.device = device
self.points_batch = points_batch
self.conservative = conservative
self.postprocess = postprocess
def generate_mesh(self, data, return_stats=True):
''' Generates the output mesh from inputs loaded from dataset.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
stats_dict = {}
v = data.get('inputs', torch.empty(1, 0)).squeeze(0).cpu().numpy()
n = data.get('inputs.normals', torch.empty(1, 0)).squeeze(0).cpu().numpy()
mesh = self.generate_single_obj_mesh(v, n)
return mesh
def generate_single_obj_mesh(self, v, n):
''' Generates the output mesh of user specified single object.
Args:
v (numpy array): [#v, 3], input point cloud.
n (numpy array): [#v, 3], normals of the input point cloud.
Returns:
mesh (trimesh.Trimesh obj): output mesh object.
'''
device = self.device
surface_points = np.concatenate([v, n], axis=1)
xmin = np.min(v, axis=0)
xmax = np.max(v, axis=0)
# check if part size is too large
min_bb = np.min(xmax - xmin)
if self.part_size > 0.25 * min_bb:
warnings.warn(
'WARNING: part_size seems too large. Recommend using a part_size < '
'{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning)
# add some extra slack to xmin and xmax
xmin -= self.part_size
xmax += self.part_size
#########################################################################
# generate sdf samples from pc
point_samples, sdf_values = sample_points_from_ray(v, n, sample_factor=10, std=0.01)
# shuffle
shuffle_index = np.random.permutation(point_samples.shape[0])
point_samples = point_samples[shuffle_index]
sdf_values = sdf_values[shuffle_index]
#########################################################################
################### only evaluated at sparse grid location ##############
#########################################################################
# get valid girds (we only evaluate on sparse locations)
# _.shape==(total_ncrops, ntarget, v.shape[1]) points within voxel
# occ_idx.shape==(total_ncrops, 3) index of each voxel
# grid_shape == (rr[0], rr[1], rr[2])
_, occ_idx, grid_shape = np_get_occupied_idx(
point_samples[:100000, :3],
# point_samples[:, :3],
xmin=xmin - 0.5 * self.part_size,
xmax=xmax + 0.5 * self.part_size,
crop_size=self.part_size,
ntarget=1, # we do not require `point_crops` (i.e. `_` in returns), so we set it to 1
overlap=self.overlap,
normalize_crops=False,
return_shape=True)
print('LIG shape: {}'.format(grid_shape))
#########################################################################
# treat as one batch
point_samples = torch.from_numpy(point_samples).to(device)
sdf_values = torch.from_numpy(sdf_values).to(device)
occ_idx_tensor = torch.from_numpy(occ_idx).to(device)
point_samples = point_samples.unsqueeze(0) # shape==(1, npoints, 3)
sdf_values = sdf_values.unsqueeze(0) # shape==(1, npoints, 1)
occ_idx_tensor = occ_idx_tensor.unsqueeze(0) # shape==(1, total_ncrops, 3)
# set range for computation
true_shape = ((np.array(grid_shape) - 1) / (2.0 if self.overlap else 1.0)).astype(np.int32)
self.model.set_xrange(xmin=xmin, xmax=xmin + true_shape * self.part_size)
# Clip the point position
xmin_ = self.model.grid_interp_layer.xmin
xmax_ = self.model.grid_interp_layer.xmax
x = point_samples[:, :, 0].clamp(xmin_[0], xmax_[0])
y = point_samples[:, :, 1].clamp(xmin_[1], xmax_[1])
z = point_samples[:, :, 2].clamp(xmin_[2], xmax_[2])
point_samples = torch.stack([x, y, z], dim=2)
# get label (inside==-1, outside==+1)
point_values = torch.sign(sdf_values)
#########################################################################
###################### Build/Optimize latent grid #######################
#########################################################################
# optimize latent grids, shape==(1, *grid_shape, code_len)
print('Optimizing latent codes in LIG...')
latent_grid = self.optimizer.optimize_latent_code(point_samples, point_values, occ_idx_tensor, grid_shape)
#########################################################################
##################### Evaluation (Marching Cubes) #######################
#########################################################################
# sparse occ index to dense occ grids
# (total_ncrops, 3) --> (*grid_shape, ) bool
occ_mask = occupancy_sparse_to_dense(occ_idx, grid_shape)
# points shape to be evaluated
output_grid_shape = list(self.res_per_part * true_shape)
# output_grid is ones, shape==(?, )
# xyz is points to be evaluated (dense, shape==(?, 3))
output_grid, xyz = self.get_eval_grid(xmin=xmin,
xmax=xmin + true_shape * self.part_size,
output_grid_shape=output_grid_shape)
# we only evaluate eval_points
# out_mask is for xyz, i.e. eval_points = xyz[occ_mask]
eval_points, out_mask = self.get_eval_inputs(xyz, xmin, occ_mask)
eval_points = torch.from_numpy(eval_points).to(device)
# evaluate dense grid for marching cubes (on sparse grids)
output_grid = self.generate_occ_grid(latent_grid, eval_points, output_grid, out_mask)
output_grid = output_grid.reshape(*output_grid_shape)
v, f, _, _ = measure.marching_cubes_lewiner(output_grid, 0) # logits==0
v *= (self.part_size / float(self.res_per_part) * (np.array(output_grid.shape, dtype=np.float32) /
(np.array(output_grid.shape, dtype=np.float32) - 1)))
v += xmin
# Create mesh
mesh = trimesh.Trimesh(v, f)
# Post-process the generated mesh to prevent artifacts
if self.postprocess:
print('Postprocessing generated mesh...')
mesh = remove_backface(mesh, surface_points)
return mesh
def get_eval_grid(self, xmin, xmax, output_grid_shape):
"""Initialize the eval output grid and its corresponding grid points.
Args:
xmin (numpy array): [3], minimum xyz values of the entire space.
xmax (numpy array): [3], maximum xyz values of the entire space.
output_grid_shape (list): [3], latent grid shape.
Returns:
output_grid (numpy array): [d*h*w] output grid sdf values.
xyz (numpy array): [d*h*w, 3] grid point xyz coordinates.
"""
# setup grid
eps = 1e-6
l = [np.linspace(xmin[i] + eps, xmax[i] - eps, output_grid_shape[i]) for i in range(3)]
xyz = np.stack(np.meshgrid(l[0], l[1], l[2], indexing='ij'), axis=-1).astype(np.float32)
output_grid = np.ones(output_grid_shape, dtype=np.float32)
xyz = xyz.reshape(-1, 3)
output_grid = output_grid.reshape(-1)
return output_grid, xyz
def get_eval_inputs(self, xyz, xmin, occ_mask):
"""Gathers the points within the grids that any/all of its 8 neighbors
contains points.
If self.conservative is True, gathers the points within the grids that any of its 8 neighbors
contains points.
If self.conservative is False, gathers the points within the grids that all of its 8 neighbors
contains points.
Returns the points need to be evaluate and the mask of the points and the output grid.
Args:
xyz (numpy array): [h*w*d, 3]
xmin (numpy array): [3] minimum value of the entire space.
occ_mask (numpy array): latent grid occupancy mask.
Returns:
eval_points (numpy array): [neval, 3], points to be evaluated.
out_mask (numpy array): [h*w*d], 0 1 value eval mask of the final sdf grid.
"""
mask = occ_mask.astype(np.bool)
if self.overlap:
mask = np.stack([
mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask[:-1, 1:, 1:], mask[1:, :-1, :-1],
mask[1:, :-1, 1:], mask[1:, 1:, :-1], mask[1:, 1:, 1:]
],
axis=-1)
if self.conservative:
mask = np.any(mask, axis=-1)
else:
mask = np.all(mask, axis=-1)
g = np.stack(np.meshgrid(np.arange(mask.shape[0]),
np.arange(mask.shape[1]),
np.arange(mask.shape[2]),
indexing='ij'),
axis=-1).reshape(-1, 3)
g = g[:, 0] * (mask.shape[1] * mask.shape[2]) + g[:, 1] * mask.shape[2] + g[:, 2]
g_valid = g[mask.ravel()] # valid grid index
if self.overlap:
ijk = np.floor((xyz - xmin) / self.part_size * 2).astype(np.int32)
else:
ijk = np.floor((xyz - xmin + 0.5 * self.part_size) / self.part_size).astype(np.int32)
ijk_idx = (ijk[:, 0] * (mask.shape[1] * mask.shape[2]) + ijk[:, 1] * mask.shape[2] + ijk[:, 2])
out_mask = np.isin(ijk_idx, g_valid)
eval_points = xyz[out_mask]
return eval_points, out_mask
def generate_occ_grid(self, latent_grid, eval_points, output_grid, out_mask):
"""Gets the final output occ grid.
Args:
latent_grid (tensor): [1, *grid_shape, latent_size], optimized latent grid.
eval_points (tensor): [neval, 3], points to be evaluated.
output_grid (numpy array): [d*h*w], final output occ grid.
out_mask (numpy array): [d*h*w], mask indicating the grids evaluated.
Returns:
output_grid (numpy array): [d*h*w], final output occ grid flattened.
"""
interp_old = self.model.interp
self.model.interp = True
split = int(np.ceil(eval_points.shape[0] / self.points_batch))
occ_val_list = []
self.model.eval()
with torch.no_grad():
for s in range(split):
sid = s * self.points_batch
eid = min((s + 1) * self.points_batch, eval_points.shape[0])
eval_points_slice = eval_points[sid:eid, :]
occ_vals = self.model.decode(latent_grid, eval_points_slice.unsqueeze(0))
occ_vals = occ_vals.squeeze(0).squeeze(1).cpu().numpy()
occ_val_list.append(occ_vals)
occ_vals = np.concatenate(occ_val_list, axis=0)
output_grid[out_mask] = occ_vals
self.model.interp = interp_old
return output_grid
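# Usage sketch (added for illustration): `model` and `optimizer` stand for whatever trained
# local-implicit-grid network and latent-code optimizer the surrounding pipeline constructs;
# they are assumptions here, not defined in this file.
#
#   generator = Generator3D(model, optimizer, part_size=0.25, device=torch.device('cuda'))
#   mesh = generator.generate_single_obj_mesh(v=points, n=normals)  # both (N, 3) numpy arrays
#   mesh.export('output.obj')                                       # trimesh handles the export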
|
[
"skimage.measure.marching_cubes_lewiner",
"numpy.isin",
"torch.from_numpy",
"numpy.array",
"numpy.arange",
"pipelines.utils.point_utils.sample_points_from_ray",
"pipelines.utils.postprocess_utils.remove_backface",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.concatenate",
"numpy.min",
"numpy.meshgrid",
"numpy.random.permutation",
"numpy.ceil",
"numpy.ones",
"numpy.floor",
"torch.sign",
"numpy.any",
"trimesh.Trimesh",
"torch.empty",
"pipelines.utils.point_utils.occupancy_sparse_to_dense",
"torch.stack",
"torch.no_grad",
"numpy.all",
"pipelines.utils.point_utils.np_get_occupied_idx"
] |
[((2985, 3015), 'numpy.concatenate', 'np.concatenate', (['[v, n]'], {'axis': '(1)'}), '([v, n], axis=1)\n', (2999, 3015), True, 'import numpy as np\n'), ((3032, 3049), 'numpy.min', 'np.min', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (3038, 3049), True, 'import numpy as np\n'), ((3065, 3082), 'numpy.max', 'np.max', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (3071, 3082), True, 'import numpy as np\n'), ((3143, 3162), 'numpy.min', 'np.min', (['(xmax - xmin)'], {}), '(xmax - xmin)\n', (3149, 3162), True, 'import numpy as np\n'), ((3664, 3720), 'pipelines.utils.point_utils.sample_points_from_ray', 'sample_points_from_ray', (['v', 'n'], {'sample_factor': '(10)', 'std': '(0.01)'}), '(v, n, sample_factor=10, std=0.01)\n', (3686, 3720), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((3764, 3809), 'numpy.random.permutation', 'np.random.permutation', (['point_samples.shape[0]'], {}), '(point_samples.shape[0])\n', (3785, 3809), True, 'import numpy as np\n'), ((4459, 4684), 'pipelines.utils.point_utils.np_get_occupied_idx', 'np_get_occupied_idx', (['point_samples[:100000, :3]'], {'xmin': '(xmin - 0.5 * self.part_size)', 'xmax': '(xmax + 0.5 * self.part_size)', 'crop_size': 'self.part_size', 'ntarget': '(1)', 'overlap': 'self.overlap', 'normalize_crops': '(False)', 'return_shape': '(True)'}), '(point_samples[:100000, :3], xmin=xmin - 0.5 * self.\n part_size, xmax=xmax + 0.5 * self.part_size, crop_size=self.part_size,\n ntarget=1, overlap=self.overlap, normalize_crops=False, return_shape=True)\n', (4478, 4684), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((6031, 6060), 'torch.stack', 'torch.stack', (['[x, y, z]'], {'dim': '(2)'}), '([x, y, z], dim=2)\n', (6042, 6060), False, 'import torch\n'), ((6131, 6153), 'torch.sign', 'torch.sign', (['sdf_values'], {}), '(sdf_values)\n', (6141, 6153), False, 'import torch\n'), ((7000, 7046), 'pipelines.utils.point_utils.occupancy_sparse_to_dense', 'occupancy_sparse_to_dense', (['occ_idx', 'grid_shape'], {}), '(occ_idx, grid_shape)\n', (7025, 7046), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((7973, 8019), 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['output_grid', '(0)'], {}), '(output_grid, 0)\n', (8003, 8019), False, 'from skimage import measure\n'), ((8309, 8330), 'trimesh.Trimesh', 'trimesh.Trimesh', (['v', 'f'], {}), '(v, f)\n', (8324, 8330), False, 'import trimesh\n'), ((9354, 9398), 'numpy.ones', 'np.ones', (['output_grid_shape'], {'dtype': 'np.float32'}), '(output_grid_shape, dtype=np.float32)\n', (9361, 9398), True, 'import numpy as np\n'), ((11635, 11660), 'numpy.isin', 'np.isin', (['ijk_idx', 'g_valid'], {}), '(ijk_idx, g_valid)\n', (11642, 11660), True, 'import numpy as np\n'), ((12965, 13001), 'numpy.concatenate', 'np.concatenate', (['occ_val_list'], {'axis': '(0)'}), '(occ_val_list, axis=0)\n', (12979, 13001), True, 'import numpy as np\n'), ((8497, 8534), 'pipelines.utils.postprocess_utils.remove_backface', 'remove_backface', (['mesh', 'surface_points'], {}), '(mesh, surface_points)\n', (8512, 8534), False, 'from pipelines.utils.postprocess_utils import remove_backface\n'), ((9151, 9214), 'numpy.linspace', 'np.linspace', (['(xmin[i] + eps)', '(xmax[i] - eps)', 'output_grid_shape[i]'], {}), '(xmin[i] + eps, xmax[i] - eps, output_grid_shape[i])\n', (9162, 9214), True, 'import numpy as 
np\n'), ((10487, 10671), 'numpy.stack', 'np.stack', (['[mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask[:-1, 1:,\n 1:], mask[1:, :-1, :-1], mask[1:, :-1, 1:], mask[1:, 1:, :-1], mask[1:,\n 1:, 1:]]'], {'axis': '(-1)'}), '([mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask\n [:-1, 1:, 1:], mask[1:, :-1, :-1], mask[1:, :-1, 1:], mask[1:, 1:, :-1],\n mask[1:, 1:, 1:]], axis=-1)\n', (10495, 10671), True, 'import numpy as np\n'), ((12389, 12438), 'numpy.ceil', 'np.ceil', (['(eval_points.shape[0] / self.points_batch)'], {}), '(eval_points.shape[0] / self.points_batch)\n', (12396, 12438), True, 'import numpy as np\n'), ((12505, 12520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12518, 12520), False, 'import torch\n'), ((5072, 5103), 'torch.from_numpy', 'torch.from_numpy', (['point_samples'], {}), '(point_samples)\n', (5088, 5103), False, 'import torch\n'), ((5136, 5164), 'torch.from_numpy', 'torch.from_numpy', (['sdf_values'], {}), '(sdf_values)\n', (5152, 5164), False, 'import torch\n'), ((5201, 5226), 'torch.from_numpy', 'torch.from_numpy', (['occ_idx'], {}), '(occ_idx)\n', (5217, 5226), False, 'import torch\n'), ((7686, 7715), 'torch.from_numpy', 'torch.from_numpy', (['eval_points'], {}), '(eval_points)\n', (7702, 7715), False, 'import torch\n'), ((8092, 8137), 'numpy.array', 'np.array', (['output_grid.shape'], {'dtype': 'np.float32'}), '(output_grid.shape, dtype=np.float32)\n', (8100, 8137), True, 'import numpy as np\n'), ((10794, 10815), 'numpy.any', 'np.any', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (10800, 10815), True, 'import numpy as np\n'), ((10857, 10878), 'numpy.all', 'np.all', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (10863, 10878), True, 'import numpy as np\n'), ((8200, 8245), 'numpy.array', 'np.array', (['output_grid.shape'], {'dtype': 'np.float32'}), '(output_grid.shape, dtype=np.float32)\n', (8208, 8245), True, 'import numpy as np\n'), ((9257, 9301), 'numpy.meshgrid', 'np.meshgrid', (['l[0]', 'l[1]', 'l[2]'], {'indexing': '"""ij"""'}), "(l[0], l[1], l[2], indexing='ij')\n", (9268, 9301), True, 'import numpy as np\n'), ((11339, 11382), 'numpy.floor', 'np.floor', (['((xyz - xmin) / self.part_size * 2)'], {}), '((xyz - xmin) / self.part_size * 2)\n', (11347, 11382), True, 'import numpy as np\n'), ((11432, 11494), 'numpy.floor', 'np.floor', (['((xyz - xmin + 0.5 * self.part_size) / self.part_size)'], {}), '((xyz - xmin + 0.5 * self.part_size) / self.part_size)\n', (11440, 11494), True, 'import numpy as np\n'), ((5530, 5550), 'numpy.array', 'np.array', (['grid_shape'], {}), '(grid_shape)\n', (5538, 5550), True, 'import numpy as np\n'), ((10913, 10937), 'numpy.arange', 'np.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (10922, 10937), True, 'import numpy as np\n'), ((10972, 10996), 'numpy.arange', 'np.arange', (['mask.shape[1]'], {}), '(mask.shape[1])\n', (10981, 10996), True, 'import numpy as np\n'), ((11031, 11055), 'numpy.arange', 'np.arange', (['mask.shape[2]'], {}), '(mask.shape[2])\n', (11040, 11055), True, 'import numpy as np\n'), ((2381, 2398), 'torch.empty', 'torch.empty', (['(1)', '(0)'], {}), '(1, 0)\n', (2392, 2398), False, 'import torch\n'), ((2464, 2481), 'torch.empty', 'torch.empty', (['(1)', '(0)'], {}), '(1, 0)\n', (2475, 2481), False, 'import torch\n')]
|
import numpy as np
from time import time
from typing import List, Tuple
from tsp_heuristics.heuristics.utils import get_tour_distance
def nn_algo(
dist_matrix: np.array,
start: int = 0
) -> Tuple[List, float, float]:
"""
From a start city index, get a Tour according to the Nearest Neighbor
algorithm from the collection of the cities indexes.
Args:
dist_matrix (np.array)
start (int, optional): The first city that we will begin the Tour and
eventually return. Defaults to 0.
Returns:
list: List of indexes representing the city Tour.
float: Total distance of the Tour.
float: Time to complete the algorithm.
"""
t0 = time()
Tour = [start]
dist_matrix = dist_matrix.astype(float)
# Making the distance to go to the same
# city impossible.
for i in range(dist_matrix.shape[0]):
dist_matrix[i][i] = np.Inf
for _ in range(dist_matrix.shape[0] - 1):
# Finding the best next city.
min_index = np.argmin(dist_matrix[Tour[-1]])
# Making sure that we won't revisit
# the same city.
for t in Tour:
dist_matrix[min_index][t] = np.Inf
dist_matrix[t][min_index] = np.Inf
Tour.append(min_index)
return Tour, get_tour_distance(Tour, dist_matrix), (time() - t0)
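# Usage sketch added for illustration; the 4-city distance matrix below is made up.
if __name__ == "__main__":
    toy_dist = np.array([
        [0.0, 2.0, 9.0, 10.0],
        [2.0, 0.0, 6.0, 4.0],
        [9.0, 6.0, 0.0, 3.0],
        [10.0, 4.0, 3.0, 0.0],
    ])
    tour, length, elapsed = nn_algo(toy_dist, start=0)
    print("tour:", tour, "distance:", length, "seconds:", elapsed)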
|
[
"numpy.argmin",
"time.time",
"tsp_heuristics.heuristics.utils.get_tour_distance"
] |
[((666, 672), 'time.time', 'time', ([], {}), '()\n', (670, 672), False, 'from time import time\n'), ((994, 1026), 'numpy.argmin', 'np.argmin', (['dist_matrix[Tour[-1]]'], {}), '(dist_matrix[Tour[-1]])\n', (1003, 1026), True, 'import numpy as np\n'), ((1266, 1302), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'get_tour_distance', (['Tour', 'dist_matrix'], {}), '(Tour, dist_matrix)\n', (1283, 1302), False, 'from tsp_heuristics.heuristics.utils import get_tour_distance\n'), ((1305, 1311), 'time.time', 'time', ([], {}), '()\n', (1309, 1311), False, 'from time import time\n')]
|
import os
import numpy as np
from sys import platform, path
if platform == "linux" or platform == "linux2":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
elif platform == "win32":
path.insert(1, os.path.dirname(os.getcwd()) + "\\src")
FILE_NAME = os.path.dirname(os.getcwd()) + "\\data" + "\\xAPI-Edu-Data-Edited.csv"
elif platform == "darwin":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
from DataPreprocessing import Preprocess, FeaturePreprocess
from DataProcessing import ModelTuning, ModelValidating, save_file, load_file
CATEGORICAL_COLUMNS = ["Gender", "Nationality", "PlaceofBirth", "StageID", "GradeID", "SectionID", "Topic",
"Semester", "Relation", "ParentAnsweringSurvey", "ParentSchoolSatisfaction",
"StudentAbsenceDays"]
PREFIXES = ["Gender", "Nationality", "PlaceofBirth", "Stage", "Grade", "Section", "Topic",
"Semester", "Relation", "Survey", "ParentSatisfaction",
"Absence"]
REMOVE_VALUES = ["G-05", "G-09"]
def preprocess_data(count_missing=False, replace_values=True, remove_values=False, encode=True,
categorical_columns=CATEGORICAL_COLUMNS,
prefixes=PREFIXES):
"""Preprocesses the raw dataset
Parameters
----------
count_missing : bool, default=False
Counts all missing values in the dataset
replace_values : bool, default=True
Replaces non-significant values in the columns "Nationality" and "PlaceofBirth" with "Other"
remove_values : bool, default=False
Removes rows with non-significant values in the column "GradeID"
encode : bool, default=True
One Hot encodes categorical columns
categorical_columns : list of str, defaut=(categorical columns of the dataset)
Columns to apply one hot encode to
prefixes : list of str, default="["Gender", "Nationality", "PlaceofBirth", "Stage", "Grade", "Section", "Topic",
"Semester", "Relation", "Survey", "ParentSatisfaction",
"Absence"]"
Prefixes for one hot encoding
Returns
----------
X_data : pandas df
feature columns
y_data : pandas df
target columns
y_labels : {ndarray, sparse matrix}
class labels
"""
preprocess = Preprocess(data=FILE_NAME)
if count_missing:
print(f"Number of rows missing values: {preprocess.check_missing_values()}")
if replace_values:
preprocess.replace_values("Nationality",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
preprocess.replace_values("PlaceofBirth",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
if remove_values:
preprocess.remove_values("GradeID", REMOVE_VALUES)
if encode:
preprocess.target_encode()
preprocess.one_hot_encode(columns=categorical_columns, prefix=prefixes)
X_data, y_data = preprocess.get_data()
y_labels = preprocess.target_decode()
return X_data, y_data, y_labels
X_data, y_data = preprocess.get_data()
return X_data, y_data
def preprocess_features(X_data, scaler_type="standard", n_components=None, plot_pca=False, threshold=0.85,
savefig=True):
"""
processes feature columns with a scaler and pca
Parameters
----------
X_data : pandas df
feature Columns
scaler_type : str, default="standard"
scaler to use ('standard'/'min_max')
n_components : int, default=None
pca components to use, if 'None' uses all components
plot_pca : bool, defaut=True
specifies if pca should be plotted
threshold : float range(0,1), default=0.85
pca variance threshold to plot vertical line at
savefig : bool, default=True
specifies if pca plot should be saved
Returns
----------
X_transformed : ndarray
preprocessed feature columns
feature_preprocess : feature_preprocess object
feature_preprocess object used (for the pipeline)
"""
if n_components is None:
n_components = len(X_data.columns)
feature_preprocess = FeaturePreprocess(X_data, n_components=n_components, scaler_type=scaler_type)
X_transformed = feature_preprocess.transform_data()
if plot_pca:
feature_preprocess.plot_pca(threshold=threshold, savefig=savefig)
return X_transformed, feature_preprocess
def create_estimators(X_data, y_data, train_size=0.7, hyperparam_tune=True, boosting=True, random_state=42,
verbose=1):
"""Splits the data in train, test and val, trains three different estimators: Decision Tree, Support Vector Machine
and Random Forest, can also tune the hyper parameters and boost the estimators with Adaboost
Parameters
----------
X_data : pandas df
feature Columns
y_data : pandas df
target column
train_size : float
Percentage for train
hyperparam_tune : bool, default=True
specifies if hyper params should be tuned
boosting : bool, default=True
specifies if estimators should be boosted
random_state : int, default=42
random state
verbose : int, default=1
verbosity level
Returns
----------
estimators : list of estimators
trained estimators
mt : ModelTuning object
ModelTuning object used (for validation set)
"""
estimators = []
mt = ModelTuning(X_data, y_data, train_size, random_state=random_state)
if verbose > 0:
print("Creating Basic Estimators...\n")
dt = mt.create_weak_learner(random_state, verbose, model_type="dt")
svm = mt.create_weak_learner(random_state, verbose, model_type="svm")
rf = mt.create_random_forest(random_state, verbose)
estimators.extend([dt, svm, rf])
if hyperparam_tune:
if verbose > 0:
print("Tunning Hyperparams...\n")
tuned_dt = mt.tune_hyperparam(dt, random_state, verbose)
tuned_svm = mt.tune_hyperparam(svm, random_state, verbose)
tuned_rf = mt.tune_hyperparam(rf, random_state, verbose)
estimators.extend([tuned_dt, tuned_svm, tuned_rf])
if boosting:
if verbose > 0:
print("Boosting...\n")
print("Boosted dt:")
boosted_dt = mt.boost_weak_learners(tuned_dt, random_state, verbose)
if verbose > 0:
print("Boosted svm:")
boosted_svm = mt.boost_weak_learners(tuned_svm, random_state, verbose)
if verbose > 0:
print("Boosted rf:")
boosted_rf = mt.boost_weak_learners(tuned_rf, random_state, verbose)
estimators.extend([boosted_dt, boosted_svm, boosted_rf])
return estimators, mt
def get_x_y_set(mt, type="test"):
"""Gets data set from ModelTuning object
Parameters
----------
mt : ModelTuning object
ModelTuning object used
type : str, default="test"
specifies which set to return ('train'/'test'/'val')
Returns
----------
X_data, y_data : ndarray
"""
if type == "val":
return mt.get_validation_set()
if type == "train":
return mt.get_train_set()
if type == "test":
return mt.get_test_set()
def validate_estimators(estimators, X_val, y_val, y_labels, scaler_type="", plot_cf=True, clas_report=True,
savefig=True):
"""Validates estimators
Parameters
----------
estimators : list of estimators
estimators to validate
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
scaler_type : str, optional
scaler used ('standard'/'min_max') (for plots)
plot_cf : bool, default=True
specifies if confusion matrix should be plot
clas_report : bool, default=True
specifies if Classification Report should be printed
savefig : bool, default=True
specifies if confusion matrix should be saved as .png
"""
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler=scaler_type)
if plot_cf:
mv.plot_confusion_matrix(savefig=savefig)
if clas_report:
report = mv.classification_report()
print(f"Classification Report: {est}\n{report}")
def get_n_best(estimators, X_val, y_val, y_labels, best_n=3, score="f1_score"):
"""Gets best estimators from list
Parameters
----------
estimators : list of estimators
list of trained estimators
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
best_n : int, default=3
number of estimators to pick
score : str, default="f1_score"
metric to use for picking best estimators ('accuracy'/'f1_score')
Returns
----------
best_est : list of estimators of len=´best_n´
"""
best_scores = []
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler="")
indv_scores = mv.get_scores()
if score == "accuracy":
best_scores.append(indv_scores[0])
if score == "f1_score":
best_scores.append(indv_scores[1])
best_idx = np.argpartition(best_scores, -best_n)[-best_n:]
best_est = []
for index in best_idx:
best_est.append(estimators[index])
return best_est
def save(models, file_name=None, suffix=None):
"""Saves estimator
Parameters
----------
file_name : str, optional
name for the file if None model will be saved with suffix
suffix : str, optional
suffix to be added
"""
if file_name is None:
for model in models:
save_file(model, suffix=suffix)
else:
save_file(models, file_name=file_name)
def load(model_name):
"""Loads and returns pickle File
"""
return load_file(model_name)
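# End-to-end usage sketch (added for illustration): it simply chains the helpers defined above
# and assumes the xAPI-Edu-Data CSV referenced by FILE_NAME is available on disk.
#
#   X_data, y_data, y_labels = preprocess_data()
#   X_scaled, pipeline = preprocess_features(X_data, scaler_type="standard", plot_pca=False)
#   estimators, mt = create_estimators(X_scaled, y_data, train_size=0.7, verbose=0)
#   X_val, y_val = get_x_y_set(mt, type="val")
#   validate_estimators(estimators, X_val, y_val, y_labels, plot_cf=False)
#   best = get_n_best(estimators, X_val, y_val, y_labels, best_n=3)
#   save(best, suffix="best")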
|
[
"numpy.argpartition",
"DataPreprocessing.FeaturePreprocess",
"os.getcwd",
"DataProcessing.save_file",
"DataProcessing.load_file",
"DataProcessing.ModelValidating",
"DataPreprocessing.Preprocess",
"DataProcessing.ModelTuning"
] |
[((2478, 2504), 'DataPreprocessing.Preprocess', 'Preprocess', ([], {'data': 'FILE_NAME'}), '(data=FILE_NAME)\n', (2488, 2504), False, 'from DataPreprocessing import Preprocess, FeaturePreprocess\n'), ((4600, 4677), 'DataPreprocessing.FeaturePreprocess', 'FeaturePreprocess', (['X_data'], {'n_components': 'n_components', 'scaler_type': 'scaler_type'}), '(X_data, n_components=n_components, scaler_type=scaler_type)\n', (4617, 4677), False, 'from DataPreprocessing import Preprocess, FeaturePreprocess\n'), ((5903, 5969), 'DataProcessing.ModelTuning', 'ModelTuning', (['X_data', 'y_data', 'train_size'], {'random_state': 'random_state'}), '(X_data, y_data, train_size, random_state=random_state)\n', (5914, 5969), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((10446, 10467), 'DataProcessing.load_file', 'load_file', (['model_name'], {}), '(model_name)\n', (10455, 10467), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((8534, 8607), 'DataProcessing.ModelValidating', 'ModelValidating', (['est', 'X_val', 'y_val'], {'y_labels': 'y_labels', 'scaler': 'scaler_type'}), '(est, X_val, y_val, y_labels=y_labels, scaler=scaler_type)\n', (8549, 8607), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((9514, 9578), 'DataProcessing.ModelValidating', 'ModelValidating', (['est', 'X_val', 'y_val'], {'y_labels': 'y_labels', 'scaler': '""""""'}), "(est, X_val, y_val, y_labels=y_labels, scaler='')\n", (9529, 9578), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((9792, 9829), 'numpy.argpartition', 'np.argpartition', (['best_scores', '(-best_n)'], {}), '(best_scores, -best_n)\n', (9807, 9829), True, 'import numpy as np\n'), ((10327, 10365), 'DataProcessing.save_file', 'save_file', (['models'], {'file_name': 'file_name'}), '(models, file_name=file_name)\n', (10336, 10365), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((10277, 10308), 'DataProcessing.save_file', 'save_file', (['model'], {'suffix': 'suffix'}), '(model, suffix=suffix)\n', (10286, 10308), False, 'from DataProcessing import ModelTuning, ModelValidating, save_file, load_file\n'), ((144, 155), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (153, 155), False, 'import os\n'), ((199, 210), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (208, 210), False, 'import os\n'), ((313, 324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (322, 324), False, 'import os\n'), ((369, 380), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (378, 380), False, 'import os\n'), ((486, 497), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (495, 497), False, 'import os\n'), ((541, 552), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (550, 552), False, 'import os\n')]
|
import gym
from baselines import deepq
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame
from cloud_environment import CloudEnvironment
import numpy as np
import collections
import os
import csv
import pandas as pd
#Logging
def logger_callback(locals,globals):
done = locals['done']
num_episodes = locals['num_episodes']
log_action_l = locals['log_action_l'] # actions chosen in current episode step
log_action_l.append(locals['action'])
if done:
action_counter = collections.Counter(log_action_l).items()
reward_sum = np.sum(locals['episode_rewards'])
reward_mean = np.mean(locals['episode_rewards'])
c_reward_sum = np.sum(locals['cumulative_episode_rewards'])
c_reward_mean = np.mean(locals['cumulative_episode_rewards'])
path = locals['test_file_path']
print("Writing episode {} log to ".format(num_episodes), path)
with open(path, 'a') as f:
env = locals['env']
actions_np = np.zeros(env.action_space.n)
for k, v in action_counter:
actions_np[k] = v
action_count_header = ['action_count{}'.format(i) for i in range(env.action_space.n)]
#action_q_header = ['mean_action_q{}'.format(i) for i in range(len(episode_q_t.tolist()))]
headers = ['episode','reward_sum','reward_mean','c_reward_sum','c_reward_mean']
#headers = headers + action_q_header+action_count_header
headers = headers + action_count_header
action_counts = list(actions_np)
#actions_qs = [q for q in episode_q_t.tolist()]
#output_list = [num_episodes]+[steps]+[rew100]+[rew50]+[rew10]+[episode_q_t_selected]+[episode_q_t_targets]+[episode_td_errors]+[episode_errors]+ actions_qs+action_counts
output_list = [num_episodes] + [reward_sum] + [reward_mean] + [c_reward_sum] + [c_reward_mean] + action_counts
print(headers)
print(output_list)
w = csv.writer(f)
if os.stat(path).st_size == 0:
w.writerow(headers)
w.writerow(output_list)
return False
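# Added note: the callback above appends one row per finished episode, so the resulting log can
# be inspected with pandas (`path` is whatever test_file_path the training loop provides):
#
#   log_df = pd.read_csv(path)  # columns: episode, reward_sum, reward_mean,
#                               #          c_reward_sum, c_reward_mean, action_count0..N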
def result_callback(ci_list,episode_list,locals,globals):
nps = len(episode_list[0])-len(ci_list[0])
cis_l = [[np.nan]*nps + cil for cil in ci_list]
e_df = pd.concat(episode_list,axis=0).reset_index(drop=True)
ci_df = pd.DataFrame(np.concatenate(cis_l),columns=['ci']).reset_index(drop=True)
output_df = pd.concat([e_df,ci_df],axis=1)
output_df.dropna(inplace=True)
output_df = output_df.reset_index(drop=True)
output_df.to_csv('eval_predictions.csv')
def main():
load_cpk="/home/nox/Masterarbeit/thesis_data/baseline_rl/simple_rl/7_unbalanced_test/experiments_unbalanced/cloud_model.pkl"
channels=3
seq_length=2
img_size=84
env = CloudEnvironment(img_size=img_size,radius=[12,13],sequence_stride=1,channels=channels,sequence_length=seq_length,ramp_step=0.1,action_type=1,action_nr=3,stochastic_irradiance=True,save_images=True)
#Note: cloud speed can be changes but may also require different ramps.. default, speed of cloud per frame at least 1 pixel in y direction
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
channels=channels,
seq_length=seq_length,
img_size=img_size
)
deepq.test(load_cpk=load_cpk,
result_callback=result_callback,
env=env,
q_func=model,
log_callback=logger_callback,
episode_n=1
)
if __name__ == '__main__':
main()
|
[
"numpy.mean",
"cloud_environment.CloudEnvironment",
"csv.writer",
"baselines.deepq.test",
"numpy.sum",
"numpy.zeros",
"collections.Counter",
"baselines.deepq.models.cnn_to_mlp",
"numpy.concatenate",
"os.stat",
"pandas.concat"
] |
[((2638, 2670), 'pandas.concat', 'pd.concat', (['[e_df, ci_df]'], {'axis': '(1)'}), '([e_df, ci_df], axis=1)\n', (2647, 2670), True, 'import pandas as pd\n'), ((3001, 3216), 'cloud_environment.CloudEnvironment', 'CloudEnvironment', ([], {'img_size': 'img_size', 'radius': '[12, 13]', 'sequence_stride': '(1)', 'channels': 'channels', 'sequence_length': 'seq_length', 'ramp_step': '(0.1)', 'action_type': '(1)', 'action_nr': '(3)', 'stochastic_irradiance': '(True)', 'save_images': '(True)'}), '(img_size=img_size, radius=[12, 13], sequence_stride=1,\n channels=channels, sequence_length=seq_length, ramp_step=0.1,\n action_type=1, action_nr=3, stochastic_irradiance=True, save_images=True)\n', (3017, 3216), False, 'from cloud_environment import CloudEnvironment\n'), ((3355, 3521), 'baselines.deepq.models.cnn_to_mlp', 'deepq.models.cnn_to_mlp', ([], {'convs': '[(32, 8, 4), (64, 4, 2), (64, 3, 1)]', 'hiddens': '[256]', 'dueling': '(True)', 'channels': 'channels', 'seq_length': 'seq_length', 'img_size': 'img_size'}), '(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], hiddens\n =[256], dueling=True, channels=channels, seq_length=seq_length,\n img_size=img_size)\n', (3378, 3521), False, 'from baselines import deepq\n'), ((3571, 3703), 'baselines.deepq.test', 'deepq.test', ([], {'load_cpk': 'load_cpk', 'result_callback': 'result_callback', 'env': 'env', 'q_func': 'model', 'log_callback': 'logger_callback', 'episode_n': '(1)'}), '(load_cpk=load_cpk, result_callback=result_callback, env=env,\n q_func=model, log_callback=logger_callback, episode_n=1)\n', (3581, 3703), False, 'from baselines import deepq\n'), ((611, 644), 'numpy.sum', 'np.sum', (["locals['episode_rewards']"], {}), "(locals['episode_rewards'])\n", (617, 644), True, 'import numpy as np\n'), ((671, 705), 'numpy.mean', 'np.mean', (["locals['episode_rewards']"], {}), "(locals['episode_rewards'])\n", (678, 705), True, 'import numpy as np\n'), ((733, 777), 'numpy.sum', 'np.sum', (["locals['cumulative_episode_rewards']"], {}), "(locals['cumulative_episode_rewards'])\n", (739, 777), True, 'import numpy as np\n'), ((806, 851), 'numpy.mean', 'np.mean', (["locals['cumulative_episode_rewards']"], {}), "(locals['cumulative_episode_rewards'])\n", (813, 851), True, 'import numpy as np\n'), ((1080, 1108), 'numpy.zeros', 'np.zeros', (['env.action_space.n'], {}), '(env.action_space.n)\n', (1088, 1108), True, 'import numpy as np\n'), ((2147, 2160), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2157, 2160), False, 'import csv\n'), ((2480, 2511), 'pandas.concat', 'pd.concat', (['episode_list'], {'axis': '(0)'}), '(episode_list, axis=0)\n', (2489, 2511), True, 'import pandas as pd\n'), ((543, 576), 'collections.Counter', 'collections.Counter', (['log_action_l'], {}), '(log_action_l)\n', (562, 576), False, 'import collections\n'), ((2560, 2581), 'numpy.concatenate', 'np.concatenate', (['cis_l'], {}), '(cis_l)\n', (2574, 2581), True, 'import numpy as np\n'), ((2181, 2194), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (2188, 2194), False, 'import os\n')]
|
#!/usr/bin/env python
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import scipy.io
import glob
import os
import csv
import random
import tensorflow as tf
import transition_model_common as tm
# import sys
# sys.path.append('./tensorflow_hmm')
# import tensorflow_hmm.hmm as hmm
def train_model():
dl = tm.DataLoader()
n_examples = dl.num_examples
n_input = dl.feature_len
n_classes = dl.num_labels
# Parameters
learning_rate = 0.01
training_epochs = 5000
batch_size = 100
display_step = 50
tmm = tm.create_model(n_input, n_classes, train=True)
# Define loss and optimizer
# residual_pre = tf.reduce_mean(tf.squared_difference(x_pre, ae_pre_out))
residual_post = tf.reduce_mean(tf.squared_difference(tmm.x_post, tmm.ae_post_out))
# cost_current = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_current, labels=y_current))
cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tmm.pred_next, labels=tmm.y_next))
regularizer = tf.nn.l2_loss(tmm.pred_weights[0])
for i in range(1, len(tmm.pred_weights)):
regularizer += tf.nn.l2_loss(tmm.pred_weights[i])
# total_loss = 0.01 * (residual_pre + residual_post) + cost_current + cost_next
total_loss = 0.01 * (residual_post) + cost_next + 0.001 * regularizer
# total_loss = cost_next + cost_current
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
# Initializing the variables
init = tf.global_variables_initializer()
# Calculate accuracy
# correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
# accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
writer = tf.summary.FileWriter("tensorboard/train", sess.graph)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
x_pre_batch, x_post_batch, y_current_batch, y_next_batch = dl.next_training_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
# feed = {x_pre: x_pre_batch, x_post: x_post_batch, y_current: y_current_batch, y_next: y_next_batch }
feed = {tmm.x_post: x_post_batch,
tmm.y_current: y_current_batch,
tmm.y_next: y_next_batch,
tmm.keep_prob: 0.7}
_, c = sess.run([optimizer, total_loss], feed_dict=feed)
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print('Epoch: {:04d} cost: {:.9f}'.format(epoch, avg_cost))
# print(' train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data, y_next: dl.training_next_action})))
# print(' test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data, y_next: dl.testing_next_action})))
print(' train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data,
tmm.y_current: dl.training_current_action,
tmm.y_next: dl.training_next_action,
tmm.keep_prob: 1.0})))
print(' test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data,
tmm.y_current: dl.testing_current_action,
tmm.y_next: dl.testing_next_action,
tmm.keep_prob: 1.0})))
# print(' train accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.training_pre_data, tmm.x_post: dl.training_post_data, y_current: dl.training_current_action})))
# print(' test accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.testing_pre_data, tmm.x_post: dl.testing_post_data, y_current: dl.testing_current_action})))
test_action_accuracy(accuracy_next, tmm, dl, training=False)
print("Optimization Finished!")
if not os.path.exists('./models/transition'):
os.mkdir('./models/transition')
saver.save(sess, './models/transition/model.ckpt')
writer.close()
def train_mapping():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
tmm = tm.create_model(n_input, n_classes, train=True)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/transition/model.ckpt')
rdl = tm.RobotDataLoader(dl, tmm.x_post, tmm.ae_post_enc, tmm.keep_prob)
robot_test_data, human_test_data, y_current_test_data, y_next_test_data = rdl.extract_data_as_arrays(train=False)
n_dim1 = rdl.human_enc_dim
n_dim2 = rdl.robot_dim
# tf Graph input
# x = tf.placeholder('float', [None, n_dim2], name='x_robot_enc')
y_gt = tf.placeholder('float', [None, n_dim1], name='y_human_gt')
# y = create_mapping_model(x, n_dim2, n_dim1, train=True)
x = tmm.x_map_input
y = tmm.y_map_output
# Parameters
learning_rate = 0.001
training_epochs = 10000
batch_size = 100
display_step = 50
total_batch = 20
# Define loss and optimizer
# cost_next = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_next, labels=y_next))
residual = tf.reduce_mean(tf.squared_difference(y, y_gt))
regularizers = tf.nn.l2_loss(tmm.mapping_weights[0])
for i in range(1, len(tmm.mapping_weights)):
regularizers += tf.nn.l2_loss(tmm.mapping_weights[i])
total_loss = residual + 0.001 * regularizers
# total_loss = residual
# total_loss = 0.01 * residual + cost_next
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss, var_list=[ae_post_out, y_current, y_next, x, y_gt, keep_prob])
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)
new_vars = []
for var in tf.global_variables():
if 'mapping' in var.name or 'beta' in var.name:
new_vars.append(var)
# Initializing the variables
#init = tf.global_variables_initializer()
# init = tf.initialize_variables(new_vars)
init = tf.variables_initializer(new_vars)
# Launch the graph
sess.run(init)
writer = tf.summary.FileWriter("tensorboard/map", sess.graph)
# Calculate accuracy
# correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
# accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
num_training = training_epochs * total_batch * batch_size
# robot data projected to human subspace
mapped_robot_data = np.zeros((num_training, n_dim1), dtype=np.float)
action_idx_data = np.full((num_training, len(dl.index_name)), -2, dtype=np.int)
next_action_idx_data = np.full((num_training, len(dl.index_name)), -2, dtype=np.int)
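# Buffers that accumulate every mapped robot sample (plus one-hot current/next action labels)
# produced during training; the filled slice [0:data_idx] is fed to the accuracy evaluation below.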
data_idx = 0
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
# Loop over all batches
for i in range(total_batch):
x_batch, y_batch, action_idx_batch, next_action_idx_batch = rdl.next_training_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
feed = {x: x_batch, y_gt: y_batch, tmm.keep_prob: 0.7}
_, c = sess.run([optimizer, total_loss], feed_dict=feed)
# Compute average loss
avg_cost += c / total_batch
# collect data to feed to accuracy eval (testing)
mapped_robot_enc_test = tmm.y_map_output.eval({tmm.x_map_input: robot_test_data, tmm.keep_prob: 1.0})
action_idx_test = dl.one_hot(y_current_test_data, len(dl.index_name))
next_action_idx_test = dl.one_hot(y_next_test_data, len(dl.index_name))
# collect data to feed to accuracy eval (training)
mapped_robot_enc = tmm.y_map_output.eval({tmm.x_map_input: x_batch, tmm.keep_prob: 1.0})
action_idx = dl.one_hot(action_idx_batch, len(dl.index_name))
next_action_idx = dl.one_hot(next_action_idx_batch, len(dl.index_name))
mapped_robot_data[data_idx:data_idx+batch_size,:] = mapped_robot_enc
action_idx_data[data_idx:data_idx+batch_size,:] = action_idx
next_action_idx_data[data_idx:data_idx+batch_size,:] = next_action_idx
data_idx += batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print('Epoch: {:04d} cost: {:.9f}'.format(epoch, avg_cost))
print(' accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.ae_post_enc: mapped_robot_data[0:data_idx],
tmm.y_current: action_idx_data[0:data_idx],
tmm.y_next: next_action_idx_data[0:data_idx]})))
# test_action_accuracy_map(accuracy_next, ae_post_enc, y_current, y_next,
# mapped_robot_data[0:data_idx], action_idx_data[0:data_idx],
# next_action_idx_data[0:data_idx], dl, train=True)
test_action_accuracy_map(accuracy_next, tmm, mapped_robot_enc_test,
action_idx_test, next_action_idx_test, dl, False)
print("Optimization Finished!")
if not os.path.exists('./models/map'):
os.mkdir('./models/map')
saver.save(sess, './models/map/model.ckpt')
writer.close()
'''
Map from robot state to human (encoded) state
'''
def run_mapping():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
n_dim1 = 6
n_dim2 = 7
tmm = tm.create_model(n_input, n_classes, train=True)
pred_next_sm = tf.nn.softmax(tmm.pred_next)
# pred_current_sm = tf.nn.softmax(pred_current)
# tf Graph input
# x = tf.placeholder('float', [None, n_dim2], name='x_robot_enc')
# y = create_mapping_model(x, n_dim2, n_dim1, train=False)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/map/model.ckpt')
rdl = tm.RobotDataLoader(dl, tmm.x_post, tmm.ae_post_enc)
for i in range(10):
y_human, x_robot, action_idx, next_action_idx = rdl.get_random_pair()
# y_output_pre = y_map_output.eval({x_map_input: np.expand_dims(x_robot[0], axis=0)})
y_action = dl.one_hot(np.full((1,), action_idx), len(dl.index_name))
y_output_post = tmm.y_map_output.eval({tmm.x_map_input: np.reshape(x_robot, (1,7)),
tmm.keep_prob: 1.0})
# res_current = pred_current_sm.eval({ae_pre_enc: y_output_pre, ae_post_enc: y_output_post})
res_next = pred_next_sm.eval({tmm.ae_post_enc: y_output_post,
tmm.y_current: y_action,
tmm.keep_prob: 1.0})
# res_current_idx = np.argmax(res_current)
res_next_idx = np.argmax(res_next)
print('Prediction next: {} {}, true {} {}'.format(res_next_idx, dl.index_name[res_next_idx],
next_action_idx, dl.index_name[next_action_idx]))
print(' Probabilities (next):')
for j in range(len(dl.index_name)):
name = dl.index_name[j]
tab_str = get_tab_str(name)
print(' {}{}{:.6f}'.format(name, tab_str, res_next[0,j]))
def run_demo():
index_name = ['end', 'approach', 'move', 'grasp_left', 'grasp_right', 'ungrasp_left', 'ungrasp_right',
'twist', 'push', 'neutral', 'pull', 'pinch', 'unpinch']
n_input = 159
n_classes = 13
n_dim1 = 6
n_dim2 = 7
tmm = tm.create_model(n_input, n_classes, train=True)
pred_next_sm = tf.nn.softmax(tmm.pred_next)
# pred_current_sm = tf.nn.softmax(pred_current)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/map/model.ckpt')
# INSERT ACTUAL ROBOT MEASUREMENTS HERE
# NOTE: if running from actual robot data, don't forget to divide the gripper
# state by 255 (last dimension of feature vector)
x_robot_pre = np.random.normal(size=(1,7))
x_robot_post = np.random.normal(size=(1,7))
action = np.full((1,), random.randint(1,13))
y_action = tm.DataLoader.one_hot(action, 13)
# y_output_pre = y_map_output.eval({x_map_input: x_robot_pre})
y_output_post = tmm.y_map_output.eval({tmm.x_map_input: x_robot_post,
tmm.y_current: y_action,
tmm.keep_prob: 1.0})
# res_current = pred_current_sm.eval({ae_pre_enc: y_output_pre, ae_post_enc: y_output_post})
res_next = pred_next_sm.eval({tmm.ae_post_enc: y_output_post,
tmm.y_current: y_action,
tmm.keep_prob: 1.0})
# res_current_idx = np.argmax(res_current)
res_next_idx = np.argmax(res_next)
print('Prediction next: {} {}'.format(res_next_idx, index_name[res_next_idx]))
print(' Probabilities (next):')
for j in range(len(index_name)):
name = index_name[j]
tab_str = get_tab_str(name)
print(' {}{}{:.6f}'.format(name, tab_str, res_next[0,j]))
'''
Tests the accuracy of a single action's encoding/decoding
'''
def test_action_accuracy(accuracy_next, tmm, dl=tm.DataLoader(),
training=False):
if training:
# pre_data = dl.training_pre_data
post_data = dl.training_post_data
current_action = dl.training_current_action
next_action = dl.training_next_action
type_str = 'training'
else:
# pre_data = dl.testing_pre_data
post_data = dl.testing_post_data
current_action = dl.testing_current_action
next_action = dl.testing_next_action
type_str = 'testing'
for action_idx in range(1, len(dl.index_name)):
# find matching indicies for this action
index_arr = np.full((1, 1), action_idx)
action_one_hot = dl.one_hot(index_arr, len(dl.index_name))
action_indices = np.where((current_action == action_one_hot).all(axis=1))[0]
tab_str = get_tab_str(dl.index_name[action_idx])
print(' {}:{} {} accuracy (next): {:.9f}'.format(dl.index_name[action_idx],
tab_str,
type_str,
accuracy_next.eval({tmm.x_post: post_data[action_indices,:],
tmm.y_current: current_action[action_indices,:],
tmm.y_next: next_action[action_indices,:],
tmm.keep_prob: 1.0})))
'''
Tests the accuracy of a single action's encoding/decoding during mapping
'''
def test_action_accuracy_map(accuracy_next, tmm, mapped_robot_data,
action_idx_data, next_action_idx_data, dl=tm.DataLoader(), train=False):
for action_idx in range(1, len(dl.index_name)):
# find matching indicies for this action
index_arr = np.full((1, 1), action_idx)
action_one_hot = dl.one_hot(index_arr, len(dl.index_name))
action_indices = np.where((action_idx_data == action_one_hot).all(axis=1))[0]
if train:
type_str = 'training'
else:
type_str = 'testing'
tab_str = get_tab_str(dl.index_name[action_idx])
print(' {}:{} {} accuracy (next): {:.9f}'.format(dl.index_name[action_idx],
tab_str,
type_str,
accuracy_next.eval({tmm.ae_post_enc: mapped_robot_data[action_indices,:],
tmm.y_current: action_idx_data[action_indices,:],
tmm.y_next: next_action_idx_data[action_indices,:],
tmm.keep_prob: 1.0})))
def test_model():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
tmm = tm.create_model(n_input, n_classes)
# Calculate accuracy
# correct_pred_current = tf.equal(tf.argmax(pred_current, 1), tf.argmax(y_current, 1))
correct_pred_next = tf.equal(tf.argmax(tmm.pred_next, 1), tf.argmax(tmm.y_next, 1))
# accuracy_current = tf.reduce_mean(tf.cast(correct_pred_current, 'float'))
accuracy_next = tf.reduce_mean(tf.cast(correct_pred_next, 'float'))
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/map/model.ckpt')
print(' train accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.training_post_data,
tmm.y_current: dl.training_current_action,
tmm.y_next: dl.training_next_action})))
print(' test accuracy (next): {:.9f}'.format(accuracy_next.eval({tmm.x_post: dl.testing_post_data,
tmm.y_current: dl.testing_current_action,
tmm.y_next: dl.testing_next_action})))
# print(' train accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.training_pre_data, tmm.x_post: dl.training_post_data, y_current: dl.training_current_action})))
# print(' test accuracy (current): {:.9f}'.format(accuracy_current.eval({x_pre: dl.testing_pre_data, tmm.x_post: dl.testing_post_data, y_current: dl.testing_current_action})))
'''
Print out the probability tables of the current pre and post-condition observations
'''
def test_sequence():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
tmm = tm.create_model(n_input, n_classes)
pred_next_sm = tf.nn.softmax(tmm.pred_next)
# pred_current_sm = tf.nn.softmax(pred_current)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/transition/model.ckpt')
for i in range(dl.training_pre_data.shape[0]):
x_pre_data = np.expand_dims(dl.training_pre_data[i,:], axis=0)
x_post_data = np.expand_dims(dl.training_post_data[i,:], axis=0)
y_action = np.reshape(dl.training_current_action[i,:], (1, len(dl.index_name)))
# res_current = pred_current_sm.eval({x_pre: x_pre_data, tmm.x_post: x_post_data})
# res_next = pred_next_sm.eval({x_pre: x_pre_data, tmm.x_post: x_post_data})
res_next = pred_next_sm.eval({tmm.x_post: x_post_data,
tmm.y_current: y_action})
# res_current_idx = np.argmax(res_current)
res_next_idx = np.argmax(res_next)
print('Prediction next: {} {}'.format(res_next_idx, dl.index_name[res_next_idx]))
print(' Probabilities (next):')
for j in range(len(dl.index_name)):
name = dl.index_name[j]
tab_str = get_tab_str(name)
print(' {}:{}{:.6f}'.format(name, tab_str, res_next[0,j]))
break
'''
Encode the human measurements into low-dimensional subspace
'''
def encode_human():
dl = tm.DataLoader()
n_input = dl.feature_len
n_classes = dl.num_labels
tmm = tm.create_model(n_input, n_classes)
# Launch the graph
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, './models/transition/model.ckpt')
for i in range(dl.training_pre_data.shape[0]):
x_pre_data = np.expand_dims(dl.training_pre_data[i,:], axis=0)
x_post_data = np.expand_dims(dl.training_post_data[i,:], axis=0)
# y_enc_pre = tmm.ae_pre_enc.eval({tmm.x_pre: x_pre_data})
y_enc_post = tmm.ae_post_enc.eval({tmm.x_post: x_post_data})
# Print the 6-dimensional representation
# print(y_enc_pre.tolist())
print(y_enc_post.tolist())
break
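# get_tab_str pads short action names with extra tabs so the per-action probability printouts line up.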
def get_tab_str(action_name):
if len(action_name) < 7:
tab_str = '\t\t\t'
elif len(action_name) >= 7 and len(action_name) < 10:
tab_str = '\t\t'
else:
tab_str = '\t'
return tab_str
def parse_args():
# Parse input arguments
parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('mode', default=None, help='train | trainmap | runmap | test | seq | encode')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.mode == 'train':
train_model()
elif args.mode == 'test':
test_model()
elif args.mode == 'seq':
test_sequence()
elif args.mode == 'encode':
encode_human()
elif args.mode == 'trainmap':
train_mapping()
elif args.mode == 'runmap':
run_mapping()
elif args.mode == 'demo':
run_demo()
|
[
"tensorflow.nn.softmax",
"transition_model_common.create_model",
"tensorflow.cast",
"tensorflow.variables_initializer",
"transition_model_common.RobotDataLoader",
"os.path.exists",
"numpy.reshape",
"argparse.ArgumentParser",
"tensorflow.squared_difference",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"os.mkdir",
"tensorflow.train.AdamOptimizer",
"random.randint",
"numpy.random.normal",
"tensorflow.nn.l2_loss",
"tensorflow.global_variables",
"numpy.argmax",
"tensorflow.summary.FileWriter",
"transition_model_common.DataLoader",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"numpy.zeros",
"numpy.expand_dims",
"transition_model_common.DataLoader.one_hot",
"numpy.full"
] |
[((350, 365), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (363, 365), True, 'import transition_model_common as tm\n'), ((583, 630), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (598, 630), True, 'import transition_model_common as tm\n'), ((1076, 1110), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.pred_weights[0]'], {}), '(tmm.pred_weights[0])\n', (1089, 1110), True, 'import tensorflow as tf\n'), ((1553, 1586), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1584, 1586), True, 'import tensorflow as tf\n'), ((1980, 1996), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1994, 1996), True, 'import tensorflow as tf\n'), ((5130, 5145), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (5143, 5145), True, 'import transition_model_common as tm\n'), ((5216, 5263), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (5231, 5263), True, 'import transition_model_common as tm\n'), ((5300, 5316), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5314, 5316), True, 'import tensorflow as tf\n'), ((11028, 11043), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (11041, 11043), True, 'import transition_model_common as tm\n'), ((11144, 11191), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (11159, 11191), True, 'import transition_model_common as tm\n'), ((11211, 11239), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (11224, 11239), True, 'import tensorflow as tf\n'), ((11483, 11499), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (11497, 11499), True, 'import tensorflow as tf\n'), ((13273, 13320), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {'train': '(True)'}), '(n_input, n_classes, train=True)\n', (13288, 13320), True, 'import transition_model_common as tm\n'), ((13340, 13368), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (13353, 13368), True, 'import tensorflow as tf\n'), ((13457, 13473), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (13471, 13473), True, 'import tensorflow as tf\n'), ((15068, 15083), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (15081, 15083), True, 'import transition_model_common as tm\n'), ((16836, 16851), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (16849, 16851), True, 'import transition_model_common as tm\n'), ((18056, 18071), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (18069, 18071), True, 'import transition_model_common as tm\n'), ((18143, 18178), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {}), '(n_input, n_classes)\n', (18158, 18178), True, 'import transition_model_common as tm\n'), ((18572, 18588), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18586, 18588), True, 'import tensorflow as tf\n'), ((19848, 19863), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (19861, 19863), True, 'import transition_model_common as tm\n'), ((19935, 19970), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 
'n_classes'], {}), '(n_input, n_classes)\n', (19950, 19970), True, 'import transition_model_common as tm\n'), ((19991, 20019), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tmm.pred_next'], {}), '(tmm.pred_next)\n', (20004, 20019), True, 'import tensorflow as tf\n'), ((20108, 20124), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (20122, 20124), True, 'import tensorflow as tf\n'), ((21405, 21420), 'transition_model_common.DataLoader', 'tm.DataLoader', ([], {}), '()\n', (21418, 21420), True, 'import transition_model_common as tm\n'), ((21492, 21527), 'transition_model_common.create_model', 'tm.create_model', (['n_input', 'n_classes'], {}), '(n_input, n_classes)\n', (21507, 21527), True, 'import transition_model_common as tm\n'), ((21564, 21580), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (21578, 21580), True, 'import tensorflow as tf\n'), ((22464, 22551), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=\n ArgumentDefaultsHelpFormatter)\n', (22478, 22551), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((777, 827), 'tensorflow.squared_difference', 'tf.squared_difference', (['tmm.x_post', 'tmm.ae_post_out'], {}), '(tmm.x_post, tmm.ae_post_out)\n', (798, 827), True, 'import tensorflow as tf\n'), ((976, 1061), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'tmm.pred_next', 'labels': 'tmm.y_next'}), '(logits=tmm.pred_next, labels=tmm.y_next\n )\n', (1015, 1061), True, 'import tensorflow as tf\n'), ((1180, 1214), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.pred_weights[i]'], {}), '(tmm.pred_weights[i])\n', (1193, 1214), True, 'import tensorflow as tf\n'), ((1737, 1764), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (1746, 1764), True, 'import tensorflow as tf\n'), ((1766, 1790), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (1775, 1790), True, 'import tensorflow as tf\n'), ((1907, 1942), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (1914, 1942), True, 'import tensorflow as tf\n'), ((2006, 2018), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2016, 2018), True, 'import tensorflow as tf\n'), ((2069, 2123), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""tensorboard/train"""', 'sess.graph'], {}), "('tensorboard/train', sess.graph)\n", (2090, 2123), True, 'import tensorflow as tf\n'), ((5326, 5338), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5336, 5338), True, 'import tensorflow as tf\n'), ((5425, 5491), 'transition_model_common.RobotDataLoader', 'tm.RobotDataLoader', (['dl', 'tmm.x_post', 'tmm.ae_post_enc', 'tmm.keep_prob'], {}), '(dl, tmm.x_post, tmm.ae_post_enc, tmm.keep_prob)\n', (5443, 5491), True, 'import transition_model_common as tm\n'), ((5797, 5855), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, n_dim1]'], {'name': '"""y_human_gt"""'}), "('float', [None, n_dim1], name='y_human_gt')\n", (5811, 5855), True, 'import tensorflow as tf\n'), ((6376, 6413), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tmm.mapping_weights[0]'], {}), '(tmm.mapping_weights[0])\n', (6389, 6413), True, 'import tensorflow as tf\n'), ((6963, 6984), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6982, 6984), True, 'import tensorflow as tf\n'), 
((7237, 7271), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['new_vars'], {}), '(new_vars)\n', (7261, 7271), True, 'import tensorflow as tf\n'), ((7341, 7393), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""tensorboard/map"""', 'sess.graph'], {}), "('tensorboard/map', sess.graph)\n", (7362, 7393), True, 'import tensorflow as tf\n'), ((7915, 7963), 'numpy.zeros', 'np.zeros', (['(num_training, n_dim1)'], {'dtype': 'np.float'}), '((num_training, n_dim1), dtype=np.float)\n', (7923, 7963), True, 'import numpy as np\n'), ((11509, 11521), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11519, 11521), True, 'import tensorflow as tf\n'), ((11601, 11652), 'transition_model_common.RobotDataLoader', 'tm.RobotDataLoader', (['dl', 'tmm.x_post', 'tmm.ae_post_enc'], {}), '(dl, tmm.x_post, tmm.ae_post_enc)\n', (11619, 11652), True, 'import transition_model_common as tm\n'), ((13483, 13495), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13493, 13495), True, 'import tensorflow as tf\n'), ((13775, 13804), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 7)'}), '(size=(1, 7))\n', (13791, 13804), True, 'import numpy as np\n'), ((13827, 13856), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 7)'}), '(size=(1, 7))\n', (13843, 13856), True, 'import numpy as np\n'), ((13928, 13961), 'transition_model_common.DataLoader.one_hot', 'tm.DataLoader.one_hot', (['action', '(13)'], {}), '(action, 13)\n', (13949, 13961), True, 'import transition_model_common as tm\n'), ((14621, 14640), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (14630, 14640), True, 'import numpy as np\n'), ((15695, 15722), 'numpy.full', 'np.full', (['(1, 1)', 'action_idx'], {}), '((1, 1), action_idx)\n', (15702, 15722), True, 'import numpy as np\n'), ((16989, 17016), 'numpy.full', 'np.full', (['(1, 1)', 'action_idx'], {}), '((1, 1), action_idx)\n', (16996, 17016), True, 'import numpy as np\n'), ((18329, 18356), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (18338, 18356), True, 'import tensorflow as tf\n'), ((18358, 18382), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (18367, 18382), True, 'import tensorflow as tf\n'), ((18499, 18534), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (18506, 18534), True, 'import tensorflow as tf\n'), ((18598, 18610), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (18608, 18610), True, 'import tensorflow as tf\n'), ((20134, 20146), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (20144, 20146), True, 'import tensorflow as tf\n'), ((21590, 21602), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (21600, 21602), True, 'import tensorflow as tf\n'), ((1435, 1486), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1457, 1486), True, 'import tensorflow as tf\n'), ((4932, 4969), 'os.path.exists', 'os.path.exists', (['"""./models/transition"""'], {}), "('./models/transition')\n", (4946, 4969), False, 'import os\n'), ((4983, 5014), 'os.mkdir', 'os.mkdir', (['"""./models/transition"""'], {}), "('./models/transition')\n", (4991, 5014), False, 'import os\n'), ((6321, 6351), 'tensorflow.squared_difference', 'tf.squared_difference', (['y', 'y_gt'], {}), '(y, y_gt)\n', (6342, 6351), True, 'import tensorflow as tf\n'), ((6495, 6532), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', 
(['tmm.mapping_weights[i]'], {}), '(tmm.mapping_weights[i])\n', (6508, 6532), True, 'import tensorflow as tf\n'), ((7556, 7583), 'tensorflow.argmax', 'tf.argmax', (['tmm.pred_next', '(1)'], {}), '(tmm.pred_next, 1)\n', (7565, 7583), True, 'import tensorflow as tf\n'), ((7585, 7609), 'tensorflow.argmax', 'tf.argmax', (['tmm.y_next', '(1)'], {}), '(tmm.y_next, 1)\n', (7594, 7609), True, 'import tensorflow as tf\n'), ((7734, 7769), 'tensorflow.cast', 'tf.cast', (['correct_pred_next', '"""float"""'], {}), "(correct_pred_next, 'float')\n", (7741, 7769), True, 'import tensorflow as tf\n'), ((10798, 10828), 'os.path.exists', 'os.path.exists', (['"""./models/map"""'], {}), "('./models/map')\n", (10812, 10828), False, 'import os\n'), ((10842, 10866), 'os.mkdir', 'os.mkdir', (['"""./models/map"""'], {}), "('./models/map')\n", (10850, 10866), False, 'import os\n'), ((12506, 12525), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (12515, 12525), True, 'import numpy as np\n'), ((13887, 13908), 'random.randint', 'random.randint', (['(1)', '(13)'], {}), '(1, 13)\n', (13901, 13908), False, 'import random\n'), ((20299, 20349), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_pre_data[i, :]'], {'axis': '(0)'}), '(dl.training_pre_data[i, :], axis=0)\n', (20313, 20349), True, 'import numpy as np\n'), ((20375, 20426), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_post_data[i, :]'], {'axis': '(0)'}), '(dl.training_post_data[i, :], axis=0)\n', (20389, 20426), True, 'import numpy as np\n'), ((20921, 20940), 'numpy.argmax', 'np.argmax', (['res_next'], {}), '(res_next)\n', (20930, 20940), True, 'import numpy as np\n'), ((21755, 21805), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_pre_data[i, :]'], {'axis': '(0)'}), '(dl.training_pre_data[i, :], axis=0)\n', (21769, 21805), True, 'import numpy as np\n'), ((21831, 21882), 'numpy.expand_dims', 'np.expand_dims', (['dl.training_post_data[i, :]'], {'axis': '(0)'}), '(dl.training_post_data[i, :], axis=0)\n', (21845, 21882), True, 'import numpy as np\n'), ((6848, 6899), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6870, 6899), True, 'import tensorflow as tf\n'), ((11898, 11923), 'numpy.full', 'np.full', (['(1,)', 'action_idx'], {}), '((1,), action_idx)\n', (11905, 11923), True, 'import numpy as np\n'), ((12013, 12040), 'numpy.reshape', 'np.reshape', (['x_robot', '(1, 7)'], {}), '(x_robot, (1, 7))\n', (12023, 12040), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
sns.set()
path = r'C:\Users\HP\PycharmProjects\Operaciones_Calor\Data.xlsx'
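# Load the measurement grid from Data.xlsx, transpose it, and render it as a heatmap.
# Note: the absolute `path` and `df2` (loaded from time.xlsx) are currently unused below.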
df = pd.read_excel("Data.xlsx")
df2 = pd.read_excel("time.xlsx")
df3 = np.transpose(df)
ax = sns.heatmap(data=df3)
plt.show()
|
[
"seaborn.set",
"seaborn.heatmap",
"pandas.read_excel",
"numpy.transpose",
"matplotlib.pyplot.show"
] |
[((94, 103), 'seaborn.set', 'sns.set', ([], {}), '()\n', (101, 103), True, 'import seaborn as sns\n'), ((175, 201), 'pandas.read_excel', 'pd.read_excel', (['"""Data.xlsx"""'], {}), "('Data.xlsx')\n", (188, 201), True, 'import pandas as pd\n'), ((208, 234), 'pandas.read_excel', 'pd.read_excel', (['"""time.xlsx"""'], {}), "('time.xlsx')\n", (221, 234), True, 'import pandas as pd\n'), ((241, 257), 'numpy.transpose', 'np.transpose', (['df'], {}), '(df)\n', (253, 257), True, 'import numpy as np\n'), ((264, 285), 'seaborn.heatmap', 'sns.heatmap', ([], {'data': 'df3'}), '(data=df3)\n', (275, 285), True, 'import seaborn as sns\n'), ((286, 296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (294, 296), True, 'import matplotlib.pyplot as plt\n')]
|
"""
This module deals with querying and downloading LAT FITS files.
"""
# Scientific Library
import numpy as np
import pandas as pd
# Requests Urls and Manupilate Files
from astropy.utils.data import download_files_in_parallel, download_file
from astroquery import fermi
from tqdm import tqdm
import requests
import shutil
import os
# import pathlib
import functools
# Logging
import logging
# logger_info = logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(level=logging.WARNING)
# from retry import retry
from retry.api import retry_call
# See https://stackoverflow.com/questions/492519/timeout-on-a-function-call
import signal
class Download:
MISSING = pd.NA
NAME = 'GCNNAME'
DONE = 'DONE'
WAIT = 2
TBUFF = 30.
INFOPRE = "https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/QueryResults.cgi?id="
TRIES = 3
def __init__(self, grbs):
self.grbs = grbs
# Transform a method into a static method. A static method does not receive an implicit first argument.
# https://docs.python.org/3/library/functions.html#staticmethod
@staticmethod
def Filename(path, sep="/"):
"""Retrieve the file name (without directory) from a full path.
Parameters
----------
path : str
Path to a GRB FITS file.
sep : str, optional
Separator of directory components, by default '/' (as in Unix).
Returns
-------
str
File name without directory.
"""
return path.rsplit(sep, 1)[1]
def GRB_record(self, row: int, col: str, value):
"""Record information for the for the given grb.
Parameters
----------
row : int
Row index of the given GRB.
col : str
Column index of the given GRB.
value
Any value to save to the unit.
"""
try:
self.grbs.at[row, col] = value
except Exception as e:
print("Fail to record: ", e)
print(type(value), value)
def Missing(self, row: int, col: str):
"""Check if the data in the give unit is missing.
Parameters
----------
row : int
Row index of the given GRB.
col : str
Column index of the given GRB.
Returns
-------
bool
True if missing; else, False.
"""
res = pd.isna(self.grbs.at[row, col])
return np.any(res)
def Urls_resolve(self, row):
"""Retrive urls from the given row.
Parameters
----------
row : int
Row index of the given GRB.
Returns
-------
list of str, or None
Urls for the given LAT GRB's FITS photon (PH) and spacecraft (SC) files.
"""
try:
urls = eval(self.grbs.at[row, 'urls'])
return urls
except Exception as e:
print("Fail to resolve urls: ", e)
print(self.grbs._repr_html_)
'''
functions for single DataFrame GRB
'''
def Query_url(self, row: int, period: float, E_MeV_i: float, E_MeV_f: float, trigtime: str, tpeak: str, timeout: float=-1.):
"""Query urls for downloading.
Parameters
----------
row : int
Row index of the given GRB.
period : float
Period after the initial time in seconds.
E_MeV_i : float
Start energy in MeV.
E_MeV_f : float
End energy in MeV.
trigtime : str
Mission Elapsed Time in seconds.
tpeak : str
First low peak time in seconds.
timeout : float, optional
Time for timeout in seconds, by default -1 (no timeout).
"""
col = 'urls'
timesys = 'MET'
name = self.grbs.at[row, self.NAME]
missing = self.Missing(row, col)
if not missing:
logging.info('{}query already done'.format(' ' * 9))
return self.DONE
grb_name = 'GRB' + name
met_i = self.grbs.at[row, trigtime]
delta_t = self.grbs.at[row, tpeak]
met_f = met_i + delta_t + period # "window of 90 seconds" as appears in XHW2018.
start = met_i - self.TBUFF
stop = met_f + self.TBUFF
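# Pad the requested MET window by TBUFF seconds on either side of [met_i, met_f].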
met = '{}, {}'.format(start, stop)
E_MeV = '{}, {}'.format(E_MeV_i, E_MeV_f)
if timeout > 0:
signal.alarm(timeout)
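# signal.alarm() schedules a SIGALRM after `timeout` seconds; unless a handler that raises is
# registered elsewhere (none is installed in this module), the default action terminates the process
# rather than raising a timeout error.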
try:
fits_urls = retry_call(
fermi.FermiLAT.query_object,
fargs=[grb_name],
fkwargs={
'energyrange_MeV': E_MeV,
'obsdates': met,
'timesys': timesys},
tries=self.TRIES)
except Exception as e:
self.GRB_record(row, col, self.MISSING)
logging.warning('{}Query_url failed while receiving:\n{}'.format(' ' * 9, e))
return self.MISSING
#! save urls (list) as str; Please extract urls with eval() later
self.GRB_record(row, col, str(fits_urls))
print(self)
logging.info('{}query finished'.format(' ' * 9))
def Download_fits(self, row: int, out_dir, timeout: float=-1.):
"""Download fits files provided in urls and save to out_dir.
Parameters
----------
row : int
Row index of the given GRB.
out_dir : pathlib.PosixPath
Output directory for the FITS files.
timeout : float, optional
Time for timeout in seconds, by default -1 (no timeout).
Returns
-------
self.DONE or self.MISSING
self.DONE: if succeeded; self.MISSING: if failed
"""
urls = self.Urls_resolve(row)
# urls = eval(self.grbs.at[row, 'urls'])
col = 'fits'
name = self.grbs.at[row, self.NAME]
if not self.Missing(row, col):
logging.info('{}fits already saved'.format(' ' * 9))
return self.DONE
if timeout > 0:
signal.alarm(timeout)
# for url in urls:
try:
file_list = retry_call(
# astropy.utils.data.download_files_in_parallel
download_files_in_parallel,
fargs=[urls],
tries=self.TRIES)
except:
try:
file_list = []
for url in urls:
file_list.append(
retry_call(
# astropy.utils.data.download_file
download_file,
fargs=[url],
tries=self.TRIES)
)
except Exception as e:
self.GRB_record(row, col, self.MISSING)
logging.warning("{}while downloading fits got:\n{}".format(' ' * 9, e))
print("urls failed to download: ", urls)
return self.MISSING
# Following https://stackoverflow.com/questions/12517451/automatically-creating-directories-with-file-output
os.makedirs(out_dir, exist_ok=True)
for i, url in enumerate(urls):
filename = self.Filename(url)
filename = out_dir / filename
# filename = out_dir + "/" + filename
if filename.exists():
continue
try:
# filename2 = wget.download(url, out_dir)
# Following https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
shutil.copyfile(file_list[i], filename)
logging.info(filename.as_posix() + ' saved')
# logging.info(filename + ' saved as ' + filename2)
except Exception as e:
logging.warning(e)
self.GRB_record(row, col, self.DONE)
return self.DONE
def Download_info(self, row, out_dir, pre=INFOPRE, wait=WAIT, timeout: float=-1.):
"""Request query page in url, and save tables to out_dir.
Parameters
----------
row : int
Row index of the given GRB.
out_dir : str
Output directory for FITS file.
pre : str, optional
prefix in url, by default INFOPRE
wait : int or float, optional
Wait time in seconds, by default WAIT
timeout : float, optional
Time for timeout in seconds, by default -1 (no timeout).
Returns
-------
self.DONE or self.MISSING
self.DONE: if succeeded; self.MISSING: if failed
"""
col = 'info'
if not self.Missing(row, col):
name = self.grbs.at[row, self.NAME]
logging.info('{}info already saved'.format(' ' * 9))
return self.DONE
urls = self.Urls_resolve(row)
# urls = self.grbs.at[row, 'urls']
try:
url = urls[0]
except:
logging.info("{}urls missing".format(' ' * 9))
return self.MISSING
ID = self.Filename(url).split("_")[0]
query_url = pre + ID
wait_times = 0
if timeout > 0:
signal.alarm(timeout)
try:
r = retry_call(
requests.get,
fargs=[query_url],
tries=self.TRIES)
except:
self.GRB_record(row, col, self.MISSING)
logging.info("{}query page downloading failed".format(' ' * 9))
return self.MISSING
query_info = r.text
dfs = pd.read_html(query_info)
status = dfs[1]
position_in_queue = status['Position in Queue']
if any(position_in_queue != 'Query complete'):
logging.info("{}Query incomplete.".format(' ' * 9))
return self.MISSING
else:
criteria = dfs[0]
filename = out_dir / 'criteria.csv'
# filename = out_dir + '/criteria.csv'
os.makedirs(os.path.dirname(filename), exist_ok=True)
criteria.to_csv(filename)
info = dfs[2]
filename = out_dir / 'info.csv'
# filename = out_dir + '/info.csv'
info.to_csv(filename)
self.GRB_record(row, col, self.DONE)
logging.info("{}query page downloaded".format(' ' * 9))
return self.DONE
class Query(Download):
FAKE = 'fake'
PERIOD = 90.
EMIN = 1e2 # 1e3 MeV / 10, as 1 + z < 10
EMAX = 5e5 # The highest energy available in LAT
TIMESYS = 'MET'
def __init__(self, grbs, out_dir, init=False, retry=True, timeout: float=-1.):
self.grbs = grbs
self.out_dir = out_dir
self.init = init
self.timeout = timeout
if self.init != False:
self.Reset(self.init)
self.Main_loop(outer_dir=out_dir)
if retry and np.sum(self._Which_missing()) > 0:
logging.info("Querying for missing information")
self.Requery()
def _repr_html_(self):
return self.grbs._repr_html_()
def Row_index(self, name):
'''Return index of the given name'''
index_np = self.grbs[self.grbs[self.NAME] == name].index
index = index_np[0]
return index
def _Which_missing(self):
"""Find locations of missing information.
Returns
-------
list of bool
Where the information is missing, the location of it will be True.
"""
urls = self.grbs['urls'].isna()
fits = self.grbs['fits'].isna()
info = self.grbs['info'].isna()
where = functools.reduce(np.logical_or, [urls, fits, info])
num = np.sum(where)
if num > 0:
print("{}\n{} GRB{} missing".format('-' * 15, num, 's' if num > 1 else ''))
print("Please Run .Requery() with(out) .Reset(init) for several times.\nIf those do not help, please download missing files manually.")
return where
def Which_missing(self):
"""Find GRBs with missing information.
Returns
-------
pandas.DataFrame or astropy.table.table.Table
GRBs with missing information.
"""
return self.grbs[self._Which_missing()]
def Main_loop(self, outer_dir):
"""Main loop to download all required data.
Parameters
----------
outer_dir : pathlib.PosixPath
Output directory.
"""
period = self.PERIOD
E_MeV_i = self.EMIN
E_MeV_f = self.EMAX
timesys = self.TIMESYS
row_index = self.grbs.index
for row in tqdm(row_index):
name = self.grbs.at[row, self.NAME]
urls = self.grbs.at[row, 'urls']
out_dir = outer_dir / name[:6]
# out_dir = outer_dir + '/' + name[:6]
timeout = self.timeout
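# Per-GRB pipeline: query the FITS urls first, then (if the query succeeded) download the
# query-result tables and the photon/spacecraft FITS files into out_dir.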
logging.info(name + ':')
# status = self.Query_url(row=row, period=period, E_MeV_i=E_MeV_i, E_MeV_f=E_MeV_f, trigtime='GBM_MET', tpeak='tpeak_ref')
self.Query_url(row=row, period=period, E_MeV_i=E_MeV_i, E_MeV_f=E_MeV_f, trigtime='GBM_MET', tpeak='tpeak_ref', timeout=timeout)
status = self.grbs.at[row, 'urls']
if status is not self.MISSING:
self.Download_info(row=row, out_dir=out_dir, timeout=timeout)
self.Download_fits(row=row, out_dir=out_dir, timeout=timeout)
rows = self._Which_missing()
if np.sum(rows) > 0:
# pretty printing in jupyter-notebook, following https://stackoverflow.com/questions/19124601/pretty-print-an-entire-pandas-series-dataframe
display(self.grbs.loc[rows])
else:
logging.info("Congratulations! All information downloaded successfully")
def Reset(self, init=False):
"""Initialize urls of grbs with missing fits or info.
Parameters
----------
init : bool, optional
Whether to initialize the table, by default False.
"""
rows = self._Which_missing() if init==False else self.grbs.index
self.grbs.loc[rows, ('urls', 'fits', 'info')] = self.MISSING
def Requery(self):
"""Remove queried urls and run Main_loop for missing grbs."""
self.Main_loop(outer_dir=self.out_dir)
|
[
"logging.basicConfig",
"os.makedirs",
"functools.reduce",
"tqdm.tqdm",
"logging.warning",
"numpy.any",
"numpy.sum",
"shutil.copyfile",
"os.path.dirname",
"signal.alarm",
"retry.api.retry_call",
"pandas.read_html",
"pandas.isna",
"logging.info"
] |
[((457, 499), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (476, 499), False, 'import logging\n'), ((2439, 2470), 'pandas.isna', 'pd.isna', (['self.grbs.at[row, col]'], {}), '(self.grbs.at[row, col])\n', (2446, 2470), True, 'import pandas as pd\n'), ((2486, 2497), 'numpy.any', 'np.any', (['res'], {}), '(res)\n', (2492, 2497), True, 'import numpy as np\n'), ((7176, 7211), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (7187, 7211), False, 'import os\n'), ((9658, 9682), 'pandas.read_html', 'pd.read_html', (['query_info'], {}), '(query_info)\n', (9670, 9682), True, 'import pandas as pd\n'), ((11805, 11856), 'functools.reduce', 'functools.reduce', (['np.logical_or', '[urls, fits, info]'], {}), '(np.logical_or, [urls, fits, info])\n', (11821, 11856), False, 'import functools\n'), ((11871, 11884), 'numpy.sum', 'np.sum', (['where'], {}), '(where)\n', (11877, 11884), True, 'import numpy as np\n'), ((12855, 12870), 'tqdm.tqdm', 'tqdm', (['row_index'], {}), '(row_index)\n', (12859, 12870), False, 'from tqdm import tqdm\n'), ((4497, 4518), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (4509, 4518), False, 'import signal\n'), ((4556, 4714), 'retry.api.retry_call', 'retry_call', (['fermi.FermiLAT.query_object'], {'fargs': '[grb_name]', 'fkwargs': "{'energyrange_MeV': E_MeV, 'obsdates': met, 'timesys': timesys}", 'tries': 'self.TRIES'}), "(fermi.FermiLAT.query_object, fargs=[grb_name], fkwargs={\n 'energyrange_MeV': E_MeV, 'obsdates': met, 'timesys': timesys}, tries=\n self.TRIES)\n", (4566, 4714), False, 'from retry.api import retry_call\n'), ((6138, 6159), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (6150, 6159), False, 'import signal\n'), ((6224, 6294), 'retry.api.retry_call', 'retry_call', (['download_files_in_parallel'], {'fargs': '[urls]', 'tries': 'self.TRIES'}), '(download_files_in_parallel, fargs=[urls], tries=self.TRIES)\n', (6234, 6294), False, 'from retry.api import retry_call\n'), ((9277, 9298), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (9289, 9298), False, 'import signal\n'), ((9328, 9389), 'retry.api.retry_call', 'retry_call', (['requests.get'], {'fargs': '[query_url]', 'tries': 'self.TRIES'}), '(requests.get, fargs=[query_url], tries=self.TRIES)\n', (9338, 9389), False, 'from retry.api import retry_call\n'), ((11080, 11128), 'logging.info', 'logging.info', (['"""Querying for missing information"""'], {}), "('Querying for missing information')\n", (11092, 11128), False, 'import logging\n'), ((13106, 13130), 'logging.info', 'logging.info', (["(name + ':')"], {}), "(name + ':')\n", (13118, 13130), False, 'import logging\n'), ((13719, 13731), 'numpy.sum', 'np.sum', (['rows'], {}), '(rows)\n', (13725, 13731), True, 'import numpy as np\n'), ((13957, 14029), 'logging.info', 'logging.info', (['"""Congratulations! All information downloaded successfully"""'], {}), "('Congratulations! 
All information downloaded successfully')\n", (13969, 14029), False, 'import logging\n'), ((7645, 7684), 'shutil.copyfile', 'shutil.copyfile', (['file_list[i]', 'filename'], {}), '(file_list[i], filename)\n', (7660, 7684), False, 'import shutil\n'), ((10107, 10132), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (10122, 10132), False, 'import os\n'), ((7865, 7883), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (7880, 7883), False, 'import logging\n'), ((6567, 6623), 'retry.api.retry_call', 'retry_call', (['download_file'], {'fargs': '[url]', 'tries': 'self.TRIES'}), '(download_file, fargs=[url], tries=self.TRIES)\n', (6577, 6623), False, 'from retry.api import retry_call\n')]
|
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import Dict, Optional, Sequence
import numpy as np
import torch as to
from init_args_serializer import Serializable
from torch import nn as nn
from torch.functional import Tensor
from tqdm import tqdm
import pyrado
from pyrado.algorithms.base import Algorithm
from pyrado.algorithms.step_based.svpg import SVPGBuilder, SVPGHyperparams
from pyrado.domain_randomization.domain_parameter import DomainParam
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import inner_env
from pyrado.environments.base import Env
from pyrado.logger.step import StepLogger
from pyrado.policies.base import Policy
from pyrado.policies.recurrent.rnn import LSTMPolicy
from pyrado.sampling.parallel_evaluation import eval_domain_params
from pyrado.sampling.sampler_pool import SamplerPool
from pyrado.sampling.step_sequence import StepSequence
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.utils.data_types import EnvSpec
class ADR(Algorithm):
"""
Active Domain Randomization (ADR)
.. seealso::
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Active Domain Randomization", arXiv, 2019
"""
name: str = "adr"
def __init__(
self,
ex_dir: pyrado.PathLike,
env: Env,
subrtn: Algorithm,
adr_hp: Dict,
svpg_hp: SVPGHyperparams,
reward_generator_hp: Dict,
max_iter: int,
num_discriminator_epoch: int,
batch_size: int,
svpg_warmup: int = 0,
num_workers: int = 4,
num_trajs_per_config: int = 8,
log_exploration: bool = False,
randomized_params: Sequence[str] = None,
logger: Optional[StepLogger] = None,
):
"""
Constructor
:param ex_dir: directory to save the snapshots, i.e. the results, in
:param env: the environment to train in
:param subrtn: algorithm which performs the policy / value-function optimization
:param max_iter: maximum number of iterations
:param svpg_particle_hparam: SVPG particle hyperparameters
:param num_svpg_particles: number of SVPG particles
:param num_discriminator_epoch: epochs in discriminator training
:param batch_size: batch size for training
:param svpg_learning_rate: SVPG particle optimizers' learning rate
:param svpg_temperature: SVPG temperature coefficient (how strongly the particles influence each other)
:param svpg_evaluation_steps: how many configurations to sample between training
:param svpg_horizon: how many steps until the particles are reset
:param svpg_kl_factor: kl reward coefficient
:param svpg_warmup: number of iterations without SVPG training in the beginning
:param svpg_serial: serial mode (see SVPG)
:param num_workers: number of environments for parallel sampling
:param num_trajs_per_config: number of trajectories to sample from each config
:param max_step_length: maximum change of physics parameters per step
:param randomized_params: which parameters to randomize
:param logger: logger for every step of the algorithm, if `None` the default logger will be created
"""
if not isinstance(env, Env):
raise pyrado.TypeErr(given=env, expected_type=Env)
if not isinstance(subrtn, Algorithm):
raise pyrado.TypeErr(given=subrtn, expected_type=Algorithm)
if not isinstance(subrtn.policy, Policy):
raise pyrado.TypeErr(given=subrtn.policy, expected_type=Policy)
# Call Algorithm's constructor
super().__init__(ex_dir, max_iter, subrtn.policy, logger)
self.log_loss = True
# Store the inputs
self.env = env
self._subrtn = subrtn
self._subrtn.save_name = "subrtn"
self.num_discriminator_epoch = num_discriminator_epoch
self.batch_size = batch_size
self.num_trajs_per_config = num_trajs_per_config
self.warm_up_time = svpg_warmup
self.log_exploration = log_exploration
self.curr_time_step = 0
randomized_params = adr_hp["randomized_params"]
# Get the number of params
if isinstance(randomized_params, list) and len(randomized_params) == 0:
randomized_params = inner_env(self.env).get_nominal_domain_param().keys()
self.params = [DomainParam(param, 1) for param in randomized_params]
self.num_params = len(self.params)
# Initialize reward generator
self.reward_generator = RewardGenerator(env.spec, logger=self.logger, **reward_generator_hp)
# Initialize logbook
self.sim_instances_full_horizon = np.random.random_sample(
(
svpg_hp["algo"]["num_particles"],
svpg_hp["algo"]["horizon"],
adr_hp["evaluation_steps"],
self.num_params,
)
)
# Initialize SVPG adapter
self.svpg_wrapper = SVPGAdapter(
env,
self.params,
subrtn.expl_strat,
self.reward_generator,
svpg_hp["algo"]["num_particles"],
horizon=svpg_hp["algo"]["horizon"],
num_rollouts_per_config=self.num_trajs_per_config,
step_length=adr_hp["step_length"],
num_workers=num_workers,
)
# Generate SVPG with default architecture using SVPGBuilder
self.svpg = SVPGBuilder(ex_dir, self.svpg_wrapper, svpg_hp).svpg
@property
def sample_count(self) -> int:
return self._subrtn.sample_count
def compute_params(self, sim_instances: to.Tensor, t: int):
"""
Compute the parameters.
:param sim_instances: Physics configurations rollout
:param t: time step to choose
:return: parameters at that time step
"""
nominal = self.svpg_wrapper.nominal_dict()
keys = nominal.keys()
assert len(keys) == sim_instances[t][0].shape[0]
params = []
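# De-normalize: shift each sampled value by 0.5 and scale it by the nominal parameter,
# mirroring the convention used in SVPGAdapter.step().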
for sim_instance in sim_instances[t]:
d = {k: (sim_instance[i] + 0.5) * (nominal[k]) for i, k in enumerate(keys)}
params.append(d)
return params
def step(self, snapshot_mode: str, meta_info: dict = None):
rand_trajs = []
ref_trajs = []
ros = []
for i, p in enumerate(self.svpg.iter_particles):
done = False
svpg_env = self.svpg_wrapper
state = svpg_env.reset(i)
states = []
actions = []
rewards = []
infos = []
rand_trajs_now = []
exploration_logbook = []
with to.no_grad():
while not done:
action = p.expl_strat(to.as_tensor(state, dtype=to.get_default_dtype())).detach().cpu().numpy()
state, reward, done, info = svpg_env.step(action, i)
state_dict = svpg_env.array_to_dict((state + 0.5) * svpg_env.nominal())
print(state_dict, " => ", reward)
# Log visited states as dict
if self.log_exploration:
exploration_logbook.append(state_dict)
# Store rollout results
states.append(state)
rewards.append(reward)
actions.append(action)
infos.append(info)
# Extract trajectories from info
rand_trajs_now.extend(info["rand"])
rand_trajs += info["rand"]
ref_trajs += info["ref"]
ros.append(StepSequence(observations=states, actions=actions, rewards=rewards))
self.logger.add_value(f"SVPG_agent_{i}_mean_reward", np.mean(rewards))
ros[i].torch(data_type=to.DoubleTensor)
# rand_trajs_now = StepSequence.concat(rand_trajs_now)
for rt in rand_trajs_now:
self.convert_and_detach(rt)
self._subrtn.update(rand_trajs_now)
# Logging
rets = [ro.undiscounted_return() for ro in rand_trajs]
ret_avg = np.mean(rets)
ret_med = np.median(rets)
ret_std = np.std(rets)
self.logger.add_value("avg rollout len", np.mean([ro.length for ro in rand_trajs]))
self.logger.add_value("avg return", ret_avg)
self.logger.add_value("median return", ret_med)
self.logger.add_value("std return", ret_std)
# Flatten and combine all randomized and reference trajectories for discriminator
flattened_randomized = StepSequence.concat(rand_trajs)
flattened_randomized.torch(data_type=to.double)
flattened_reference = StepSequence.concat(ref_trajs)
flattened_reference.torch(data_type=to.double)
self.reward_generator.train(flattened_reference, flattened_randomized, self.num_discriminator_epoch)
pyrado.save(
self.reward_generator.discriminator, "discriminator.pt", self.save_dir, prefix="adr", use_state_dict=True
)
if self.curr_time_step > self.warm_up_time:
# Update the particles
# List of lists to comply with interface
self.svpg.update(list(map(lambda x: [x], ros)))
self.convert_and_detach(flattened_randomized)
# np.save(f'{self.save_dir}actions{self.curr_iter}', flattened_randomized.actions)
self.make_snapshot(snapshot_mode, float(ret_avg), meta_info)
self._subrtn.make_snapshot(snapshot_mode="best", curr_avg_ret=float(ret_avg))
self.curr_time_step += 1
def convert_and_detach(self, arg0):
arg0.torch(data_type=to.float)
arg0.observations = arg0.observations.float().detach()
arg0.actions = arg0.actions.float().detach()
def save_snapshot(self, meta_info: dict = None):
super().save_snapshot(meta_info)
if meta_info is not None:
raise pyrado.ValueErr(msg=f"{self.name} is not supposed be run as a subrtn!")
# This algorithm instance is not a subrtn of another algorithm
pyrado.save(self.env, "env.pkl", self.save_dir)
self._subrtn.save_snapshot(meta_info=meta_info)
# self.svpg.save_snapshot(meta_info)
class SVPGAdapter(EnvWrapper, Serializable):
"""Wrapper to encapsulate the domain parameter search as an RL task."""
def __init__(
self,
wrapped_env: Env,
parameters: Sequence[DomainParam],
inner_policy: Policy,
discriminator,
num_particles: int,
step_length: float = 0.01,
horizon: int = 50,
num_rollouts_per_config: int = 8,
num_workers: int = 4,
max_steps: int = 8,
):
"""
Constructor
:param wrapped_env: the environment to wrap
:param parameters: which physics parameters should be randomized
:param inner_policy: the policy to train the subrtn on
:param discriminator: the discriminator to distinguish reference environments from randomized ones
:param step_length: the step size
:param horizon: an svpg horizon
:param num_rollouts_per_config: number of trajectories to sample per physics configuration
:param num_workers: number of environments for parallel sampling
"""
Serializable._init(self, locals())
EnvWrapper.__init__(self, wrapped_env)
self.parameters: Sequence[DomainParam] = parameters
try:
self.pool = SamplerPool(num_workers)
except AssertionError:
Warning("THIS IS NOT MEANT TO BE PARALLEL SAMPLED")
self.inner_policy = inner_policy
self.num_particles = num_particles
self.inner_parameter_state: np.ndarray = np.zeros((self.num_particles, len(self.parameters)))
self.count = np.zeros(self.num_particles)
self.num_trajs = num_rollouts_per_config
self.svpg_max_step_length = step_length
self.discriminator = discriminator
self.max_steps = max_steps
self._adapter_obs_space = BoxSpace(-np.ones(len(parameters)), np.ones(len(parameters)))
self._adapter_act_space = BoxSpace(-np.ones(len(parameters)), np.ones(len(parameters)))
self.horizon = horizon
self.horizon_count = 0
self.reset()
@property
def obs_space(self) -> Space:
return self._adapter_obs_space
@property
def act_space(self) -> Space:
return self._adapter_act_space
def reset(self, i=None, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if i is not None:
assert domain_param is None
self.count[i] = 0
if init_state is None:
self.inner_parameter_state[i] = np.random.random_sample(len(self.parameters))
else:
self.inner_parameter_state[i] = init_state
return self.inner_parameter_state[i]
assert domain_param is None
self.count = np.zeros(self.num_particles)
if init_state is None:
self.inner_parameter_state = np.random.random_sample((self.num_particles, len(self.parameters)))
else:
self.inner_parameter_state = init_state
return self.inner_parameter_state
def step(self, act: np.ndarray, i: int) -> tuple:
if i is not None:
# Clip the action according to the maximum step length
action = np.clip(act, -1, 1) * self.svpg_max_step_length
# Perform step by moving into direction of action
self.inner_parameter_state[i] = np.clip(self.inner_parameter_state[i] + action, 0, 1)
param_norm = self.inner_parameter_state[i] + 0.5
random_parameters = [self.array_to_dict(param_norm * self.nominal())] * self.num_trajs
nominal_parameters = [self.nominal_dict()] * self.num_trajs
# Sample trajectories from random and reference environments
rand = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, random_parameters)
ref = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, nominal_parameters)
# Calculate the rewards for each trajectory
rewards = [self.discriminator.get_reward(traj) for traj in rand]
reward = np.mean(rewards)
info = dict(rand=rand, ref=ref)
# Handle step count management
done = self.count[i] >= self.max_steps - 1
self.count[i] += 1
self.horizon_count += 1
if self.count[i] % self.horizon == 0:
self.inner_parameter_state[i] = np.random.random_sample(len(self.parameters))
return self.inner_parameter_state[i], reward, done, info
raise NotImplementedError("Not parallelizable")
def eval_states(self, states: Sequence[np.ndarray]):
"""
Evaluate the states.
:param states: the states to evaluate
:return: respective rewards and according trajectories
"""
flatten = lambda l: [item for sublist in l for item in sublist]
sstates = flatten([[self.array_to_dict((state + 0.5) * self.nominal())] * self.num_trajs for state in states])
rand = eval_domain_params(self.pool, self.wrapped_env, self.inner_policy, sstates)
ref = eval_domain_params(
self.pool, self.wrapped_env, self.inner_policy, [self.nominal_dict()] * (self.num_trajs * len(states))
)
rewards = [self.discriminator.get_reward(traj) for traj in rand]
rewards = [np.mean(rewards[i * self.num_trajs : (i + 1) * self.num_trajs]) for i in range(len(states))]
return rewards, rand, ref
def params(self):
return [param.name for param in self.parameters]
def nominal(self):
return [inner_env(self.wrapped_env).get_nominal_domain_param()[k] for k in self.params()]
def nominal_dict(self):
return {k: inner_env(self.wrapped_env).get_nominal_domain_param()[k] for k in self.params()}
def array_to_dict(self, arr):
return {k: a for k, a in zip(self.params(), arr)}
class RewardGenerator:
"""Class for generating the discriminator rewards in ADR. Generates a reward using a trained discriminator network."""
def __init__(
self,
env_spec: EnvSpec,
batch_size: int,
reward_multiplier: float,
lr: float = 3e-3,
hidden_size=256,
logger: StepLogger = None,
device: str = "cuda" if to.cuda.is_available() else "cpu",
):
"""
Constructor
:param env_spec: environment specification
:param batch_size: batch size for each update step
:param reward_multiplier: factor for the predicted probability
:param lr: learning rate
:param logger: logger for every step of the algorithm, if `None` the default logger will be created
"""
self.device = device
self.batch_size = batch_size
self.reward_multiplier = reward_multiplier
self.lr = lr
spec = EnvSpec(
obs_space=BoxSpace.cat([env_spec.obs_space, env_spec.act_space]),
act_space=BoxSpace(bound_lo=[0], bound_up=[1]),
)
self.discriminator = LSTMPolicy(
spec=spec, hidden_size=hidden_size, num_recurrent_layers=1, output_nonlin=to.sigmoid
)
self.loss_fcn = nn.BCELoss()
self.optimizer = to.optim.Adam(self.discriminator.parameters(), lr=lr, eps=1e-5)
self.logger = logger
def get_reward(self, traj: StepSequence) -> to.Tensor:
"""Compute the reward of a trajectory.
        Trajectories that the discriminator judges as not coming from the fixed (reference) environment yield a high reward.
:param traj: trajectory to evaluate
:return: a score
:rtype: to.Tensor
"""
traj = preprocess_rollout(traj)
with to.no_grad():
reward = self.discriminator.forward(traj)[0]
return to.log(reward.mean()) * self.reward_multiplier
def train(
self, reference_trajectory: StepSequence, randomized_trajectory: StepSequence, num_epoch: int
) -> to.Tensor:
reference_batch_generator = reference_trajectory.iterate_rollouts()
random_batch_generator = randomized_trajectory.iterate_rollouts()
loss = None
for _ in tqdm(range(num_epoch), "Discriminator Epoch", num_epoch):
for reference_batch, random_batch in zip(reference_batch_generator, random_batch_generator):
reference_batch = preprocess_rollout(reference_batch).float()
random_batch = preprocess_rollout(random_batch).float()
random_results = self.discriminator(random_batch)[0]
reference_results = self.discriminator(reference_batch)[0]
self.optimizer.zero_grad()
loss = self.loss_fcn(random_results, to.ones(random_results.shape[0], 1)) + self.loss_fcn(
reference_results, to.zeros(reference_results.shape[0], 1)
)
loss.backward()
self.optimizer.step()
# Logging
if self.logger is not None:
self.logger.add_value("discriminator_loss", loss)
return loss
def preprocess_rollout(rollout: StepSequence) -> Tensor:
"""
    Extract observations and actions from a `StepSequence` and pack them into a PyTorch tensor.
:param rollout: a `StepSequence` instance containing a trajectory
    :return: a PyTorch tensor containing the trajectory
"""
if not isinstance(rollout, StepSequence):
raise pyrado.TypeErr(given=rollout, expected_type=StepSequence)
# Convert data type
rollout.torch(to.get_default_dtype())
# Extract the data
state = rollout.get_data_values("observations")[:-1]
next_state = rollout.get_data_values("observations")[1:]
action = rollout.get_data_values("actions").narrow(0, 0, next_state.shape[0])
return to.cat((state, action), 1)
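# --- Added illustrative sketch (not part of the original module) ------------
# Demonstrates the shape contract of preprocess_rollout() defined above: the
# packed tensor stacks observations[:-1] next to the actions of the same
# steps, so it has one row fewer than the observation array and
# obs_dim + act_dim columns. The helper name below is hypothetical.
def _example_preprocess_rollout(rollout: StepSequence) -> tuple:
    packed = preprocess_rollout(rollout)
    n_obs = rollout.get_data_values("observations").shape[0]
    assert packed.shape[0] == n_obs - 1
    return tuple(packed.shape)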
|
[
"numpy.clip",
"pyrado.policies.recurrent.rnn.LSTMPolicy",
"pyrado.environment_wrappers.base.EnvWrapper.__init__",
"torch.cuda.is_available",
"pyrado.algorithms.step_based.svpg.SVPGBuilder",
"pyrado.save",
"pyrado.sampling.parallel_evaluation.eval_domain_params",
"numpy.mean",
"pyrado.sampling.sampler_pool.SamplerPool",
"pyrado.ValueErr",
"pyrado.TypeErr",
"numpy.random.random_sample",
"pyrado.domain_randomization.domain_parameter.DomainParam",
"numpy.std",
"pyrado.sampling.step_sequence.StepSequence",
"pyrado.environment_wrappers.utils.inner_env",
"pyrado.spaces.box.BoxSpace",
"pyrado.sampling.step_sequence.StepSequence.concat",
"torch.cat",
"torch.get_default_dtype",
"numpy.median",
"pyrado.spaces.box.BoxSpace.cat",
"numpy.zeros",
"torch.nn.BCELoss",
"torch.no_grad",
"torch.zeros",
"torch.ones"
] |
[((21755, 21781), 'torch.cat', 'to.cat', (['(state, action)', '(1)'], {}), '((state, action), 1)\n', (21761, 21781), True, 'import torch as to\n'), ((6471, 6608), 'numpy.random.random_sample', 'np.random.random_sample', (["(svpg_hp['algo']['num_particles'], svpg_hp['algo']['horizon'], adr_hp[\n 'evaluation_steps'], self.num_params)"], {}), "((svpg_hp['algo']['num_particles'], svpg_hp['algo'][\n 'horizon'], adr_hp['evaluation_steps'], self.num_params))\n", (6494, 6608), True, 'import numpy as np\n'), ((9932, 9945), 'numpy.mean', 'np.mean', (['rets'], {}), '(rets)\n', (9939, 9945), True, 'import numpy as np\n'), ((9964, 9979), 'numpy.median', 'np.median', (['rets'], {}), '(rets)\n', (9973, 9979), True, 'import numpy as np\n'), ((9998, 10010), 'numpy.std', 'np.std', (['rets'], {}), '(rets)\n', (10004, 10010), True, 'import numpy as np\n'), ((10387, 10418), 'pyrado.sampling.step_sequence.StepSequence.concat', 'StepSequence.concat', (['rand_trajs'], {}), '(rand_trajs)\n', (10406, 10418), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((10505, 10535), 'pyrado.sampling.step_sequence.StepSequence.concat', 'StepSequence.concat', (['ref_trajs'], {}), '(ref_trajs)\n', (10524, 10535), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((10708, 10831), 'pyrado.save', 'pyrado.save', (['self.reward_generator.discriminator', '"""discriminator.pt"""', 'self.save_dir'], {'prefix': '"""adr"""', 'use_state_dict': '(True)'}), "(self.reward_generator.discriminator, 'discriminator.pt', self.\n save_dir, prefix='adr', use_state_dict=True)\n", (10719, 10831), False, 'import pyrado\n'), ((11879, 11926), 'pyrado.save', 'pyrado.save', (['self.env', '"""env.pkl"""', 'self.save_dir'], {}), "(self.env, 'env.pkl', self.save_dir)\n", (11890, 11926), False, 'import pyrado\n'), ((13149, 13187), 'pyrado.environment_wrappers.base.EnvWrapper.__init__', 'EnvWrapper.__init__', (['self', 'wrapped_env'], {}), '(self, wrapped_env)\n', (13168, 13187), False, 'from pyrado.environment_wrappers.base import EnvWrapper\n'), ((13613, 13641), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (13621, 13641), True, 'import numpy as np\n'), ((14780, 14808), 'numpy.zeros', 'np.zeros', (['self.num_particles'], {}), '(self.num_particles)\n', (14788, 14808), True, 'import numpy as np\n'), ((17031, 17106), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'sstates'], {}), '(self.pool, self.wrapped_env, self.inner_policy, sstates)\n', (17049, 17106), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((19044, 19144), 'pyrado.policies.recurrent.rnn.LSTMPolicy', 'LSTMPolicy', ([], {'spec': 'spec', 'hidden_size': 'hidden_size', 'num_recurrent_layers': '(1)', 'output_nonlin': 'to.sigmoid'}), '(spec=spec, hidden_size=hidden_size, num_recurrent_layers=1,\n output_nonlin=to.sigmoid)\n', (19054, 19144), False, 'from pyrado.policies.recurrent.rnn import LSTMPolicy\n'), ((19187, 19199), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (19197, 19199), True, 'from torch import nn as nn\n'), ((21394, 21451), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'rollout', 'expected_type': 'StepSequence'}), '(given=rollout, expected_type=StepSequence)\n', (21408, 21451), False, 'import pyrado\n'), ((21495, 21517), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (21515, 21517), True, 'import torch as to\n'), ((5056, 5100), 'pyrado.TypeErr', 'pyrado.TypeErr', 
([], {'given': 'env', 'expected_type': 'Env'}), '(given=env, expected_type=Env)\n', (5070, 5100), False, 'import pyrado\n'), ((5165, 5218), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'subrtn', 'expected_type': 'Algorithm'}), '(given=subrtn, expected_type=Algorithm)\n', (5179, 5218), False, 'import pyrado\n'), ((5287, 5344), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'subrtn.policy', 'expected_type': 'Policy'}), '(given=subrtn.policy, expected_type=Policy)\n', (5301, 5344), False, 'import pyrado\n'), ((6162, 6183), 'pyrado.domain_randomization.domain_parameter.DomainParam', 'DomainParam', (['param', '(1)'], {}), '(param, 1)\n', (6173, 6183), False, 'from pyrado.domain_randomization.domain_parameter import DomainParam\n'), ((7230, 7277), 'pyrado.algorithms.step_based.svpg.SVPGBuilder', 'SVPGBuilder', (['ex_dir', 'self.svpg_wrapper', 'svpg_hp'], {}), '(ex_dir, self.svpg_wrapper, svpg_hp)\n', (7241, 7277), False, 'from pyrado.algorithms.step_based.svpg import SVPGBuilder, SVPGHyperparams\n'), ((10060, 10101), 'numpy.mean', 'np.mean', (['[ro.length for ro in rand_trajs]'], {}), '([ro.length for ro in rand_trajs])\n', (10067, 10101), True, 'import numpy as np\n'), ((11727, 11798), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'msg': 'f"""{self.name} is not supposed be run as a subrtn!"""'}), "(msg=f'{self.name} is not supposed be run as a subrtn!')\n", (11742, 11798), False, 'import pyrado\n'), ((13286, 13310), 'pyrado.sampling.sampler_pool.SamplerPool', 'SamplerPool', (['num_workers'], {}), '(num_workers)\n', (13297, 13310), False, 'from pyrado.sampling.sampler_pool import SamplerPool\n'), ((15381, 15434), 'numpy.clip', 'np.clip', (['(self.inner_parameter_state[i] + action)', '(0)', '(1)'], {}), '(self.inner_parameter_state[i] + action, 0, 1)\n', (15388, 15434), True, 'import numpy as np\n'), ((15760, 15849), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'random_parameters'], {}), '(self.pool, self.wrapped_env, self.inner_policy,\n random_parameters)\n', (15778, 15849), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((15864, 15954), 'pyrado.sampling.parallel_evaluation.eval_domain_params', 'eval_domain_params', (['self.pool', 'self.wrapped_env', 'self.inner_policy', 'nominal_parameters'], {}), '(self.pool, self.wrapped_env, self.inner_policy,\n nominal_parameters)\n', (15882, 15954), False, 'from pyrado.sampling.parallel_evaluation import eval_domain_params\n'), ((16106, 16122), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (16113, 16122), True, 'import numpy as np\n'), ((17358, 17419), 'numpy.mean', 'np.mean', (['rewards[i * self.num_trajs:(i + 1) * self.num_trajs]'], {}), '(rewards[i * self.num_trajs:(i + 1) * self.num_trajs])\n', (17365, 17419), True, 'import numpy as np\n'), ((18295, 18317), 'torch.cuda.is_available', 'to.cuda.is_available', ([], {}), '()\n', (18315, 18317), True, 'import torch as to\n'), ((19652, 19664), 'torch.no_grad', 'to.no_grad', ([], {}), '()\n', (19662, 19664), True, 'import torch as to\n'), ((8452, 8464), 'torch.no_grad', 'to.no_grad', ([], {}), '()\n', (8462, 8464), True, 'import torch as to\n'), ((9565, 9581), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (9572, 9581), True, 'import numpy as np\n'), ((15226, 15245), 'numpy.clip', 'np.clip', (['act', '(-1)', '(1)'], {}), '(act, -1, 1)\n', (15233, 15245), True, 'import numpy as np\n'), ((18889, 18943), 'pyrado.spaces.box.BoxSpace.cat', 'BoxSpace.cat', 
(['[env_spec.obs_space, env_spec.act_space]'], {}), '([env_spec.obs_space, env_spec.act_space])\n', (18901, 18943), False, 'from pyrado.spaces.box import BoxSpace\n'), ((18967, 19003), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', ([], {'bound_lo': '[0]', 'bound_up': '[1]'}), '(bound_lo=[0], bound_up=[1])\n', (18975, 19003), False, 'from pyrado.spaces.box import BoxSpace\n'), ((9431, 9498), 'pyrado.sampling.step_sequence.StepSequence', 'StepSequence', ([], {'observations': 'states', 'actions': 'actions', 'rewards': 'rewards'}), '(observations=states, actions=actions, rewards=rewards)\n', (9443, 9498), False, 'from pyrado.sampling.step_sequence import StepSequence\n'), ((17605, 17632), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.wrapped_env'], {}), '(self.wrapped_env)\n', (17614, 17632), False, 'from pyrado.environment_wrappers.utils import inner_env\n'), ((17735, 17762), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.wrapped_env'], {}), '(self.wrapped_env)\n', (17744, 17762), False, 'from pyrado.environment_wrappers.utils import inner_env\n'), ((20669, 20704), 'torch.ones', 'to.ones', (['random_results.shape[0]', '(1)'], {}), '(random_results.shape[0], 1)\n', (20676, 20704), True, 'import torch as to\n'), ((20762, 20801), 'torch.zeros', 'to.zeros', (['reference_results.shape[0]', '(1)'], {}), '(reference_results.shape[0], 1)\n', (20770, 20801), True, 'import torch as to\n'), ((6085, 6104), 'pyrado.environment_wrappers.utils.inner_env', 'inner_env', (['self.env'], {}), '(self.env)\n', (6094, 6104), False, 'from pyrado.environment_wrappers.utils import inner_env\n'), ((8566, 8588), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (8586, 8588), True, 'import torch as to\n')]
|
"""
Statistical helpers: Chi2 and Cochran-Mantel-Haenszel (CMH) tests, plus Table 1 generation.
"""
import textwrap
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import display
from .boxes import *
from .table_display import *
__DEBUG__ = False
def debug(*args, **kwargs):
if __DEBUG__:
print(*args, **kwargs)
class Chi2Result(object):
"""Chi2 result class.
Primarily used for pretty-printing results.
"""
def __init__(self, name1: str, name2: str, xs: pd.DataFrame, dof: int,
p: float, alpha=0.05):
"""Create a new Chi2Result instance."""
self.name1 = name1
self.name2 = name2
self.xs = xs
self.dof = dof
self.p = p
self.alpha = alpha
def __repr__(self):
"""Return a string representation of this result."""
if self.p <= self.alpha:
p_conclusion = f'p ≤ {self.alpha}'
else:
p_conclusion = f'p > {self.alpha}'
s = f"""
Chi2 analysis between {self.name1} and {self.name2}
p = {self.p:.4f} with {self.dof} degree(s) of freedom.
{p_conclusion}
"""
return textwrap.dedent(s)
def _repr_html_(self):
"""Return an HTML representation of this result."""
if self.p <= self.alpha:
p_conclusion = f'p ≤ {self.alpha}'
else:
p_conclusion = f'p > {self.alpha}'
tpl = f"""
<div style="font-family: courier; padding: 0px 10px;">
<div style="text-align:center">
Chi² analysis between <b>{self.name1}</b> and
<b>{self.name2}</b></div>
<div>p-value: <b>{self.p:.4f}</b> with
<b>{self.dof}</b> degree(s) of freedom.</div>
<div>{p_conclusion}</div>
</div>
"""
if self.p <= self.alpha:
return info(tpl, raw=True)
return box(tpl, '#efefef', '#cfcfcf', raw=True)
def Chi2(col1: pd.Series, col2: pd.Series, show_crosstab=False) -> Chi2Result:
"""Compute the Chi2 statistic."""
xs = pd.crosstab(col1, col2)
_, p, dof, expected = stats.chi2_contingency(xs)
if show_crosstab:
display(xs)
return Chi2Result(col1.name, col2.name, xs, dof, p)
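# --- Added illustrative sketch (not part of the original module) ------------
# Minimal usage of Chi2() defined above; the DataFrame and its column names
# are hypothetical.
def _example_chi2_usage():
    df = pd.DataFrame({
        "smoker": ["yes", "no", "yes", "no"] * 25,
        "outcome": ["ill", "ill", "healthy", "healthy"] * 25,
    })
    result = Chi2(df["smoker"], df["outcome"], show_crosstab=False)
    return result.p, result.dof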
class CMHResult(object):
"""Represents the result of a Cochran-Mantel-Haenszel Chi2 analysis."""
def __init__(self, STATISTIC, df, p, var1, var2, stratifier, alpha=0.05):
"""
Initialize a new CMHResult.
STATISTIC: X2 statistic
df: degrees of freedom
p: p-value
"""
self.STATISTIC = STATISTIC
self.df = df
self.p = p
self.var1 = var1
self.var2 = var2
self.stratifier = stratifier
self.alpha = alpha
def __repr__(self):
stat = round(self.STATISTIC, 5)
pval = round(self.p, 4)
df = self.df
return textwrap.dedent(f"""
Cochran-Mantel-Haenszel Chi2 test
"{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"
Cochran-Mantel-Haenszel M^2 = {stat}, df = {df}, p-value = {pval}
""")
def _repr_html_(self):
stat = round(self.STATISTIC, 5)
pval = round(self.p, 4)
df = self.df
tpl = f"""
<div style="font-family: courier; font-size: 10pt; padding: 0px 10px;">
<div style="text-align:center">
Cochran-Mantel-Haenszel Chi² test
</div>
<div>
<b>{self.var1}</b> x <b>{self.var2}</b>,
stratified by <b>{self.stratifier}</b>
</div>
<div>
Cochran-Mantel-Haenszel
M^2 = {stat},
df = {df},
p-value = <b>{pval}</b>
</div>
</div>
"""
if pval > self.alpha:
return box(tpl, '#efefef', '#cfcfcf')
return box(tpl, '#b0cbe9', '#4393e1')
def CMH(df: pd.DataFrame, var: str, outcome: str, stratifier: str, raw=False):
"""Compute the CMH statistic.
Based on "Categorical Data Analysis", page 295 by Agresti (2002) and
R implementation of mantelhaen.test().
"""
df = df.copy()
df[outcome] = df[outcome].astype('category')
df[var] = df[var].astype('category')
df[stratifier] = df[stratifier].astype('category')
# Compute contingency table size KxIxJ
I = len(df[outcome].cat.categories)
J = len(df[var].cat.categories)
K = len(df[stratifier].cat.categories)
contingency_tables = np.zeros((I, J, K), dtype='float')
# Create stratified contingency tables
for k in range(K):
cat = df[stratifier].cat.categories[k]
subset = df.loc[df[stratifier] == cat, [var, outcome]]
xs = pd.crosstab(subset[outcome], subset[var], dropna=False)
contingency_tables[:, :, k] = xs
# Compute the actual CMH
STATISTIC, df, pval = CMH_numpy(contingency_tables)
if raw:
return STATISTIC, df, pval
return CMHResult(STATISTIC, df, pval, var, outcome, stratifier)
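# --- Added illustrative sketch (not part of the original module) ------------
# Minimal usage of CMH() defined above with two strata; the column names and
# values are hypothetical.
def _example_cmh_usage():
    df = pd.DataFrame({
        "treatment": ["A", "B"] * 40,
        "outcome": ["good", "bad", "bad", "good"] * 20,
        "centre": ["c1"] * 40 + ["c2"] * 40,
    })
    res = CMH(df, var="treatment", outcome="outcome", stratifier="centre")
    return res.p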
def CMH_numpy(X):
"""Compute the CMH statistic.
Based on "Categorical Data Analysis", page 295 by Agresti (2002) and
R implementation of mantelhaen.test().
"""
# I: nr. of rows
# J: nr. of columns
# K: nr. of strata
# ⚠️ Note: this does *not* match the format used when printing!
I, J, K = X.shape
debug(f"I: {I}, J: {J}, K: {K}")
debug()
df = (I - 1) * (J - 1)
debug(f'{df} degree(s) of freedom')
    # Initialize m and n to zero vectors of length df
n = np.zeros(df)
m = np.zeros(df)
V = np.zeros((df, df))
# iterate over the strata
for k in range(K):
debug(f'partial {k}')
        # f holds partial contingency table k
f = X[:, :, k]
# debuggin'
debug(' f:')
debug(f)
debug()
# Sum of *all* values in the partial table
ntot = f.sum()
debug(f' ntot: {ntot}')
# Compute the sum over all row/column entries *excluding* the last
# entry. The last entries are excluded, as they hold redundant
# information in combination with the row/column totals.
colsums = f.sum(axis=0)[:-1]
rowsums = f.sum(axis=1)[:-1]
debug(' rowsums:', rowsums)
debug(' colsums:', colsums)
        # f[:-1, :-1] holds the partial matrix, excluding the last row & column.
# The result is reshaped into a vector.
debug(' f[:-1, :-1].reshape(-1): ', f[:-1, :-1].reshape(-1))
n = n + f[:-1, :-1].reshape(-1)
# Take the outer product of the row- and colsums, divide it by the
# total of the partial table. Yields a vector of length df. This holds
# the expected value under the assumption of conditional independence.
m_k = (np.outer(rowsums, colsums) / ntot).reshape(-1)
m = m + m_k
debug(' m_k:', m_k)
debug()
# V_k holds the null covariance matrix (matrices).
k1 = np.diag(ntot * colsums)[:J, :J] - np.outer(colsums, colsums)
k2 = np.diag(ntot * rowsums)[:I, :I] - np.outer(rowsums, rowsums)
debug('np.kron(k1, k2):')
debug(np.kron(k1, k2))
debug()
V_k = np.kron(k1, k2) / (ntot**2 * (ntot - 1))
debug(' V_k:')
debug(V_k)
V = V + V_k
debug()
# Subtract the mean from the entries
n = n - m
debug(f'n: {n}')
debug()
debug('np.linalg.solve(V, n):')
debug(np.linalg.solve(V, n))
debug()
STATISTIC = np.inner(n, np.linalg.solve(V, n).transpose())
debug('STATISTIC:', STATISTIC)
pval = 1 - stats.chi2.cdf(STATISTIC, df)
return STATISTIC, df, pval
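# --- Added illustrative sketch (not part of the original module) ------------
# CMH_numpy() expects an I x J x K array, where X[:, :, k] is the k-th
# stratified contingency table. The 2 x 2 x 2 array below is hypothetical.
def _example_cmh_numpy_usage():
    X = np.array([[[12.0, 7.0], [5.0, 10.0]],
                  [[9.0, 6.0], [8.0, 11.0]]])
    statistic, dof, pval = CMH_numpy(X)
    return statistic, dof, pval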
def table1(df, vars, outcome, p_name='p', p_precision=None, title=''):
"""Prepare Table 1"""
def replace(string, dict_):
for key, replacement in dict_.items():
if string == key:
return replacement
return string
# We're going to create multiple tables, one for each variable.
tables = []
col2 = df[outcome]
totals = col2.value_counts()
headers = {
header: f'{header} (n={total})' for header, total in totals.iteritems()
}
# Iterate over the variables
for v in vars:
col1 = df[v]
# Crosstab with absolute numbers
x1 = pd.crosstab(col1, col2)
# Crosstab with percentages
x2 = pd.crosstab(col1, col2, normalize='columns')
x2 = (x2 * 100).round(1)
# Chi2 is calculated using absolute nrs.
chi2, p, dof, expected = stats.chi2_contingency(x1)
# Combine absolute nrs. with percentages in a single cell.
xs = x1.astype('str') + ' (' + x2.applymap('{:3.1f}'.format) + ')'
# Add the totals ('n={total}') to the headers
xs.columns = [replace(h, headers) for h in list(xs.columns)]
# If title is provided, we'll add a level to the column index and put
# it there (on top).
if title:
colidx = pd.MultiIndex.from_product(
[[title, ], list(xs.columns)],
)
xs.columns = colidx
# Add the p-value in a new column, but only in the top row.
xs[p_name] = ''
if p_precision:
p_tpl = f"{{:.{p_precision}f}}"
xs.iloc[0, len(xs.columns) - 1] = p_tpl.format(p)
else:
xs[p_name] = np.nan
xs.iloc[0, len(xs.columns) - 1] = p
# Prepend the name of the current variable to the row index, so we can
# concat the tables later ...
xs.index = pd.MultiIndex.from_product(
[[v, ], list(xs.index)],
names=['variable', 'values']
)
tables.append(xs)
return pd.concat(tables)
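# --- Added illustrative sketch (not part of the original module) ------------
# Minimal usage of table1() defined above; the DataFrame and variable names
# are hypothetical. Note that table1() relies on Series.iteritems(), so this
# assumes a pandas version that still provides it.
def _example_table1_usage():
    df = pd.DataFrame({
        "sex": ["m", "f", "f", "m"] * 30,
        "smoker": ["yes", "no"] * 60,
        "outcome": ["case", "control"] * 60,
    })
    return table1(df, vars=["sex", "smoker"], outcome="outcome", p_precision=3)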
|
[
"textwrap.dedent",
"IPython.display.display",
"numpy.linalg.solve",
"scipy.stats.chi2.cdf",
"scipy.stats.chi2_contingency",
"pandas.crosstab",
"numpy.diag",
"numpy.kron",
"numpy.zeros",
"numpy.outer",
"pandas.concat"
] |
[((2089, 2112), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {}), '(col1, col2)\n', (2100, 2112), True, 'import pandas as pd\n'), ((2139, 2165), 'scipy.stats.chi2_contingency', 'stats.chi2_contingency', (['xs'], {}), '(xs)\n', (2161, 2165), False, 'from scipy import stats\n'), ((4582, 4616), 'numpy.zeros', 'np.zeros', (['(I, J, K)'], {'dtype': '"""float"""'}), "((I, J, K), dtype='float')\n", (4590, 4616), True, 'import numpy as np\n'), ((5626, 5638), 'numpy.zeros', 'np.zeros', (['df'], {}), '(df)\n', (5634, 5638), True, 'import numpy as np\n'), ((5647, 5659), 'numpy.zeros', 'np.zeros', (['df'], {}), '(df)\n', (5655, 5659), True, 'import numpy as np\n'), ((5668, 5686), 'numpy.zeros', 'np.zeros', (['(df, df)'], {}), '((df, df))\n', (5676, 5686), True, 'import numpy as np\n'), ((9807, 9824), 'pandas.concat', 'pd.concat', (['tables'], {}), '(tables)\n', (9816, 9824), True, 'import pandas as pd\n'), ((1136, 1154), 'textwrap.dedent', 'textwrap.dedent', (['s'], {}), '(s)\n', (1151, 1154), False, 'import textwrap\n'), ((2197, 2208), 'IPython.display.display', 'display', (['xs'], {}), '(xs)\n', (2204, 2208), False, 'from IPython.display import display\n'), ((2926, 3168), 'textwrap.dedent', 'textwrap.dedent', (['f"""\n Cochran-Mantel-Haenszel Chi2 test\n\n "{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"\n\n Cochran-Mantel-Haenszel M^2 = {stat}, df = {df}, p-value = {pval}\n """'], {}), '(\n f"""\n Cochran-Mantel-Haenszel Chi2 test\n\n "{self.var1}" x "{self.var2}", stratified by "{self.stratifier}"\n\n Cochran-Mantel-Haenszel M^2 = {stat}, df = {df}, p-value = {pval}\n """\n )\n', (2941, 3168), False, 'import textwrap\n'), ((4808, 4863), 'pandas.crosstab', 'pd.crosstab', (['subset[outcome]', 'subset[var]'], {'dropna': '(False)'}), '(subset[outcome], subset[var], dropna=False)\n', (4819, 4863), True, 'import pandas as pd\n'), ((7547, 7568), 'numpy.linalg.solve', 'np.linalg.solve', (['V', 'n'], {}), '(V, n)\n', (7562, 7568), True, 'import numpy as np\n'), ((7697, 7726), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['STATISTIC', 'df'], {}), '(STATISTIC, df)\n', (7711, 7726), False, 'from scipy import stats\n'), ((8399, 8422), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {}), '(col1, col2)\n', (8410, 8422), True, 'import pandas as pd\n'), ((8473, 8517), 'pandas.crosstab', 'pd.crosstab', (['col1', 'col2'], {'normalize': '"""columns"""'}), "(col1, col2, normalize='columns')\n", (8484, 8517), True, 'import pandas as pd\n'), ((8634, 8660), 'scipy.stats.chi2_contingency', 'stats.chi2_contingency', (['x1'], {}), '(x1)\n', (8656, 8660), False, 'from scipy import stats\n'), ((7090, 7116), 'numpy.outer', 'np.outer', (['colsums', 'colsums'], {}), '(colsums, colsums)\n', (7098, 7116), True, 'import numpy as np\n'), ((7164, 7190), 'numpy.outer', 'np.outer', (['rowsums', 'rowsums'], {}), '(rowsums, rowsums)\n', (7172, 7190), True, 'import numpy as np\n'), ((7240, 7255), 'numpy.kron', 'np.kron', (['k1', 'k2'], {}), '(k1, k2)\n', (7247, 7255), True, 'import numpy as np\n'), ((7288, 7303), 'numpy.kron', 'np.kron', (['k1', 'k2'], {}), '(k1, k2)\n', (7295, 7303), True, 'import numpy as np\n'), ((7056, 7079), 'numpy.diag', 'np.diag', (['(ntot * colsums)'], {}), '(ntot * colsums)\n', (7063, 7079), True, 'import numpy as np\n'), ((7130, 7153), 'numpy.diag', 'np.diag', (['(ntot * rowsums)'], {}), '(ntot * rowsums)\n', (7137, 7153), True, 'import numpy as np\n'), ((7611, 7632), 'numpy.linalg.solve', 'np.linalg.solve', (['V', 'n'], {}), '(V, n)\n', (7626, 7632), True, 'import numpy as np\n'), 
((6871, 6897), 'numpy.outer', 'np.outer', (['rowsums', 'colsums'], {}), '(rowsums, colsums)\n', (6879, 6897), True, 'import numpy as np\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
from .kshell_utilities import atomic_numbers, loadtxt
from .general_utilities import create_spin_parity_list, gamma_strength_function_average
class LEE:
def __init__(self, directory):
self.bin_width = 0.2
self.E_max = 30
self.Ex_min = 0 # Lower limit for emitted gamma energy [MeV].
self.Ex_max = 30 # Upper limit for emitted gamma energy [MeV].
n_bins = int(np.ceil(self.E_max/self.bin_width))
E_max_adjusted = self.bin_width*n_bins
bins = np.linspace(0, E_max_adjusted, n_bins + 1)
self.bins_middle = (bins[0: -1] + bins[1:])/2
self.all_fnames = {}
self.directory = directory
for element in sorted(os.listdir(self.directory)):
"""
List all content in self.directory.
"""
if os.path.isdir(f"{self.directory}/{element}"):
"""
If element is a directory, enter it to find data files.
"""
self.all_fnames[element] = [] # Create blank entry in dict for current element.
for isotope in os.listdir(f"{self.directory}/{element}"):
"""
List all content in the element directory.
"""
if isotope.startswith("summary"):
"""
Extract summary data files.
"""
try:
"""
Example: O16.
"""
n_neutrons = int(isotope[9:11])
except ValueError:
"""
Example: Ne20.
"""
n_neutrons = int(isotope[10:12])
n_neutrons -= atomic_numbers[element.split("_")[1]]
self.all_fnames[element].append([f"{element}/{isotope}", n_neutrons])
for key in self.all_fnames:
"""
Sort each list in the dict by the number of neutrons.
"""
self.all_fnames[key].sort(key=lambda tup: tup[1]) # Why not do this when directory is listed?
# def plot_gsf(self, isotope_name):
# """
# Plot the gamma strength function for a single isotope.
# isotope_name : string
# Examples: S24, Ne30.
# Raises
# ------
# ValueError
# If isotope_name cannot be found in the calculated data
# files.
# """
# fname = None
# for fnames in self.fnames_combined:
# for i in range(len(fnames)):
# if isotope_name in fnames[i][0]:
# fname = fnames[i][0]
# if fname is None:
# msg = f"Isotope name '{isotope_name}' is not a valid name."
# raise ValueError(msg)
# res = loadtxt(self.directory + fname)
# _, ax = plt.subplots()
# Jpi_list = create_jpi_list(res.levels[:, 1], None)
# E_gs = res.levels[0, 0]
# res.transitions[:, 2] += E_gs # Add ground state energy for compatibility with Jørgen.
# gsf = strength_function_average(
# levels = res.levels,
# transitions = res.transitions,
# Jpi_list = Jpi_list,
# bin_width = self.bin_width,
# Ex_min = self.Ex_min, # [MeV].
# Ex_max = self.Ex_max, # [MeV].
# multipole_type = "M1"
# )
# bin_slice = self.bins_middle[0:len(gsf)]
# ax.plot(bin_slice, gsf, label=fname)
# ax.legend()
# ax.set_xlabel(r"$E_{\gamma}$ [MeV]")
# ax.set_ylabel(r"gsf [MeV$^{-3}$]")
# plt.show()
def calculate_low_energy_enhancement(self, filter=None):
"""
        Recreate the figure from Jørgen's article.
"""
self.labels = [] # Suggested labels for plotting.
self.ratios = []
self.n_neutrons = []
for key in self.all_fnames:
"""
            Loop over all elements (i.e. chemical elements).
"""
fnames = self.all_fnames[key] # For compatibility with old code.
if filter is not None:
if key.split("_")[1] not in filter:
"""
Skip elements not in filter.
"""
continue
ratios = [] # Reset ratio for every new element.
for i in range(len(fnames)):
"""
Loop over all isotopes per element.
"""
try:
res = loadtxt(f"{self.directory}/{fnames[i][0]}")
except FileNotFoundError:
print(f"File {fnames[i][0]} skipped! File not found.")
ratios.append(None) # Maintain correct list length for plotting.
continue
Jpi_list = create_spin_parity_list(
spins = res.levels[:, 1],
parities = res.levels[:, 2]
)
E_gs = res.levels[0, 0]
try:
res.transitions[:, 2] += E_gs # Add ground state energy for compatibility with Jørgen.
except IndexError:
print(f"File {fnames[i][0]} skipped! Too few / no energy levels are present in this data file.")
ratios.append(None) # Maintain correct list length for plotting.
continue
try:
gsf = strength_function_average(
levels = res.levels,
transitions = res.transitions,
Jpi_list = Jpi_list,
bin_width = self.bin_width,
Ex_min = self.Ex_min, # [MeV].
Ex_max = self.Ex_max, # [MeV].
multipole_type = "M1"
)
except IndexError:
print(f"File {fnames[i][0]} skipped! That unknown index out of bounds error in ksutil.")
ratios.append(None)
continue
# Sum gsf for low and high energy range and take the ratio.
bin_slice = self.bins_middle[0:len(gsf)]
low_idx = (bin_slice <= 2)
                high_idx = (bin_slice >= 2) & (bin_slice <= 6)
low = np.sum(gsf[low_idx])
high = np.sum(gsf[high_idx])
low_high_ratio = low/high
ratios.append(low_high_ratio)
print(f"{fnames[i][0]} loaded")
if all(elem is None for elem in ratios):
"""
Skip current element if no ratios are calculated.
"""
continue
self.labels.append(fnames[0][0][:fnames[0][0].index("/")])
self.n_neutrons.append([n_neutrons for _, n_neutrons in fnames])
self.ratios.append(ratios)
def quick_plot(self):
fig, ax = plt.subplots()
for i in range(len(self.n_neutrons)):
ax.plot(self.n_neutrons[i], self.ratios[i], ".--", label=self.labels[i])
ax.set_yscale("log")
ax.set_xlabel("N")
ax.set_ylabel("Rel. amount of low-energy strength")
ax.legend()
plt.show()
def low_energy_enhancement(directory):
"""
Wrapper for easier usage.
Parameters
----------
directory : string
Directory containing subfolders with KSHELL data.
Returns
-------
res : kshell_utilities.low_energy_enhancement.LEE
Class instance containing LEE data.
"""
res = LEE(directory)
res.calculate_low_energy_enhancement()
return res
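# --- Added illustrative usage sketch (not part of the original module) ------
# The directory path below is hypothetical; it must contain element
# subdirectories with KSHELL summary files, as described in the docstring
# above.
def _example_low_energy_enhancement_usage(directory="kshell_output/"):
    res = low_energy_enhancement(directory)
    res.quick_plot()  # relative low-energy strength vs. neutron number, per element
    return res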
|
[
"numpy.ceil",
"os.listdir",
"numpy.sum",
"numpy.linspace",
"os.path.isdir",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((563, 605), 'numpy.linspace', 'np.linspace', (['(0)', 'E_max_adjusted', '(n_bins + 1)'], {}), '(0, E_max_adjusted, n_bins + 1)\n', (574, 605), True, 'import numpy as np\n'), ((7272, 7286), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7284, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7579, 7589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7587, 7589), True, 'import matplotlib.pyplot as plt\n'), ((465, 501), 'numpy.ceil', 'np.ceil', (['(self.E_max / self.bin_width)'], {}), '(self.E_max / self.bin_width)\n', (472, 501), True, 'import numpy as np\n'), ((757, 783), 'os.listdir', 'os.listdir', (['self.directory'], {}), '(self.directory)\n', (767, 783), False, 'import os\n'), ((881, 925), 'os.path.isdir', 'os.path.isdir', (['f"""{self.directory}/{element}"""'], {}), "(f'{self.directory}/{element}')\n", (894, 925), False, 'import os\n'), ((1169, 1210), 'os.listdir', 'os.listdir', (['f"""{self.directory}/{element}"""'], {}), "(f'{self.directory}/{element}')\n", (1179, 1210), False, 'import os\n'), ((6638, 6658), 'numpy.sum', 'np.sum', (['gsf[low_idx]'], {}), '(gsf[low_idx])\n', (6644, 6658), True, 'import numpy as np\n'), ((6682, 6703), 'numpy.sum', 'np.sum', (['gsf[high_idx]'], {}), '(gsf[high_idx])\n', (6688, 6703), True, 'import numpy as np\n')]
|
"""Tests for Pipeline class"""
import pytest
from cognigraph.nodes.pipeline import Pipeline
import numpy as np
from numpy.testing import assert_array_equal
from cognigraph.tests.prepare_pipeline_tests import (
create_dummy_info,
ConcreteSource,
ConcreteProcessor,
ConcreteOutput,
)
@pytest.fixture(scope="function")
def pipeline():
source = ConcreteSource()
processor = ConcreteProcessor()
output = ConcreteOutput()
pipeline = Pipeline()
pipeline.add_child(source)
source.add_child(processor)
processor.add_child(output)
return pipeline
def test_pipeline_initialization(pipeline):
pipeline.chain_initialize()
source = pipeline._children[0]
processor = source._children[0]
output = processor._children[0]
assert source._initialized
assert source.mne_info is not None
assert source.mne_info["nchan"] == source.nchan
assert processor._initialized
assert output._initialized
def test_pipeline_update(pipeline):
"""Update all pipeline nodes twice and check outputs"""
pipeline.chain_initialize()
src = pipeline._children[0]
proc = src._children[0]
out = proc._children[0]
nch = src.nchan
nsamp = src.nsamp
pr_inc = proc.increment
out_inc = out.increment
pipeline.update()
assert_array_equal(src.output, np.zeros([nch, nsamp]))
assert_array_equal(proc.output, np.zeros([nch, nsamp]) + pr_inc)
assert_array_equal(out.output, proc.output + out_inc)
pipeline.update()
assert_array_equal(src.output, np.ones([nch, nsamp]))
assert_array_equal(proc.output, np.ones([nch, nsamp]) + pr_inc * 2)
assert_array_equal(out.output, proc.output + out_inc * 2)
def test_reset_mechanics(pipeline):
"""
    Test that the upstream output shape changes when the number of channels
    in the source is changed via _mne_info, that src._on_critical_attr_changed
    is called when this happens, that this triggers reinitialization for nodes
    for which _mne_info is in UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION,
    and finally that the history invalidation mechanics work.
"""
src = pipeline._children[0]
proc = src._children[0]
out = proc._children[0]
pipeline.chain_initialize()
pipeline.update()
new_nchan = 43
new_info = create_dummy_info(nchan=new_nchan)
assert src.n_resets == 0
assert proc.n_initializations == 1
assert proc.n_hist_invalidations == 0
src._mne_info = new_info
pipeline.update()
assert src.n_resets == 1
for i in range(3):
pipeline.update()
pipeline.update()
assert np.all(out.output)
assert out.output.shape[0] == new_nchan
assert proc.n_initializations == 2
assert proc.n_hist_invalidations == 1
def test_add_child_on_the_fly(pipeline):
src = pipeline._children[0]
pipeline.chain_initialize()
pipeline.update()
new_processor = ConcreteProcessor(increment=0.2)
src.add_child(new_processor, initialize=True)
pipeline.update()
nch = src.nchan
nsamp = src.nsamp
assert_array_equal(
new_processor.output, np.ones([nch, nsamp]) + new_processor.increment
)
assert new_processor._root is pipeline
# def test_critical_upstream_change_happened(pipeline):
# src = pipeline._children[0]
# proc = src._children[0]
# pipeline.chain_initialize()
# pipeline.update()
|
[
"cognigraph.tests.prepare_pipeline_tests.ConcreteSource",
"cognigraph.tests.prepare_pipeline_tests.create_dummy_info",
"numpy.ones",
"cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor",
"cognigraph.tests.prepare_pipeline_tests.ConcreteOutput",
"numpy.zeros",
"pytest.fixture",
"numpy.all",
"cognigraph.nodes.pipeline.Pipeline",
"numpy.testing.assert_array_equal"
] |
[((301, 333), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (315, 333), False, 'import pytest\n'), ((363, 379), 'cognigraph.tests.prepare_pipeline_tests.ConcreteSource', 'ConcreteSource', ([], {}), '()\n', (377, 379), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((396, 415), 'cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor', 'ConcreteProcessor', ([], {}), '()\n', (413, 415), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((429, 445), 'cognigraph.tests.prepare_pipeline_tests.ConcreteOutput', 'ConcreteOutput', ([], {}), '()\n', (443, 445), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((461, 471), 'cognigraph.nodes.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (469, 471), False, 'from cognigraph.nodes.pipeline import Pipeline\n'), ((1432, 1485), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['out.output', '(proc.output + out_inc)'], {}), '(out.output, proc.output + out_inc)\n', (1450, 1485), False, 'from numpy.testing import assert_array_equal\n'), ((1644, 1701), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['out.output', '(proc.output + out_inc * 2)'], {}), '(out.output, proc.output + out_inc * 2)\n', (1662, 1701), False, 'from numpy.testing import assert_array_equal\n'), ((2295, 2329), 'cognigraph.tests.prepare_pipeline_tests.create_dummy_info', 'create_dummy_info', ([], {'nchan': 'new_nchan'}), '(nchan=new_nchan)\n', (2312, 2329), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((2603, 2621), 'numpy.all', 'np.all', (['out.output'], {}), '(out.output)\n', (2609, 2621), True, 'import numpy as np\n'), ((2896, 2928), 'cognigraph.tests.prepare_pipeline_tests.ConcreteProcessor', 'ConcreteProcessor', ([], {'increment': '(0.2)'}), '(increment=0.2)\n', (2913, 2928), False, 'from cognigraph.tests.prepare_pipeline_tests import create_dummy_info, ConcreteSource, ConcreteProcessor, ConcreteOutput\n'), ((1335, 1357), 'numpy.zeros', 'np.zeros', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1343, 1357), True, 'import numpy as np\n'), ((1545, 1566), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1552, 1566), True, 'import numpy as np\n'), ((1395, 1417), 'numpy.zeros', 'np.zeros', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1403, 1417), True, 'import numpy as np\n'), ((1604, 1625), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (1611, 1625), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.ones', 'np.ones', (['[nch, nsamp]'], {}), '([nch, nsamp])\n', (3105, 3119), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
'''Testing for read_rockstar.py
@author: <NAME>
@contact: <EMAIL>
@status: Development
'''
import glob
from mock import call, patch
import numpy as np
import numpy.testing as npt
import os
import pdb
import pytest
import unittest
import galaxy_dive.read_data.rockstar as read_rockstar
import galaxy_dive.utils.utilities as utilities
########################################################################
########################################################################
class TestRockstarReader( unittest.TestCase ):
def setUp( self ):
self.rockstar_reader = read_rockstar.RockstarReader(
'./tests/data/rockstar_dir',
)
########################################################################
def test_get_halos( self ):
self.rockstar_reader.get_halos( 600 )
expected = 51
actual = self.rockstar_reader.halos['Np'][6723]
npt.assert_allclose( expected, actual )
|
[
"numpy.testing.assert_allclose",
"galaxy_dive.read_data.rockstar.RockstarReader"
] |
[((603, 660), 'galaxy_dive.read_data.rockstar.RockstarReader', 'read_rockstar.RockstarReader', (['"""./tests/data/rockstar_dir"""'], {}), "('./tests/data/rockstar_dir')\n", (631, 660), True, 'import galaxy_dive.read_data.rockstar as read_rockstar\n'), ((901, 938), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (920, 938), True, 'import numpy.testing as npt\n')]
|
from typing import Union, Optional, Sequence, Any, Mapping, List, Tuple, Callable
from collections.abc import Iterable
import operator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
def pie_marker(
ratios: Sequence[float],
res: int = 50,
direction: str = "+",
start: float = 0.0,
) -> Tuple[list, list]:
"""
Create each slice of pie as a separate marker.
Parameters:
ratios(list): List of ratios that add up to 1.
res: Number of points around the circle.
direction: '+' for counter-clockwise, or '-' for clockwise.
start: Starting position in radians.
Returns:
xys, ss: Tuple of list of xy points and sizes of each slice in the pie marker.
"""
if np.abs(np.sum(ratios) - 1) > 0.01:
print("Warning: Ratios do not add up to 1.")
if direction == '+':
op = operator.add
elif direction == '-':
op = operator.sub
xys = [] # list of xy points of each slice
ss = [] # list of size of each slice
start = float(start)
for ratio in ratios:
# points on the circle including the origin (0,0) and the slice
end = op(start, 2 * np.pi * ratio)
n = round(ratio * res) # number of points forming the arc
x = [0] + np.cos(np.linspace(start, end, n)).tolist()
y = [0] + np.sin(np.linspace(start, end, n)).tolist()
xy = np.column_stack([x, y])
xys.append(xy)
ss.append(np.abs(xy).max())
start = end
return xys, ss
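# --- Added illustrative sketch (not part of the original module) ------------
# pie_marker() returns one polygon path (xy) and one size per slice; each
# path can be passed to matplotlib's scatter as `marker=xy` to draw a wedge.
def _example_pie_marker_usage():
    xys, ss = pie_marker([0.25, 0.75], res=50)
    assert len(xys) == len(ss) == 2  # one entry per ratio
    return xys, ss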
def scatter_pie(
x: Union[int, float, Sequence[int], Sequence[float]],
y: Union[int, float, Sequence[int], Sequence[float]],
ratios: Union[Sequence[float], Sequence[Sequence[float]]],
colors: Union[List, str] = "tab10",
res: int = 50,
direction: str = "+",
start: float = 0.0,
ax=None,
size=100,
edgecolor="none",
**kwargs) -> Axes:
"""
Plot scatter pie plots.
Parameters:
x: list/array of x values
y: list/array of y values
ratios: List of lists of ratios that add up to 1.
colors: List of colors in order, or name of colormap.
res: Number of points around the circle.
direction: '+' for counter-clockwise, or '-' for clockwise.
start: Starting position in radians.
kwargs: Arguments passed to :func:`matplotlib.pyplot.scatter`
Returns:
A :class:`~matplotlib.axes.Axes`
"""
if ax is None:
_, ax = plt.subplots()
    # Convert arguments to iterables when there is only one point.
    if (not isinstance(x, Iterable)) and isinstance(ratios[0], (int, float)):
print("Plotting single point")
x = [x]
y = [y]
ratios = [ratios]
# Set colors
if type(colors) == str:
cmap = plt.get_cmap(colors)
colors = [cmap(i) for i in range(len(ratios[0]))]
# make pie marker for each unique set of ratios
df = pd.DataFrame({'x':x, 'y':y, 'ratios':ratios})
df.ratios = df.ratios.apply(tuple)
gb = df.groupby("ratios")
for ratio in gb.groups:
group = gb.get_group(ratio)
xys, ss = pie_marker(ratio, res=res, direction=direction, start=start)
for xy, s, color in zip(xys, ss, colors):
# plot non-zero slices
if s != 0:
ax.scatter(group.x, group.y, marker=xy, s=[s*s*size],
facecolor=color, edgecolor=edgecolor, **kwargs)
return ax
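# --- Added illustrative sketch (not part of the original module) ------------
# Minimal call of scatter_pie() defined above: two points, two slices per pie;
# the coordinates, ratios and colors are hypothetical.
def _example_scatter_pie_usage():
    ax = scatter_pie(
        x=[0, 1],
        y=[0, 1],
        ratios=[[0.3, 0.7], [0.5, 0.5]],
        colors=["tab:blue", "tab:orange"],
    )
    return ax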
def get_palette(categories, cmap):
"""
Generate dictionary mapping categories to color.
"""
cc = plt.get_cmap(cmap)
if len(categories) > len(cc.colors):
raise ValueError("Number of categories more than number of colors in cmap.")
palette = {x: cc(i) for i, x in enumerate(categories)}
return palette
def scatter_pie_from_df(
df: pd.DataFrame,
x: str,
y: str,
cols: Optional[list] = [],
normalize: bool = True,
return_df: bool = False,
palette: Optional[dict] = None,
cmap: Optional[str] = "tab10",
**kwargs,
) -> Axes:
"""
Plot scatter pie based on columns in a DataFrame.
Parameters:
df: Dataframe containing x, y, and additional count columns.
x: Column to use as x-values.
y: Column to use as y-values.
cols: List of columns in dataframe to use as ratios and plotting.
If [], uses all columns besides x and y.
normalize: If True, calculate ratios using selected columns.
return_df: If True, also return normalized dataframe.
palette: Dictionary mapping column name to color.
If None, create mapping using cmap.
cmap: Name of colormap to use if palette not provided.
kwargs: Arguments passed to :func:`scatter_pie`
Returns:
A :class:`~matplotlib.axes.Axes` and normalized df if `return_df` is True.
"""
# make copy of dataframe and set xy as index
df = df.copy().set_index([x, y])
if (type(cols)==list) & (len(cols) > 1):
# used specified list of columns
df = df.loc[:, cols]
elif cols!=[]:
raise ValueError("cols must be a list of more than one column headers")
# row normalize
categories = df.columns
df = df.div(df.sum(axis=1), axis=0).fillna(0)
df = df.reset_index()
# generate mapping of category to color
if palette == None:
palette = get_palette(categories, cmap)
ratios = df[categories].to_records(index=False).tolist()
colors = [palette[cat] for cat in categories]
ax = scatter_pie(df[x].values, df[y].values, ratios, colors, **kwargs)
# generate legend as separate figure
if return_df:
return ax, df
return ax
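# --- Added illustrative sketch (not part of the original module) ------------
# Minimal usage of scatter_pie_from_df() defined above; the count columns
# "typeA" and "typeB" are hypothetical and get row-normalized into ratios.
def _example_scatter_pie_from_df_usage():
    df = pd.DataFrame({
        "x": [0, 1, 2],
        "y": [0, 1, 0],
        "typeA": [3, 1, 0],
        "typeB": [1, 4, 2],
    })
    ax, norm_df = scatter_pie_from_df(df, x="x", y="y", return_df=True)
    return ax, norm_df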
def scatter_legend(ax, labels, palette, **kwargs):
handles = [plt.scatter([], [], color=palette[l], label=l) for l in labels]
ax.legend(handles=handles, **kwargs)
def expand_xlim(ax, percent=0.1):
lim = ax.get_xlim()
length = lim[1] - lim[0]
change = length * percent
lower = lim[0] - change
upper = lim[1] + change
ax.set_xlim(lower, upper)
return
def expand_ylim(ax, percent=0.1):
lim = ax.get_ylim()
length = lim[1] - lim[0]
change = length * percent
lower = lim[0] - change
upper = lim[1] + change
ax.set_ylim(lower, upper)
return
|
[
"numpy.abs",
"numpy.column_stack",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap"
] |
[((3013, 3061), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'ratios': ratios}"], {}), "({'x': x, 'y': y, 'ratios': ratios})\n", (3025, 3061), True, 'import pandas as pd\n'), ((3646, 3664), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (3658, 3664), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1494), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (1486, 1494), True, 'import numpy as np\n'), ((2562, 2576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2574, 2576), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2892), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['colors'], {}), '(colors)\n', (2884, 2892), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5900), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'color': 'palette[l]', 'label': 'l'}), '([], [], color=palette[l], label=l)\n', (5865, 5900), True, 'import matplotlib.pyplot as plt\n'), ((824, 838), 'numpy.sum', 'np.sum', (['ratios'], {}), '(ratios)\n', (830, 838), True, 'import numpy as np\n'), ((1536, 1546), 'numpy.abs', 'np.abs', (['xy'], {}), '(xy)\n', (1542, 1546), True, 'import numpy as np\n'), ((1359, 1385), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (1370, 1385), True, 'import numpy as np\n'), ((1421, 1447), 'numpy.linspace', 'np.linspace', (['start', 'end', 'n'], {}), '(start, end, n)\n', (1432, 1447), True, 'import numpy as np\n')]
|
"""
Test script for data.py classes.
"""
import os
import numpy as np
import pytest
from bilby.core.prior import PriorDict, Uniform
from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood
class TestTargetedPulsarLikelihood(object):
"""
Tests for the TargetedPulsarLikelihood class.
"""
parfile = "J0123+3456.par"
times = np.linspace(1000000000.0, 1000086340.0, 1440)
data = np.random.normal(0.0, 1e-25, size=(1440, 2))
onesdata = np.ones((1440, 2))
detector = "H1"
@classmethod
def setup_class(cls):
# create a pulsar parameter file
parcontent = """\
PSRJ J0123+3456
RAJ 01:23:45.6789
DECJ 34:56:54.321
F0 567.89
F1 -1.2e-12
PEPOCH 56789
H0 9.87e-26
COSIOTA 0.3
PSI 1.1
PHI0 2.4
"""
# add content to the par file
with open("J0123+3456.par", "w") as fp:
fp.write(parcontent)
@classmethod
def teardown_class(cls):
os.remove("J0123+3456.par")
def test_wrong_inputs(self):
"""
Test that exceptions are raised for incorrect inputs to the
TargetedPulsarLikelihood.
"""
with pytest.raises(TypeError):
TargetedPulsarLikelihood(None, None)
# create HeterodynedData object (no par file)
het = HeterodynedData(self.data, times=self.times, detector=self.detector)
priors = dict()
priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
# error with no par file
with pytest.raises(ValueError):
TargetedPulsarLikelihood(het, PriorDict(priors))
het = HeterodynedData(
self.data, times=self.times, detector=self.detector, par=self.parfile
)
mhet = MultiHeterodynedData(het) # multihet object for testing
with pytest.raises(TypeError):
TargetedPulsarLikelihood(het, None)
with pytest.raises(TypeError):
TargetedPulsarLikelihood(mhet, None)
def test_priors(self):
"""
Test the parsed priors.
"""
# bad priors (unexpected parameter names)
priors = dict()
priors["a"] = Uniform(0.0, 1.0, "blah")
priors["b"] = 2.0
het = HeterodynedData(
self.data, times=self.times, detector=self.detector, par=self.parfile
)
with pytest.raises(ValueError):
_ = TargetedPulsarLikelihood(het, PriorDict(priors))
def test_wrong_likelihood(self):
"""
Test with a bad likelihood name.
"""
het = HeterodynedData(
self.data, times=self.times, detector=self.detector, par=self.parfile
)
priors = dict()
priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
with pytest.raises(ValueError):
_ = TargetedPulsarLikelihood(het, PriorDict(priors), likelihood="blah")
def test_likelihood_null_likelihood(self):
"""
Test likelihood and null likelihood.
"""
het = HeterodynedData(
self.data, times=self.times, detector=self.detector, par=self.parfile
)
priors = dict()
priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
for likelihood in ["gaussian", "studentst"]:
like = TargetedPulsarLikelihood(
het, PriorDict(priors), likelihood=likelihood
)
like.parameters = {"h0": 0.0}
assert like.log_likelihood() == like.noise_log_likelihood()
def test_numba_likelihood(self):
"""
Test likelihood using numba against the standard likelihood.
"""
het = HeterodynedData(
self.data, times=self.times, detector=self.detector, par=self.parfile
)
priors = dict()
priors["h0"] = Uniform(0.0, 1.0e-23, "h0")
for likelihood in ["gaussian", "studentst"]:
like1 = TargetedPulsarLikelihood(
het, PriorDict(priors), likelihood=likelihood
)
like1.parameters = {"h0": 1e-24}
like2 = TargetedPulsarLikelihood(
het, PriorDict(priors), likelihood=likelihood, numba=True
)
like2.parameters = {"h0": 1e-24}
assert np.allclose(
[like1.log_likelihood()], [like2.log_likelihood()], atol=1e-10, rtol=0.0
)
|
[
"numpy.random.normal",
"cwinpy.HeterodynedData",
"numpy.ones",
"cwinpy.MultiHeterodynedData",
"cwinpy.TargetedPulsarLikelihood",
"numpy.linspace",
"pytest.raises",
"bilby.core.prior.PriorDict",
"bilby.core.prior.Uniform",
"os.remove"
] |
[((372, 417), 'numpy.linspace', 'np.linspace', (['(1000000000.0)', '(1000086340.0)', '(1440)'], {}), '(1000000000.0, 1000086340.0, 1440)\n', (383, 417), True, 'import numpy as np\n'), ((429, 473), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1e-25)'], {'size': '(1440, 2)'}), '(0.0, 1e-25, size=(1440, 2))\n', (445, 473), True, 'import numpy as np\n'), ((489, 507), 'numpy.ones', 'np.ones', (['(1440, 2)'], {}), '((1440, 2))\n', (496, 507), True, 'import numpy as np\n'), ((989, 1016), 'os.remove', 'os.remove', (['"""J0123+3456.par"""'], {}), "('J0123+3456.par')\n", (998, 1016), False, 'import os\n'), ((1335, 1403), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector'}), '(self.data, times=self.times, detector=self.detector)\n', (1350, 1403), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1452, 1477), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (1459, 1477), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((1630, 1721), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (1645, 1721), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1754, 1779), 'cwinpy.MultiHeterodynedData', 'MultiHeterodynedData', (['het'], {}), '(het)\n', (1774, 1779), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2169, 2194), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1.0)', '"""blah"""'], {}), "(0.0, 1.0, 'blah')\n", (2176, 2194), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2236, 2327), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (2251, 2327), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2569, 2660), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (2584, 2660), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2726, 2751), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (2733, 2751), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3011, 3102), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (3026, 3102), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((3168, 3193), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (3175, 3193), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3632, 3723), 'cwinpy.HeterodynedData', 'HeterodynedData', (['self.data'], {'times': 'self.times', 'detector': 'self.detector', 'par': 'self.parfile'}), '(self.data, times=self.times, detector=self.detector, par=\n self.parfile)\n', (3647, 3723), False, 'from cwinpy import 
HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((3789, 3814), 'bilby.core.prior.Uniform', 'Uniform', (['(0.0)', '(1e-23)', '"""h0"""'], {}), "(0.0, 1e-23, 'h0')\n", (3796, 3814), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((1191, 1215), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1204, 1215), False, 'import pytest\n'), ((1229, 1265), 'cwinpy.TargetedPulsarLikelihood', 'TargetedPulsarLikelihood', (['None', 'None'], {}), '(None, None)\n', (1253, 1265), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1527, 1552), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1540, 1552), False, 'import pytest\n'), ((1825, 1849), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1838, 1849), False, 'import pytest\n'), ((1863, 1898), 'cwinpy.TargetedPulsarLikelihood', 'TargetedPulsarLikelihood', (['het', 'None'], {}), '(het, None)\n', (1887, 1898), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((1913, 1937), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1926, 1937), False, 'import pytest\n'), ((1951, 1987), 'cwinpy.TargetedPulsarLikelihood', 'TargetedPulsarLikelihood', (['mhet', 'None'], {}), '(mhet, None)\n', (1975, 1987), False, 'from cwinpy import HeterodynedData, MultiHeterodynedData, TargetedPulsarLikelihood\n'), ((2359, 2384), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2372, 2384), False, 'import pytest\n'), ((2768, 2793), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2781, 2793), False, 'import pytest\n'), ((1596, 1613), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (1605, 1613), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2432, 2449), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (2441, 2449), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((2841, 2858), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (2850, 2858), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3316, 3333), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (3325, 3333), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((3938, 3955), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (3947, 3955), False, 'from bilby.core.prior import PriorDict, Uniform\n'), ((4106, 4123), 'bilby.core.prior.PriorDict', 'PriorDict', (['priors'], {}), '(priors)\n', (4115, 4123), False, 'from bilby.core.prior import PriorDict, Uniform\n')]
|
###############################################################################
# mockDensData.py: generate mock data following a given density
###############################################################################
import os, os.path
import pickle
import multiprocessing
from optparse import OptionParser
import numpy
from scipy import ndimage
import fitsio
from galpy.util import bovy_coords, multi
import mwdust
import define_rcsample
import fitDens
import densprofiles
dmap= None
dmapg15= None
apo= None
def generate(locations,
type='exp',
sample='lowlow',
extmap='green15',
nls=101,
nmock=1000,
H0=-1.49,
_dmapg15=None,
ncpu=1):
"""
NAME:
generate
PURPOSE:
generate mock data following a given density
INPUT:
locations - locations to be included in the sample
type= ('exp') type of density profile to sample from
sample= ('lowlow') for selecting mock parameters
extmap= ('green15') extinction map to use ('marshall06' and others use Green15 to fill in unobserved regions)
nls= (101) number of longitude bins to use for each field
nmock= (1000) number of mock data points to generate
H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
ncpu= (1) number of cpus to use to compute the probability
OUTPUT:
mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
HISTORY:
2015-04-03 - Written - Bovy (IAS)
"""
if isinstance(H0,float): H0= [H0]
# Setup the density function and its initial parameters
rdensfunc= fitDens._setup_densfunc(type)
mockparams= _setup_mockparams_densfunc(type,sample)
densfunc= lambda x,y,z: rdensfunc(x,y,z,params=mockparams)
# Setup the extinction map
global dmap
global dmapg15
if _dmapg15 is None: dmapg15= mwdust.Green15(filter='2MASS H')
else: dmapg15= _dmapg15
if isinstance(extmap,mwdust.DustMap3D.DustMap3D):
dmap= extmap
elif extmap.lower() == 'green15':
dmap= dmapg15
elif extmap.lower() == 'marshall06':
dmap= mwdust.Marshall06(filter='2MASS H')
elif extmap.lower() == 'sale14':
dmap= mwdust.Sale14(filter='2MASS H')
elif extmap.lower() == 'drimmel03':
dmap= mwdust.Drimmel03(filter='2MASS H')
# Use brute-force rejection sampling to make no approximations
# First need to estimate the max probability to use in rejection;
# Loop through all locations and compute sampling probability on grid in
# (l,b,D)
# First restore the APOGEE selection function (assumed pre-computed)
global apo
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
# Now compute the necessary coordinate transformations and evaluate the
# maximum probability
distmods= numpy.linspace(7.,15.5,301)
ds= 10.**(distmods/5-2.)
nbs= nls
lnprobs= numpy.empty((len(locations),len(distmods),nbs,nls))
radii= []
lcens, bcens= [], []
lnprobs= multi.parallel_map(lambda x: _calc_lnprob(locations[x],nls,nbs,
ds,distmods,
H0,
densfunc),
range(len(locations)),
numcores=numpy.amin([len(locations),
multiprocessing.cpu_count(),ncpu]))
lnprobs= numpy.array(lnprobs)
for ll, loc in enumerate(locations):
lcen, bcen= apo.glonGlat(loc)
rad= apo.radius(loc)
radii.append(rad) # save for later
lcens.append(lcen[0])
bcens.append(bcen[0])
maxp= (numpy.exp(numpy.nanmax(lnprobs))-10.**-8.)*1.1 # Just to be sure
# Now generate mock data using rejection sampling
nout= 0
arlocations= numpy.array(locations)
arradii= numpy.array(radii)
arlcens= numpy.array(lcens)
arbcens= numpy.array(bcens)
out= numpy.recarray((nmock,),
dtype=[('RC_DIST_H','f8'),
('RC_DM_H','f8'),
('RC_GALR_H','f8'),
('RC_GALPHI_H','f8'),
('RC_GALZ_H','f8')])
while nout < nmock:
nnew= 2*(nmock-nout)
# nnew new locations
locIndx= numpy.floor(numpy.random.uniform(size=nnew)*len(locations)).astype('int')
newlocations= arlocations[locIndx]
        # Draw a candidate point in each selected location: uniform in distance
        # modulus and uniform in (l,b) offset within the enclosing square
newds_coord= numpy.random.uniform(size=nnew)
newds= 10.**((newds_coord*(numpy.amax(distmods)-numpy.amin(distmods))\
+numpy.amin(distmods))/5.-2.)
newdls_coord= numpy.random.uniform(size=nnew)
newdls= newdls_coord*2.*arradii[locIndx]\
-arradii[locIndx]
newdbs_coord= numpy.random.uniform(size=nnew)
newdbs= newdbs_coord*2.*arradii[locIndx]\
-arradii[locIndx]
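        # Keep only candidates that fall inside the circular field of view
        # (the (l,b) offsets were drawn in the enclosing square of half-width rad)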
newr2s= newdls**2.+newdbs**2.
keepIndx= newr2s < arradii[locIndx]**2.
newlocations= newlocations[keepIndx]
newds_coord= newds_coord[keepIndx]
newdls_coord= newdls_coord[keepIndx]
newdbs_coord= newdbs_coord[keepIndx]
newds= newds[keepIndx]
newdls= newdls[keepIndx]
newdbs= newdbs[keepIndx]
newls= newdls+arlcens[locIndx][keepIndx]
newbs= newdbs+arbcens[locIndx][keepIndx]
        # Rejection step: interpolate the precomputed log-probability grid at each
        # candidate and keep it only if it exceeds a uniform draw scaled by maxp
tps= numpy.zeros_like(newds)
for nloc in list(set(newlocations)):
lindx= newlocations == nloc
pindx= arlocations == nloc
coord= numpy.array([newds_coord[lindx]*(len(distmods)-1.),
newdbs_coord[lindx]*(nbs-1.),
newdls_coord[lindx]*(nls-1.)])
tps[lindx]= \
numpy.exp(ndimage.interpolation.map_coordinates(\
lnprobs[pindx][0],
coord,cval=-10.,
order=1))-10.**-8.
XYZ= bovy_coords.lbd_to_XYZ(newls,newbs,newds,degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rcsample._R0,
Ysun=0.,
Zsun=define_rcsample._Z0)
testp= numpy.random.uniform(size=len(newds))*maxp
keepIndx= tps > testp
if numpy.sum(keepIndx) > nmock-nout:
rangeIndx= numpy.zeros(len(keepIndx),dtype='int')
rangeIndx[keepIndx]= numpy.arange(numpy.sum(keepIndx))
keepIndx*= (rangeIndx < nmock-nout)
out['RC_DIST_H'][nout:nout+numpy.sum(keepIndx)]= newds[keepIndx]
out['RC_DM_H'][nout:nout+numpy.sum(keepIndx)]= newds_coord[keepIndx]*(numpy.amax(distmods)-numpy.amin(distmods))\
+numpy.amin(distmods)
out['RC_GALR_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[0][keepIndx]
out['RC_GALPHI_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[1][keepIndx]
out['RC_GALZ_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[2][keepIndx]
nout= nout+numpy.sum(keepIndx)
return (out,lnprobs)
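# Minimal usage sketch (not part of the original script; assumes the APOGEE
# selection-function pickle referenced above is available):
#
#   mock, lnprobs = generate([4240, 4242], type='exp', sample='lowlow',
#                            extmap='green15', nmock=100)
#   print(mock['RC_GALR_H'][:5], mock['RC_GALZ_H'][:5])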
def _setup_mockparams_densfunc(type,sample):
"""Return the parameters of the mock density for this type"""
if type.lower() == 'exp':
if sample.lower() == 'lowlow':
return [0.,1./0.3]
elif sample.lower() == 'solar':
return [1./3.,1./0.3]
else:
return [1./3.,1./0.3]
elif type.lower() == 'expplusconst':
if sample.lower() == 'lowlow':
return [0.,1./0.3,numpy.log(0.1)]
else:
return [1./3.,1./0.3,numpy.log(0.1)]
elif type.lower() == 'twoexp':
return [1./3.,1./0.3,1./4.,1./0.5,densprofiles.logit(0.5)]
elif type.lower() == 'brokenexp':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.)]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.)]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.)]
elif type.lower() == 'brokenexpflare':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.),-0.1]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.),-0.1]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.),-0.1]
elif type.lower() == 'gaussexp':
if sample.lower() == 'lowlow':
return [.4,1./0.3,numpy.log(11.)]
else:
return [1./3.,1./0.3,numpy.log(10.)]
def _calc_lnprob(loc,nls,nbs,ds,distmods,H0,densfunc):
lcen, bcen= apo.glonGlat(loc)
rad= apo.radius(loc)
ls= numpy.linspace(lcen-rad,lcen+rad,nls)
bs= numpy.linspace(bcen-rad,bcen+rad,nbs)
# Tile these
tls= numpy.tile(ls,(len(ds),len(bs),1))
tbs= numpy.swapaxes(numpy.tile(bs,(len(ds),len(ls),1)),1,2)
tds= numpy.tile(ds,(len(ls),len(bs),1)).T
XYZ= bovy_coords.lbd_to_XYZ(tls.flatten(),
tbs.flatten(),
tds.flatten(),
degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rcsample._R0,
Ysun=0.,
Zsun=define_rcsample._Z0)
# Evaluate probability density
tH= numpy.tile(distmods.T,(1,len(ls),len(bs),1))[0].T
for ii in range(tH.shape[1]):
for jj in range(tH.shape[2]):
try:
tH[:,ii,jj]+= dmap(ls[jj],bs[ii],ds)
except (IndexError, TypeError,ValueError):
try:
tH[:,ii,jj]+= dmapg15(ls[jj],bs[ii],ds)
except IndexError: # assume zero outside
pass
tH= tH.flatten()+H0[0]
ps= densfunc(Rphiz[0],Rphiz[1],Rphiz[2])*apo(loc,tH)\
*numpy.fabs(numpy.cos(tbs.flatten()/180.*numpy.pi))\
*tds.flatten()**3.
return numpy.log(numpy.reshape(ps,(len(distmods),nbs,nls))\
+10.**-8.)
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the mock data will be saved to"
parser = OptionParser(usage=usage)
parser.add_option("--type",dest='type',default='exp',
help="Type of density profile")
parser.add_option("--sample",dest='sample',default='lowlow',
help="Sample parameter for mock parameters")
parser.add_option("--H0",dest='H0',default=-1.49,type='float',
help="RC absolute magnitude")
parser.add_option("--nls",dest='nls',default=101,type='int',
help="Number of longitudes to bin each field in")
parser.add_option("--nmock",dest='nmock',default=20000,type='int',
help="Number of mock samples to generate")
# Dust map to use
parser.add_option("--extmap",dest='extmap',default='green15',
                      help="Dust map to use ('Green15', 'Marshall06', 'Drimmel03', 'Sale14', or 'zero')")
# Multiprocessing?
parser.add_option("-m","--multi",dest='multi',default=1,type='int',
help="number of cpus to use")
return parser
if __name__ == '__main__':
parser= get_options()
options, args= parser.parse_args()
data= define_rcsample.get_rcsample()
locations= list(set(list(data['LOCATION_ID'])))
#locations= [4240,4242]
out= generate(locations,
type=options.type,
sample=options.sample,
extmap=options.extmap,
nls=options.nls,
nmock=options.nmock,
H0=options.H0,
ncpu=options.multi)
fitsio.write(args[0],out[0],clobber=True)
|
[
"mwdust.Drimmel03",
"define_rcsample.get_rcsample",
"densprofiles.logit",
"numpy.log",
"multiprocessing.cpu_count",
"numpy.array",
"mwdust.Green15",
"os.path.exists",
"numpy.linspace",
"numpy.nanmax",
"numpy.amax",
"numpy.amin",
"mwdust.Marshall06",
"pickle.load",
"galpy.util.bovy_coords.XYZ_to_galcencyl",
"mwdust.Sale14",
"scipy.ndimage.interpolation.map_coordinates",
"fitsio.write",
"optparse.OptionParser",
"fitDens._setup_densfunc",
"galpy.util.bovy_coords.lbd_to_XYZ",
"numpy.sum",
"numpy.recarray",
"numpy.random.uniform",
"numpy.zeros_like"
] |
[((1685, 1714), 'fitDens._setup_densfunc', 'fitDens._setup_densfunc', (['type'], {}), '(type)\n', (1708, 1714), False, 'import fitDens\n'), ((2767, 2793), 'os.path.exists', 'os.path.exists', (['selectFile'], {}), '(selectFile)\n', (2781, 2793), False, 'import os, os.path\n'), ((2999, 3029), 'numpy.linspace', 'numpy.linspace', (['(7.0)', '(15.5)', '(301)'], {}), '(7.0, 15.5, 301)\n', (3013, 3029), False, 'import numpy\n'), ((3669, 3689), 'numpy.array', 'numpy.array', (['lnprobs'], {}), '(lnprobs)\n', (3680, 3689), False, 'import numpy\n'), ((4060, 4082), 'numpy.array', 'numpy.array', (['locations'], {}), '(locations)\n', (4071, 4082), False, 'import numpy\n'), ((4096, 4114), 'numpy.array', 'numpy.array', (['radii'], {}), '(radii)\n', (4107, 4114), False, 'import numpy\n'), ((4128, 4146), 'numpy.array', 'numpy.array', (['lcens'], {}), '(lcens)\n', (4139, 4146), False, 'import numpy\n'), ((4160, 4178), 'numpy.array', 'numpy.array', (['bcens'], {}), '(bcens)\n', (4171, 4178), False, 'import numpy\n'), ((4188, 4330), 'numpy.recarray', 'numpy.recarray', (['(nmock,)'], {'dtype': "[('RC_DIST_H', 'f8'), ('RC_DM_H', 'f8'), ('RC_GALR_H', 'f8'), (\n 'RC_GALPHI_H', 'f8'), ('RC_GALZ_H', 'f8')]"}), "((nmock,), dtype=[('RC_DIST_H', 'f8'), ('RC_DM_H', 'f8'), (\n 'RC_GALR_H', 'f8'), ('RC_GALPHI_H', 'f8'), ('RC_GALZ_H', 'f8')])\n", (4202, 4330), False, 'import numpy\n'), ((8910, 8953), 'numpy.linspace', 'numpy.linspace', (['(lcen - rad)', '(lcen + rad)', 'nls'], {}), '(lcen - rad, lcen + rad, nls)\n', (8924, 8953), False, 'import numpy\n'), ((8956, 8999), 'numpy.linspace', 'numpy.linspace', (['(bcen - rad)', '(bcen + rad)', 'nbs'], {}), '(bcen - rad, bcen + rad, nbs)\n', (8970, 8999), False, 'import numpy\n'), ((9362, 9490), 'galpy.util.bovy_coords.XYZ_to_galcencyl', 'bovy_coords.XYZ_to_galcencyl', (['XYZ[:, 0]', 'XYZ[:, 1]', 'XYZ[:, 2]'], {'Xsun': 'define_rcsample._R0', 'Ysun': '(0.0)', 'Zsun': 'define_rcsample._Z0'}), '(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], Xsun=\n define_rcsample._R0, Ysun=0.0, Zsun=define_rcsample._Z0)\n', (9390, 9490), False, 'from galpy.util import bovy_coords, multi\n'), ((10481, 10506), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (10493, 10506), False, 'from optparse import OptionParser\n'), ((11604, 11634), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (11632, 11634), False, 'import define_rcsample\n'), ((12012, 12055), 'fitsio.write', 'fitsio.write', (['args[0]', 'out[0]'], {'clobber': '(True)'}), '(args[0], out[0], clobber=True)\n', (12024, 12055), False, 'import fitsio\n'), ((1937, 1969), 'mwdust.Green15', 'mwdust.Green15', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (1951, 1969), False, 'import mwdust\n'), ((4745, 4776), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4765, 4776), False, 'import numpy\n'), ((4920, 4951), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4940, 4951), False, 'import numpy\n'), ((5054, 5085), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (5074, 5085), False, 'import numpy\n'), ((5656, 5679), 'numpy.zeros_like', 'numpy.zeros_like', (['newds'], {}), '(newds)\n', (5672, 5679), False, 'import numpy\n'), ((6220, 6276), 'galpy.util.bovy_coords.lbd_to_XYZ', 'bovy_coords.lbd_to_XYZ', (['newls', 'newbs', 'newds'], {'degree': '(True)'}), '(newls, newbs, newds, degree=True)\n', (6242, 6276), False, 'from galpy.util import bovy_coords, multi\n'), 
((6289, 6417), 'galpy.util.bovy_coords.XYZ_to_galcencyl', 'bovy_coords.XYZ_to_galcencyl', (['XYZ[:, 0]', 'XYZ[:, 1]', 'XYZ[:, 2]'], {'Xsun': 'define_rcsample._R0', 'Ysun': '(0.0)', 'Zsun': 'define_rcsample._Z0'}), '(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], Xsun=\n define_rcsample._R0, Ysun=0.0, Zsun=define_rcsample._Z0)\n', (6317, 6417), False, 'from galpy.util import bovy_coords, multi\n'), ((2860, 2881), 'pickle.load', 'pickle.load', (['savefile'], {}), '(savefile)\n', (2871, 2881), False, 'import pickle\n'), ((6639, 6658), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6648, 6658), False, 'import numpy\n'), ((7058, 7078), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (7068, 7078), False, 'import numpy\n'), ((7328, 7347), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7337, 7347), False, 'import numpy\n'), ((2188, 2223), 'mwdust.Marshall06', 'mwdust.Marshall06', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2205, 2223), False, 'import mwdust\n'), ((3922, 3943), 'numpy.nanmax', 'numpy.nanmax', (['lnprobs'], {}), '(lnprobs)\n', (3934, 3943), False, 'import numpy\n'), ((6781, 6800), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6790, 6800), False, 'import numpy\n'), ((2275, 2306), 'mwdust.Sale14', 'mwdust.Sale14', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2288, 2306), False, 'import mwdust\n'), ((3620, 3647), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3645, 3647), False, 'import multiprocessing\n'), ((6052, 6140), 'scipy.ndimage.interpolation.map_coordinates', 'ndimage.interpolation.map_coordinates', (['lnprobs[pindx][0]', 'coord'], {'cval': '(-10.0)', 'order': '(1)'}), '(lnprobs[pindx][0], coord, cval=-10.0,\n order=1)\n', (6089, 6140), False, 'from scipy import ndimage\n'), ((6885, 6904), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6894, 6904), False, 'import numpy\n'), ((6956, 6975), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (6965, 6975), False, 'import numpy\n'), ((7001, 7021), 'numpy.amax', 'numpy.amax', (['distmods'], {}), '(distmods)\n', (7011, 7021), False, 'import numpy\n'), ((7022, 7042), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (7032, 7042), False, 'import numpy\n'), ((7114, 7133), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7123, 7133), False, 'import numpy\n'), ((7192, 7211), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7201, 7211), False, 'import numpy\n'), ((7268, 7287), 'numpy.sum', 'numpy.sum', (['keepIndx'], {}), '(keepIndx)\n', (7277, 7287), False, 'import numpy\n'), ((7817, 7831), 'numpy.log', 'numpy.log', (['(0.1)'], {}), '(0.1)\n', (7826, 7831), False, 'import numpy\n'), ((7880, 7894), 'numpy.log', 'numpy.log', (['(0.1)'], {}), '(0.1)\n', (7889, 7894), False, 'import numpy\n'), ((7973, 7996), 'densprofiles.logit', 'densprofiles.logit', (['(0.5)'], {}), '(0.5)\n', (7991, 7996), False, 'import densprofiles\n'), ((2361, 2395), 'mwdust.Drimmel03', 'mwdust.Drimmel03', ([], {'filter': '"""2MASS H"""'}), "(filter='2MASS H')\n", (2377, 2395), False, 'import mwdust\n'), ((4580, 4611), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'nnew'}), '(size=nnew)\n', (4600, 4611), False, 'import numpy\n'), ((4869, 4889), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (4879, 4889), False, 'import numpy\n'), ((8110, 8125), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8119, 8125), False, 'import numpy\n'), ((4812, 4832), 
'numpy.amax', 'numpy.amax', (['distmods'], {}), '(distmods)\n', (4822, 4832), False, 'import numpy\n'), ((4833, 4853), 'numpy.amin', 'numpy.amin', (['distmods'], {}), '(distmods)\n', (4843, 4853), False, 'import numpy\n'), ((8206, 8220), 'numpy.log', 'numpy.log', (['(8.0)'], {}), '(8.0)\n', (8215, 8220), False, 'import numpy\n'), ((8275, 8289), 'numpy.log', 'numpy.log', (['(6.0)'], {}), '(6.0)\n', (8284, 8289), False, 'import numpy\n'), ((8407, 8422), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8416, 8422), False, 'import numpy\n'), ((8508, 8522), 'numpy.log', 'numpy.log', (['(8.0)'], {}), '(8.0)\n', (8517, 8522), False, 'import numpy\n'), ((8582, 8596), 'numpy.log', 'numpy.log', (['(6.0)'], {}), '(6.0)\n', (8591, 8596), False, 'import numpy\n'), ((8708, 8723), 'numpy.log', 'numpy.log', (['(11.0)'], {}), '(11.0)\n', (8717, 8723), False, 'import numpy\n'), ((8771, 8786), 'numpy.log', 'numpy.log', (['(10.0)'], {}), '(10.0)\n', (8780, 8786), False, 'import numpy\n')]
|
import os
import numpy as np
import pandas as pd
import yaml
from . import model as model_lib
from . import training, tensorize, io_local
def main():
    #Suppress TensorFlow log messages:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
###Load training data - Put the path to your own data here
training_data_path = "/root/training/training_preprocessed.csv"
training_df = pd.read_csv(training_data_path)
    ###Drop all peptides containing selenocysteine (U)
training_df = training_df.loc[~training_df.modified_sequence.str.contains("U")]
print("CSV Loaded, shape is {}.".format(training_df.shape))
###Load Untrained Retention Time Model and prepare its training data
iRT_model_dir = "/root/training/iRT/"
iRT_model, iRT_config = model_lib.load(iRT_model_dir, trained=False)
iRT_callbacks = training.get_callbacks(iRT_model_dir)
iRT_raw_mean = training_df.uRT.mean()
iRT_raw_var = training_df.uRT.var()
iRT_config['iRT_rescaling_mean'] = float(iRT_raw_mean)
iRT_config['iRT_rescaling_var'] = float(iRT_raw_var)
with open(iRT_model_dir + "config_new.yml", "w") as config_outfile:
yaml.dump(iRT_config, config_outfile)
###Load Untrained Fragmentation Model and prepare its training data
msms_model_dir = "/root/training/msms/"
msms_model, msms_config = model_lib.load(msms_model_dir, trained=False)
msms_callbacks = training.get_callbacks(msms_model_dir)
#The intensity lists are already in proper order, but might have some missing values and need to be padded to the correct length
#(Only a peptide of the maximal length 29 will have 522 values, but all lists need to be of this length)
intensities_length = 522
print("iRT and Fragmentation Intensity Models Loaded.")
#Compile the models once, and then call fit separately - useful if you lack memory or space and have to partition your training data
training.compile_model(iRT_model, iRT_config)
training.compile_model(msms_model, msms_config)
training_tensorized = tensorize.csv(training_df[['modified_sequence', 'collision_energy', 'precursor_charge']], nlosses=3)
print("CSV Tensorized.")
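    #Standardize the retention times with the training-set mean/variance computed above,
    #so the iRT model trains on (approximately) zero-mean, unit-variance targets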
training_tensorized['prediction'] = np.reshape(
np.asarray((training_df.uRT - iRT_raw_mean) / np.sqrt(iRT_raw_var)),
(-1,1))
training_df.relative_intensities = training_df.relative_intensities.apply(eval)
training_df.relative_intensities = training_df.relative_intensities.apply(
lambda ls: np.nan_to_num(np.pad(ls, pad_width=(0,intensities_length-len(ls)),constant_values=-1, mode="constant"),-1))
training_tensorized['intensities_raw'] = np.stack(training_df.relative_intensities)
###Write and reload training data in hdf5 format
hdf5_path = "/root/training/training_data.hdf5"
io_local.to_hdf5(training_tensorized,hdf5_path)
print("Training Data Written to HDF5 File.")
#Load the hdf5 again
training_loaded = io_local.from_hdf5(hdf5_path)
print("Training Data Reloaded from HDF5 File.\nCommencing Training of iRT Model...")
###Train both models
iRT_history = training.train_model(training_loaded, iRT_model, iRT_config, iRT_callbacks)
iRT_epochs = len(iRT_history.history['val_loss'])
iRT_val_loss = iRT_history.history['val_loss'][-1]
iRT_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(iRT_model_dir, iRT_epochs, iRT_val_loss)
iRT_model.save_weights(iRT_weights_filename)
print("Training of iRT Model Complete.\nCommencing Training of Fragmentation Intensity Model...")
msms_history = training.train_model(training_loaded, msms_model, msms_config, msms_callbacks)
#Save the weights to a file named by the val_loss and the epochs
msms_epochs = len(msms_history.history['val_loss'])
msms_val_loss = msms_history.history['val_loss'][-1]
msms_weights_filename = "{}/weight_{:02d}_{:.5f}.hdf5".format(msms_model_dir, msms_epochs, msms_val_loss)
msms_model.save_weights(msms_weights_filename)
print("Training of Fragmentation Intensity Model Complete.")
print("Done! You may now use these models for your predictions.")
if __name__ == '__main__':
main()
|
[
"numpy.stack",
"numpy.sqrt",
"pandas.read_csv",
"yaml.dump"
] |
[((368, 399), 'pandas.read_csv', 'pd.read_csv', (['training_data_path'], {}), '(training_data_path)\n', (379, 399), True, 'import pandas as pd\n'), ((2552, 2594), 'numpy.stack', 'np.stack', (['training_df.relative_intensities'], {}), '(training_df.relative_intensities)\n', (2560, 2594), True, 'import numpy as np\n'), ((1097, 1134), 'yaml.dump', 'yaml.dump', (['iRT_config', 'config_outfile'], {}), '(iRT_config, config_outfile)\n', (1106, 1134), False, 'import yaml\n'), ((2194, 2214), 'numpy.sqrt', 'np.sqrt', (['iRT_raw_var'], {}), '(iRT_raw_var)\n', (2201, 2214), True, 'import numpy as np\n')]
|
from time import perf_counter
import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import *
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from hpsklearn import HyperoptEstimator, svc, random_forest, knn
from hyperopt import tpe
from sklearn.metrics import f1_score
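# hyperopt minimizes the loss function, so 1 - macro-F1 is used as the objective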
def scorer(yt, yp): return 1 - f1_score(yt, yp, average='macro')
if __name__=='__main__':
np.random.seed(42)
train_X = np.load('data/train_X.npy')
test_X = np.load('data/test_X.npy')
train_Y = np.load('data/train_Y.npy')
test_Y = np.load('data/test_Y.npy')
estim = HyperoptEstimator(classifier=random_forest('rf'),algo=tpe.suggest,loss_fn=scorer,max_evals=200,trial_timeout=1200)
estim.fit(train_X, train_Y)
yp = estim.predict(test_X)
print(f1_score(test_Y, yp, average='macro'))
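    # Sketch (assumption, not part of the original run): the imported svc/knn helpers
    # could be searched the same way, e.g.
    #   estim = HyperoptEstimator(classifier=svc('svm'), algo=tpe.suggest,
    #                             loss_fn=scorer, max_evals=200, trial_timeout=1200)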
|
[
"sklearn.metrics.f1_score",
"numpy.load",
"numpy.random.seed",
"hpsklearn.random_forest"
] |
[((545, 563), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (559, 563), True, 'import numpy as np\n'), ((578, 605), 'numpy.load', 'np.load', (['"""data/train_X.npy"""'], {}), "('data/train_X.npy')\n", (585, 605), True, 'import numpy as np\n'), ((619, 645), 'numpy.load', 'np.load', (['"""data/test_X.npy"""'], {}), "('data/test_X.npy')\n", (626, 645), True, 'import numpy as np\n'), ((660, 687), 'numpy.load', 'np.load', (['"""data/train_Y.npy"""'], {}), "('data/train_Y.npy')\n", (667, 687), True, 'import numpy as np\n'), ((701, 727), 'numpy.load', 'np.load', (['"""data/test_Y.npy"""'], {}), "('data/test_Y.npy')\n", (708, 727), True, 'import numpy as np\n'), ((481, 514), 'sklearn.metrics.f1_score', 'f1_score', (['yt', 'yp'], {'average': '"""macro"""'}), "(yt, yp, average='macro')\n", (489, 514), False, 'from sklearn.metrics import f1_score\n'), ((929, 966), 'sklearn.metrics.f1_score', 'f1_score', (['test_Y', 'yp'], {'average': '"""macro"""'}), "(test_Y, yp, average='macro')\n", (937, 966), False, 'from sklearn.metrics import f1_score\n'), ((770, 789), 'hpsklearn.random_forest', 'random_forest', (['"""rf"""'], {}), "('rf')\n", (783, 789), False, 'from hpsklearn import HyperoptEstimator, svc, random_forest, knn\n')]
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
print(np.sqrt(eigvals[-1]))
print(np.sqrt(eigvals[-2]))
# Collect the top k eigenvectors (projected samples)
#X_pc = np.column_stack((eigvecs[:, -i]
# for i in range(1, n_components + 1)))
    # Comparing with scikit-learn's output, this version seems to be the correct one.
    # In the end it only changes each component by a scale factor, though,
    # so for learning purposes either convention may be fine.
X_pc = np.column_stack((np.sqrt(eigvals[-i]) * eigvecs[:, -i]
for i in range(1, n_components + 1)))
    # To map a PCA eigenvector v back onto the data samples we need X v.
    # Here we use the relation between the normalized singular vectors:
    # X v_i = sigma_i a_i (sigma_i = sqrt(lambda_i))
    # so sqrt(lambda_i) a_i is the data expressed in the principal-component basis.
return X_pc
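# Usage sketch (not part of the original module): the half-moon data, which linear
# PCA cannot unfold, becomes linearly separable after the RBF kernel PCA above.
#
#   from sklearn.datasets import make_moons
#   X, y = make_moons(n_samples=100, random_state=123)
#   X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
#   # the first component X_kpca[:, 0] already separates the two classes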
##
# Version that appears later in the text.
# The computation is identical; only the return values differ:
# it returns the eigenvectors and the eigenvalues.
#
def rbf_kernel_pca2(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
     alphas: {NumPy ndarray}, shape = [n_samples, k_features]
       Top k eigenvectors (projected samples)
     lambdas: list
       Eigenvalues
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
alphas = np.column_stack((eigvecs[:, -i]
for i in range(1, n_components + 1)))
# Collect the corresponding eigenvalues
lambdas = [eigvals[-i] for i in range(1, n_components + 1)]
return alphas, lambdas
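# A new sample x_new can be projected onto the components returned above with the
# kernel trick (standard kernel-PCA projection, shown here only as a sketch):
#
#   def project_x(x_new, X, gamma, alphas, lambdas):
#       pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
#       k = np.exp(-gamma * pair_dist)
#       return k.dot(alphas / lambdas)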
##
# A variant using a linear kernel instead of the RBF kernel.
# Used in test_kpca.py.
#
def linear_kernel_pca(X, n_components):
"""
    Linear kernel PCA implementation.
    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
"""
    # The linear kernel is the inner product (x_i, x_j), i.e. K = X X^T
N = X.shape[0]
K = np.ones((N, N))
for i in range(N):
for j in range(N):
K[i, j] = np.dot(X[i, :], X[j, :])
print(K.shape)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
print(np.sqrt(eigvals[-1]))
print(np.sqrt(eigvals[-2]))
# Collect the top k eigenvectors (projected samples)
#X_pc = np.column_stack((eigvecs[:, -i]
# for i in range(1, n_components + 1)))
    # Comparing with scikit-learn's output, this version seems to be the correct one.
    # In the end it only changes each component by a scale factor, though,
    # so for learning purposes either convention may be fine.
X_pc = np.column_stack((np.sqrt(eigvals[-i]) * eigvecs[:, -i]
for i in range(1, n_components + 1)))
    # To map a PCA eigenvector v back onto the data samples we need X v.
    # Here we use the relation between the normalized singular vectors:
    # X v_i = sigma_i a_i (sigma_i = sqrt(lambda_i))
    # so sqrt(lambda_i) a_i is the data expressed in the principal-component basis.
return X_pc
|
[
"scipy.linalg.eigh",
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"numpy.ones",
"scipy.exp",
"scipy.spatial.distance.pdist",
"numpy.dot"
] |
[((660, 683), 'scipy.spatial.distance.pdist', 'pdist', (['X', '"""sqeuclidean"""'], {}), "(X, 'sqeuclidean')\n", (665, 683), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((759, 779), 'scipy.spatial.distance.squareform', 'squareform', (['sq_dists'], {}), '(sq_dists)\n', (769, 779), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((832, 858), 'scipy.exp', 'exp', (['(-gamma * mat_sq_dists)'], {}), '(-gamma * mat_sq_dists)\n', (835, 858), False, 'from scipy import exp\n'), ((1138, 1145), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (1142, 1145), False, 'from scipy.linalg import eigh\n'), ((2460, 2483), 'scipy.spatial.distance.pdist', 'pdist', (['X', '"""sqeuclidean"""'], {}), "(X, 'sqeuclidean')\n", (2465, 2483), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2559, 2579), 'scipy.spatial.distance.squareform', 'squareform', (['sq_dists'], {}), '(sq_dists)\n', (2569, 2579), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2632, 2658), 'scipy.exp', 'exp', (['(-gamma * mat_sq_dists)'], {}), '(-gamma * mat_sq_dists)\n', (2635, 2658), False, 'from scipy import exp\n'), ((2938, 2945), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (2942, 2945), False, 'from scipy.linalg import eigh\n'), ((3906, 3921), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (3913, 3921), True, 'import numpy as np\n'), ((4317, 4324), 'scipy.linalg.eigh', 'eigh', (['K'], {}), '(K)\n', (4321, 4324), False, 'from scipy.linalg import eigh\n'), ((923, 938), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (930, 938), True, 'import numpy as np\n'), ((1156, 1176), 'numpy.sqrt', 'np.sqrt', (['eigvals[-1]'], {}), '(eigvals[-1])\n', (1163, 1176), True, 'import numpy as np\n'), ((1188, 1208), 'numpy.sqrt', 'np.sqrt', (['eigvals[-2]'], {}), '(eigvals[-2])\n', (1195, 1208), True, 'import numpy as np\n'), ((2723, 2738), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2730, 2738), True, 'import numpy as np\n'), ((4102, 4117), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (4109, 4117), True, 'import numpy as np\n'), ((4335, 4355), 'numpy.sqrt', 'np.sqrt', (['eigvals[-1]'], {}), '(eigvals[-1])\n', (4342, 4355), True, 'import numpy as np\n'), ((4367, 4387), 'numpy.sqrt', 'np.sqrt', (['eigvals[-2]'], {}), '(eigvals[-2])\n', (4374, 4387), True, 'import numpy as np\n'), ((3994, 4018), 'numpy.dot', 'np.dot', (['X[i, :]', 'X[j, :]'], {}), '(X[i, :], X[j, :])\n', (4000, 4018), True, 'import numpy as np\n'), ((1516, 1536), 'numpy.sqrt', 'np.sqrt', (['eigvals[-i]'], {}), '(eigvals[-i])\n', (1523, 1536), True, 'import numpy as np\n'), ((4695, 4715), 'numpy.sqrt', 'np.sqrt', (['eigvals[-i]'], {}), '(eigvals[-i])\n', (4702, 4715), True, 'import numpy as np\n')]
|
# %%
import timeit
import tqdm
from os import path
import inspect
import numpy as np
import dill
import init
import fastg3.crisp as g3crisp
from plot_utils import plot_bench
from constants import N_REPEATS, N_STEPS, DILL_FOLDER
from number_utils import format_number
from dataset_utils import AVAILABLE_DATASETS, load_dataset
MAX_SYN = 100000000
def gen_setup(dataset_name, f):
return f'''
import init
import fastg3.crisp as g3crisp
from sql_utils import g3_sql_bench
from dataset_utils import load_dataset
df, X, Y = load_dataset('{dataset_name}', n_tuples_syn={MAX_SYN})
df = df.sample(frac={str(f)}, replace=False, random_state=27)
'''
def time_test(dataset_name, frac_samples):
to_benchmark, labels = init.gen_time_benchmark()
for f in tqdm.tqdm(frac_samples):
setup=gen_setup(dataset_name, f)
for cmd in to_benchmark:
if cmd != 'G3_SQL':
duration_mean = timeit.timeit(cmd, setup=setup, number=N_REPEATS)/N_REPEATS*1000
to_benchmark[cmd].append(duration_mean)
else:
exec(setup)
to_benchmark[cmd].append(1000*eval(f'g3_sql_bench(df, X, Y, n_repeats={N_REPEATS})'))
yaxis_name=f"Average time on {str(N_REPEATS)} runs (ms)"
return to_benchmark, labels, yaxis_name
def approx_test(dataset_name, frac_samples):
to_benchmark, labels = init.gen_sampling_benchmark()
for f in tqdm.tqdm(frac_samples):
setup=gen_setup(dataset_name, f)
exec(setup)
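        # The g3 value from g3_hash is treated as the reference ('true') value; the
        # absolute error of each sampling-based estimator is measured against it below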
true_g3=eval('g3crisp.g3_hash(df, X, Y)')
for cmd in to_benchmark:
to_benchmark[cmd].append(abs(true_g3-eval(cmd)))
yaxis_name=f"Absolute error"# mean on {str(N_REPEATS)} runs"
return to_benchmark, labels, yaxis_name
if __name__ == '__main__':
STEP=1/N_STEPS
frac_samples = list(np.arange(STEP, 1+STEP, STEP))
for dataset_name in AVAILABLE_DATASETS:
for test_name in ['time', 'approx']:
script_name = inspect.stack()[0].filename.split('.')[0]
file_path = './'+path.join(DILL_FOLDER, f'{script_name}_{test_name}_{dataset_name}.d')
if path.isfile(file_path):
print(f'{file_path} found! Skipping...')
continue
else:
print(f'{file_path} in progress...')
if test_name=='time':
to_benchmark, labels, yaxis_name = time_test(dataset_name, frac_samples)
else:
to_benchmark, labels, yaxis_name = approx_test(dataset_name, frac_samples)
fig, ax = plot_bench(to_benchmark,
frac_samples,
labels,
xlabel="Number of tuples",
ylabel=yaxis_name,
logy=False,
savefig=False
)
if dataset_name=='syn':
dataset_size=MAX_SYN
else:
dataset_size = len(load_dataset(dataset_name)[0].index)
ax.xaxis.set_major_formatter(lambda x, pos: format_number(x*dataset_size))
dill.dump((fig, {"dataset_size": dataset_size}), open(file_path, "wb"))
|
[
"init.gen_sampling_benchmark",
"number_utils.format_number",
"inspect.stack",
"tqdm.tqdm",
"os.path.join",
"os.path.isfile",
"dataset_utils.load_dataset",
"timeit.timeit",
"plot_utils.plot_bench",
"numpy.arange",
"init.gen_time_benchmark"
] |
[((718, 743), 'init.gen_time_benchmark', 'init.gen_time_benchmark', ([], {}), '()\n', (741, 743), False, 'import init\n'), ((757, 780), 'tqdm.tqdm', 'tqdm.tqdm', (['frac_samples'], {}), '(frac_samples)\n', (766, 780), False, 'import tqdm\n'), ((1367, 1396), 'init.gen_sampling_benchmark', 'init.gen_sampling_benchmark', ([], {}), '()\n', (1394, 1396), False, 'import init\n'), ((1410, 1433), 'tqdm.tqdm', 'tqdm.tqdm', (['frac_samples'], {}), '(frac_samples)\n', (1419, 1433), False, 'import tqdm\n'), ((1824, 1855), 'numpy.arange', 'np.arange', (['STEP', '(1 + STEP)', 'STEP'], {}), '(STEP, 1 + STEP, STEP)\n', (1833, 1855), True, 'import numpy as np\n'), ((2126, 2148), 'os.path.isfile', 'path.isfile', (['file_path'], {}), '(file_path)\n', (2137, 2148), False, 'from os import path\n'), ((2558, 2681), 'plot_utils.plot_bench', 'plot_bench', (['to_benchmark', 'frac_samples', 'labels'], {'xlabel': '"""Number of tuples"""', 'ylabel': 'yaxis_name', 'logy': '(False)', 'savefig': '(False)'}), "(to_benchmark, frac_samples, labels, xlabel='Number of tuples',\n ylabel=yaxis_name, logy=False, savefig=False)\n", (2568, 2681), False, 'from plot_utils import plot_bench\n'), ((2041, 2110), 'os.path.join', 'path.join', (['DILL_FOLDER', 'f"""{script_name}_{test_name}_{dataset_name}.d"""'], {}), "(DILL_FOLDER, f'{script_name}_{test_name}_{dataset_name}.d')\n", (2050, 2110), False, 'from os import path\n'), ((3010, 3041), 'number_utils.format_number', 'format_number', (['(x * dataset_size)'], {}), '(x * dataset_size)\n', (3023, 3041), False, 'from number_utils import format_number\n'), ((920, 969), 'timeit.timeit', 'timeit.timeit', (['cmd'], {'setup': 'setup', 'number': 'N_REPEATS'}), '(cmd, setup=setup, number=N_REPEATS)\n', (933, 969), False, 'import timeit\n'), ((2917, 2943), 'dataset_utils.load_dataset', 'load_dataset', (['dataset_name'], {}), '(dataset_name)\n', (2929, 2943), False, 'from dataset_utils import AVAILABLE_DATASETS, load_dataset\n'), ((1970, 1985), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1983, 1985), False, 'import inspect\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME> & <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ..utils import to_categorical
from .activations import softmax, sigmoid
# softmax cross-entropy
def softmax_cross_entropy(out, label):
    # out: raw outputs of the network (logits), shape [n_samples, n_classes]
    # label: integer class labels or one-hot encoded labels
    out, label = np.array(out), np.array(label)
    assert len(out.shape) == 2  # wrong output shape
    assert len(label.shape) == 1 or len(label.shape) == 2  # wrong label shape
    if len(label.shape) == 1:  # convert to one-hot encoding
y = to_categorical(label, num_classes=out.shape[1])
else:
if label.shape[1] == 1:
y = to_categorical(label.squeeze(), num_classes=out.shape[1])
else:
            assert label.max() == 1 and label.sum(1).mean() == 1  # labels are not a valid one-hot encoding
y = label
yhat = softmax(out)
return -np.mean(y * np.log(yhat))
# cross-entropy (out is already a probability distribution per sample)
def cross_entropy(out, label):
    # out: predicted class probabilities, shape [n_samples, n_classes]
    # label: integer class labels or one-hot encoded labels
    yhat, label = np.array(out), np.array(label)
    assert len(yhat.shape) == 2  # wrong output shape
    assert len(label.shape) == 1 or len(label.shape) == 2  # wrong label shape
    if len(label.shape) == 1:  # convert to one-hot encoding
y = to_categorical(label, num_classes=out.shape[1])
else:
if label.shape[1] == 1:
y = to_categorical(label.squeeze(), num_classes=out.shape[1])
else:
            assert label.max() == 1 and label.sum(1).mean() == 1  # labels are not a valid one-hot encoding
y = label
return -np.mean(y * np.log(yhat))
# binary classification (sigmoid applied to the raw output)
def sigmoid_binary_cross_entropy(out, label):
    # out: raw output of the single output neuron (logit), shape [n_samples, 1]
    # label: binary class labels (0 or 1)
    out, y = np.array(out), np.array(label)
    assert len(out.shape) == 2 and out.shape[1] == 1  # wrong output shape
    assert len(y.shape) == 1  # wrong label shape
    yhat = sigmoid(out).ravel()  # flatten to [n_samples] so it broadcasts elementwise with y
    return -np.mean(y * np.log(yhat) + (1 - y) * np.log(1 - yhat))
# binary classification (out is already a probability)
def binary_cross_entropy(out, label):
    # out: predicted probabilities, shape [n_samples, 1]
    # label: binary class labels (0 or 1)
    yhat, y = np.array(out), np.array(label)
    assert len(yhat.shape) == 2 and yhat.shape[1] == 1  # wrong output shape
    assert len(y.shape) == 1  # wrong label shape
    yhat = yhat.ravel()  # flatten to [n_samples] so it broadcasts elementwise with y
    return -np.mean(y * np.log(yhat) + (1 - y) * np.log(1 - yhat))
# sum of squared errors (least-squares loss)
def square_loss(prediction, y):
    # prediction: predicted values
    # y: true values
prediction, y = np.array(prediction), np.array(y)
    assert (len(prediction.shape) == 2 and prediction.shape[1] == 1) or len(prediction.shape) == 1  # wrong prediction shape
    assert len(y.shape) == 1 or (len(y.shape) == 2 and y.shape[1] == 1)  # wrong target shape
return np.sum(np.sum(np.square(prediction.reshape(-1, 1) - y.reshape(-1, 1)), 1))
# mean squared error
def mse(prediction, y):
    # prediction: predicted values
    # y: true values
prediction, y = np.array(prediction), np.array(y)
    assert (len(prediction.shape) == 2 and prediction.shape[1] == 1) or len(prediction.shape) == 1  # wrong prediction shape
    assert len(y.shape) == 1 or (len(y.shape) == 2 and y.shape[1] == 1)  # wrong target shape
return np.mean(np.sum(np.square(prediction.reshape(-1, 1) - y.reshape(-1, 1)), 1))
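# Quick sanity check (sketch, not part of the original module): a confident correct
# prediction should give a small loss and a confident wrong one a large loss, e.g.
#   cross_entropy([[0.99, 0.01]], [0])   # ~0.005
#   cross_entropy([[0.01, 0.99]], [0])   # ~2.3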
|
[
"numpy.array",
"numpy.log"
] |
[((888, 901), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (896, 901), True, 'import numpy as np\n'), ((903, 918), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (911, 918), True, 'import numpy as np\n'), ((1551, 1564), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (1559, 1564), True, 'import numpy as np\n'), ((1566, 1581), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1574, 1581), True, 'import numpy as np\n'), ((2199, 2212), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2207, 2212), True, 'import numpy as np\n'), ((2214, 2229), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2222, 2229), True, 'import numpy as np\n'), ((2539, 2552), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2547, 2552), True, 'import numpy as np\n'), ((2554, 2569), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2562, 2569), True, 'import numpy as np\n'), ((2846, 2866), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (2854, 2866), True, 'import numpy as np\n'), ((2868, 2879), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2876, 2879), True, 'import numpy as np\n'), ((3254, 3274), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (3262, 3274), True, 'import numpy as np\n'), ((3276, 3287), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3284, 3287), True, 'import numpy as np\n'), ((1428, 1440), 'numpy.log', 'np.log', (['yhat'], {}), '(yhat)\n', (1434, 1440), True, 'import numpy as np\n'), ((2066, 2078), 'numpy.log', 'np.log', (['yhat'], {}), '(yhat)\n', (2072, 2078), True, 'import numpy as np\n'), ((2384, 2396), 'numpy.log', 'np.log', (['yhat'], {}), '(yhat)\n', (2390, 2396), True, 'import numpy as np\n'), ((2409, 2425), 'numpy.log', 'np.log', (['(1 - yhat)'], {}), '(1 - yhat)\n', (2415, 2425), True, 'import numpy as np\n'), ((2700, 2712), 'numpy.log', 'np.log', (['yhat'], {}), '(yhat)\n', (2706, 2712), True, 'import numpy as np\n'), ((2725, 2741), 'numpy.log', 'np.log', (['(1 - yhat)'], {}), '(1 - yhat)\n', (2731, 2741), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import division, print_function
try:
range = xrange
except NameError:
pass
import os
import sys
import h5py
import json
import time
import numpy
import ctypes
import signal
import logging
import argparse
import threading
from functools import reduce
from datetime import datetime, timedelta
from mnc.common import *
from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client
from station import ovro
from reductions import *
from operations import FileOperationsQueue
from monitoring import GlobalLogger
from control import VisibilityCommandProcessor
from lwams import get_zenith_uvw
from bifrost.address import Address
from bifrost.udp_socket import UDPSocket
from bifrost.packet_capture import PacketCaptureCallback, UDPCapture, DiskReader
from bifrost.ring import Ring
import bifrost.affinity as cpu_affinity
import bifrost.ndarray as BFArray
from bifrost.ndarray import copy_array
from bifrost.libbifrost import bf
from bifrost.proclog import ProcLog
from bifrost.memory import memcpy as BFMemCopy, memset as BFMemSet
from bifrost import asarray as BFAsArray
import PIL.Image, PIL.ImageDraw, PIL.ImageFont
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
QUEUE = FileOperationsQueue()
class CaptureOp(object):
def __init__(self, log, sock, oring, nbl, ntime_gulp=1,
slot_ntime=6, fast=False, shutdown_event=None, core=None):
self.log = log
self.sock = sock
self.oring = oring
self.nbl = nbl
self.ntime_gulp = ntime_gulp
self.slot_ntime = slot_ntime
self.fast = fast
if shutdown_event is None:
shutdown_event = threading.Event()
self.shutdown_event = shutdown_event
self.core = core
def shutdown(self):
self.shutdown_event.set()
def seq_callback(self, seq0, time_tag, chan0, nchan, navg, nsrc, hdr_ptr, hdr_size_ptr):
print("++++++++++++++++ seq0 =", seq0)
print(" time_tag =", time_tag)
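        # nsrc counts the baselines (autos + cross); the number of stands follows from
        # nbl = nstand*(nstand+1)//2, i.e. nstand = (sqrt(8*nbl + 1) - 1) // 2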
hdr = {'time_tag': time_tag,
'seq0': seq0,
'chan0': chan0,
'cfreq': chan0*CHAN_BW,
'nchan': nchan,
'bw': nchan*CHAN_BW*(4 if self.fast else 1),
'navg': navg,
'nstand': int(numpy.sqrt(8*nsrc+1)-1)//2,
'npol': 4,
'nbl': nsrc,
'complex': True,
'nbit': 32}
print("******** CFREQ:", hdr['cfreq'])
hdr_str = json.dumps(hdr).encode()
# TODO: Can't pad with NULL because returned as C-string
#hdr_str = json.dumps(hdr).ljust(4096, '\0')
#hdr_str = json.dumps(hdr).ljust(4096, ' ')
header_buf = ctypes.create_string_buffer(hdr_str)
hdr_ptr[0] = ctypes.cast(header_buf, ctypes.c_void_p)
hdr_size_ptr[0] = len(hdr_str)
return 0
def main(self):
seq_callback = PacketCaptureCallback()
seq_callback.set_cor(self.seq_callback)
with UDPCapture("cor", self.sock, self.oring, self.nbl, 1, 9000,
self.ntime_gulp, self.slot_ntime,
sequence_callback=seq_callback, core=self.core) as capture:
while not self.shutdown_event.is_set():
status = capture.recv()
if status in (1,4,5,6):
break
del capture
class DummyOp(object):
def __init__(self, log, sock, oring, nbl, ntime_gulp=1,
slot_ntime=6, fast=False, shutdown_event=None, core=None):
self.log = log
self.sock = sock
self.oring = oring
self.nbl = nbl
self.ntime_gulp = ntime_gulp
self.slot_ntime = slot_ntime
self.fast = fast
if shutdown_event is None:
shutdown_event = threading.Event()
self.shutdown_event = shutdown_event
self.core = core
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.out_proclog = ProcLog(type(self).__name__+"/out")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.out_proclog.update( {'nring':1, 'ring0':self.oring.name})
self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
def shutdown(self):
self.shutdown_event.set()
def main(self):
with self.oring.begin_writing() as oring:
navg = 2400 if self.fast else 240000
tint = navg / CHAN_BW
tgulp = tint * self.ntime_gulp
nsrc = self.nbl
nbl = self.nbl
chan0 = 1234
nchan = 192 // (4 if self.fast else 1)
npol = 4
# Try to load model visibilities
try:
vis_base = numpy.load('utils/sky.npy')
except:
                self.log.warn("Could not load model visibilities from utils/sky.npy, falling back to zeros plus noise")
vis_base = numpy.zeros((nbl, nchan, npol), dtype=numpy.complex64)
assert(vis_base.shape[0] >= nbl)
assert(vis_base.shape[1] >= nchan)
assert(vis_base.shape[2] == npol)
vis_base = vis_base[:self.nbl,::(4 if self.fast else 1),:]
vis_base_r = (vis_base.real*1000).astype(numpy.int32)
vis_base_i = (vis_base.imag*1000).astype(numpy.int32)
vis_base = numpy.zeros((nbl, nchan, npol, 2), dtype=numpy.int32)
vis_base[...,0] = vis_base_r
vis_base[...,1] = vis_base_i
ohdr = {'time_tag': int(int(time.time())*FS),
'seq0': 0,
'chan0': chan0,
'cfreq': chan0*CHAN_BW,
'nchan': nchan,
'bw': nchan*CHAN_BW*(4 if self.fast else 1),
'navg': navg*8192,
'nstand': int(numpy.sqrt(8*nsrc+1)-1)//2,
'npol': npol,
'nbl': nbl,
'complex': True,
'nbit': 32}
ohdr_str = json.dumps(ohdr)
ogulp_size = self.ntime_gulp*nbl*nchan*npol*8 # ci32
oshape = (self.ntime_gulp,nbl,nchan,npol)
self.oring.resize(ogulp_size)
prev_time = time.time()
with oring.begin_sequence(time_tag=ohdr['time_tag'], header=ohdr_str) as oseq:
while not self.shutdown_event.is_set():
with oseq.reserve(ogulp_size) as ospan:
curr_time = time.time()
reserve_time = curr_time - prev_time
prev_time = curr_time
odata = ospan.data_view(numpy.int32).reshape(oshape+(2,))
temp = vis_base + (1000*0.01*numpy.random.randn(*odata.shape)).astype(numpy.int32)
odata[...] = temp
curr_time = time.time()
while curr_time - prev_time < tgulp:
time.sleep(0.01)
curr_time = time.time()
curr_time = time.time()
process_time = curr_time - prev_time
prev_time = curr_time
self.perf_proclog.update({'acquire_time': -1,
'reserve_time': reserve_time,
'process_time': process_time,})
class SpectraOp(object):
def __init__(self, log, id, iring, ntime_gulp=1, guarantee=True, core=-1):
self.log = log
self.iring = iring
self.ntime_gulp = ntime_gulp
self.guarantee = guarantee
self.core = core
self.client = Client(id)
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update({'nring':1, 'ring0':self.iring.name})
def _plot_spectra(self, time_tag, freq, specs):
# Plotting setup
nchan = freq.size
nstand = specs.shape[0]
try:
minval = numpy.min(specs[numpy.where(numpy.isfinite(specs))])
maxval = numpy.max(specs[numpy.where(numpy.isfinite(specs))])
except ValueError:
minval = 0.0
maxval = 1.0
# Image setup
width = 20
height = 18
im = PIL.Image.new('RGB', (width * 65 + 1, height * 65 + 21), '#FFFFFF')
draw = PIL.ImageDraw.Draw(im)
font = PIL.ImageFont.load(os.path.join(BASE_PATH, 'fonts', 'helvB10.pil'))
# Axes boxes
for i in range(width + 1):
draw.line([i * 65, 0, i * 65, height * 65], fill = '#000000')
for i in range(height + 1):
draw.line([(0, i * 65), (im.size[0], i * 65)], fill = '#000000')
# Power as a function of frequency for all antennas
x = numpy.arange(nchan) * 64 // nchan
for s in range(nstand):
if s >= height * width:
break
x0, y0 = (s % width) * 65 + 1, (s // width + 1) * 65
draw.text((x0 + 5, y0 - 60), str(s+1), font=font, fill='#000000')
## XX
c = '#1F77B4'
y = ((54.0 / (maxval - minval)) * (specs[s,:,0] - minval)).clip(0, 54)
draw.point(list(zip(x0 + x, y0 - y)), fill=c)
## YY
c = '#FF7F0E'
y = ((54.0 / (maxval - minval)) * (specs[s,:,1] - minval)).clip(0, 54)
draw.point(list(zip(x0 + x, y0 - y)), fill=c)
# Summary
ySummary = height * 65 + 2
timeStr = datetime.utcfromtimestamp(time_tag / FS)
timeStr = timeStr.strftime("%Y/%m/%d %H:%M:%S UTC")
draw.text((5, ySummary), timeStr, font = font, fill = '#000000')
rangeStr = 'range shown: %.3f to %.3f dB' % (minval, maxval)
draw.text((210, ySummary), rangeStr, font = font, fill = '#000000')
x = im.size[0] + 15
for label, c in reversed(list(zip(('XX', 'YY'),
('#1F77B4','#FF7F0E')))):
x -= draw.textsize(label, font = font)[0] + 20
draw.text((x, ySummary), label, font = font, fill = c)
return im
def main(self):
cpu_affinity.set_core(self.core)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),})
for iseq in self.iring.read(guarantee=self.guarantee):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info("Spectra: Start of new sequence: %s", str(ihdr))
# Setup the ring metadata and gulp sizes
time_tag = ihdr['time_tag']
navg = ihdr['navg']
nbl = ihdr['nbl']
nstand = ihdr['nstand']
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
chan_bw = ihdr['bw'] / nchan
npol = ihdr['npol']
igulp_size = self.ntime_gulp*nbl*nchan*npol*8 # ci32
ishape = (self.ntime_gulp,nbl,nchan,npol)
# Setup the arrays for the frequencies and auto-correlations
freq = chan0*chan_bw + numpy.arange(nchan)*chan_bw
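            # Indices of the auto-correlations (stand i with itself) within the packed
            # upper-triangular baseline ordering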
autos = [i*(2*(nstand-1)+1-i)//2 + i for i in range(nstand)]
last_save = 0.0
prev_time = time.time()
for ispan in iseq.read(igulp_size):
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## Setup and load
idata = ispan.data_view('ci32').reshape(ishape)
if time.time() - last_save > 60:
## Timestamp
tt = LWATime(time_tag, format='timetag')
ts = tt.unix
## Pull out the auto-correlations
adata = idata.view(numpy.int32)
adata = adata.reshape(ishape+(2,))
adata = adata[0,autos,:,:,0]
adata = adata[:,:,[0,3]]
## Plot
im = self._plot_spectra(time_tag, freq, 10*numpy.log10(adata))
## Save
mp = ImageMonitorPoint.from_image(im)
self.client.write_monitor_point('diagnostics/spectra',
mp, timestamp=ts)
if True:
## Save again, this time to disk
mjd, dt = tt.mjd, tt.datetime
mjd = int(mjd)
h, m, s = dt.hour, dt.minute, dt.second
filename = '%06i_%02i%02i%02i_spectra.png' % (mjd, h, m, s)
mp.to_file(filename)
### Save the raw spectra for comparison purposes
#filename = '%06i_%02i%02i%02i_spectra.npy' % (mjd, h, m, s)
#numpy.save(filename, adata)
#
### Save everything for comparison purposes
#odata = idata.view(numpy.int32)
#odata = odata.reshape(ishape+(2,))
#filename = '%06i_%02i%02i%02i_everything.npy' % (mjd, h, m, s)
#numpy.save(filename, odata)
last_save = time.time()
time_tag += navg * self.ntime_gulp
curr_time = time.time()
process_time = curr_time - prev_time
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': 0.0,
'process_time': process_time,})
self.log.info("SpectraOp - Done")
class BaselineOp(object):
def __init__(self, log, id, station, iring, ntime_gulp=1, guarantee=True, core=-1):
self.log = log
self.station = station
self.iring = iring
self.ntime_gulp = ntime_gulp
self.guarantee = guarantee
self.core = core
self.client = Client(id)
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update({'nring':1, 'ring0':self.iring.name})
def _plot_baselines(self, time_tag, freq, dist, baselines, valid):
# Plotting setup
nchan = freq.size
nbl = baselines.shape[0]
freq = freq[nchan//2]
baselines = baselines[valid,nchan//2,:]
baselines = numpy.abs(baselines[:,[0,1,3]])
minval = numpy.min(baselines)
maxval = numpy.max(baselines)
if minval == maxval:
maxval = minval + 1.0
mindst = 0.0
maxdst = numpy.max(dist)
# Image setup
im = PIL.Image.new('RGB', (601, 421), '#FFFFFF')
draw = PIL.ImageDraw.Draw(im)
font = PIL.ImageFont.load(os.path.join(BASE_PATH, 'fonts', 'helvB10.pil'))
# Axes boxes
for i in range(2):
draw.line([i * 600, 0, i * 600, 400], fill = '#000000')
for i in range(2):
draw.line([(0, i * 400), (im.size[0], i * 400)], fill = '#000000')
# Visiblity amplitudes as a function of (u,v) distance
x0, y0 = 1, 400
draw.text((x0 + 500, y0 - 395), '%.3f MHz' % (freq/1e6,), font=font, fill='#000000')
## (u,v) distance
x = ((599.0 / (maxdst - mindst)) * (dist - mindst)).clip(0, 599)
## XX
y = ((399.0 / (maxval - minval)) * (baselines[:,0] - minval)).clip(0, 399)
draw.point(list(zip(x0 + x, y0 - y)), fill='#1F77B4')
## YY
y = ((399.0 / (maxval - minval)) * (baselines[:,2] - minval)).clip(0, 399)
draw.point(list(zip(x0 + x, y0 - y)), fill='#FF7F0E')
### XY
#y = ((399.0 / (maxval - minval)) * (baselines[:,1] - minval)).clip(0, 399)
#draw.point(list(zip(x0 + x, y0 - y)), fill='#A00000')
# Details and labels
ySummary = 402
timeStr = datetime.utcfromtimestamp(time_tag / FS)
timeStr = timeStr.strftime("%Y/%m/%d %H:%M:%S UTC")
draw.text((5, ySummary), timeStr, font = font, fill = '#000000')
rangeStr = 'range shown: %.6f - %.6f' % (minval, maxval)
draw.text((210, ySummary), rangeStr, font = font, fill = '#000000')
x = im.size[0] + 15
#for label, c in reversed(list(zip(('XX','XY','YY'), ('#1F77B4','#A00000','#FF7F0E')))):
for label, c in reversed(list(zip(('XX','YY'), ('#1F77B4','#FF7F0E')))):
x -= draw.textsize(label, font = font)[0] + 20
draw.text((x, ySummary), label, font = font, fill = c)
return im
def main(self):
cpu_affinity.set_core(self.core)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),})
for iseq in self.iring.read(guarantee=self.guarantee):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info("Baseline: Start of new sequence: %s", str(ihdr))
# Setup the ring metadata and gulp sizes
time_tag = ihdr['time_tag']
navg = ihdr['navg']
nbl = ihdr['nbl']
nstand = ihdr['nstand']
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
chan_bw = ihdr['bw'] / nchan
npol = ihdr['npol']
igulp_size = self.ntime_gulp*nbl*nchan*npol*8
ishape = (self.ntime_gulp,nbl,nchan,npol)
self.iring.resize(igulp_size)
            # Setup the arrays for the frequencies and baseline lengths
freq = chan0*chan_bw + numpy.arange(nchan)*chan_bw
uvw = get_zenith_uvw(self.station, LWATime(time_tag, format='timetag'))
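            # Drop the w component so only the projected (u,v) baseline length enters
            # the amplitude-vs-distance plot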
uvw[:,2] = 0
dist = numpy.sqrt((uvw**2).sum(axis=1))
valid = numpy.where(dist > 0.1)[0]
last_save = 0.0
prev_time = time.time()
for ispan in iseq.read(igulp_size):
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## Setup and load
idata = ispan.data_view('ci32').reshape(ishape)
if time.time() - last_save > 60:
## Timestamp
tt = LWATime(time_tag, format='timetag')
ts = tt.unix
## Plot
bdata = idata[0,...]
bdata = bdata.view(numpy.int32)
bdata = bdata.reshape(ishape+(2,))
bdata = bdata[0,:,:,:,0] + 1j*bdata[0,:,:,:,1]
bdata = bdata.astype(numpy.complex64)
im = self._plot_baselines(time_tag, freq, dist, bdata, valid)
## Save
mp = ImageMonitorPoint.from_image(im)
self.client.write_monitor_point('diagnostics/baselines',
mp, timestamp=ts)
if True:
## Save again, this time to disk
mjd, dt = tt.mjd, tt.datetime
mjd = int(mjd)
h, m, s = dt.hour, dt.minute, dt.second
filename = '%06i_%02i%02i%02i_baselines.png' % (mjd, h, m, s)
mp.to_file(filename)
last_save = time.time()
time_tag += navg * self.ntime_gulp
curr_time = time.time()
process_time = curr_time - prev_time
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': 0.0,
'process_time': process_time,})
self.log.info("BaselineOp - Done")
class StatisticsOp(object):
def __init__(self, log, id, iring, ntime_gulp=1, guarantee=True, core=None):
self.log = log
self.iring = iring
self.ntime_gulp = ntime_gulp
self.guarantee = guarantee
self.core = core
self.client = Client(id)
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update( {'nring':1, 'ring0':self.iring.name})
self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
def main(self):
if self.core is not None:
cpu_affinity.set_core(self.core)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),})
for iseq in self.iring.read(guarantee=self.guarantee):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info("Statistics: Start of new sequence: %s", str(ihdr))
# Setup the ring metadata and gulp sizes
time_tag = ihdr['time_tag']
navg = ihdr['navg']
nbl = ihdr['nbl']
nstand = ihdr['nstand']
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
chan_bw = ihdr['bw'] / nchan
npol = ihdr['npol']
igulp_size = self.ntime_gulp*nbl*nchan*npol*8 # ci32
ishape = (self.ntime_gulp,nbl,nchan,npol)
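            # Flat indices of the auto-correlations (i,i), assuming the baselines
            # are packed in upper-triangular order (i <= j, row-major), where
            # index(i, j) = i*nstand - i*(i+1)//2 + j.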
autos = [i*(2*(nstand-1)+1-i)//2 + i for i in range(nstand)]
data_pols = ['XX', 'YY']
last_save = 0.0
prev_time = time.time()
iseq_spans = iseq.read(igulp_size)
for ispan in iseq_spans:
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## Setup and load
idata = ispan.data_view('ci32').reshape(ishape)
if time.time() - last_save > 60:
## Timestamp
tt = LWATime(time_tag, format='timetag')
ts = tt.unix
## Pull out the auto-correlations
adata = idata.view(numpy.int32)
adata = adata.reshape(ishape+(2,))
adata = adata[0,autos,:,:,0]
adata = adata[:,:,[0,3]]
## Run the statistics over all times/channels
## * only really works for ntime_gulp=1
data_min = numpy.min(adata, axis=1)
data_max = numpy.max(adata, axis=1)
data_avg = numpy.mean(adata, axis=1)
## Save
for data,name in zip((data_min,data_avg,data_max), ('min','avg','max')):
value = MultiMonitorPoint([data[:,i].tolist() for i in range(data.shape[1])],
timestamp=ts, field=data_pols)
self.client.write_monitor_point('statistics/%s' % name, value)
last_save = time.time()
time_tag += navg * self.ntime_gulp
curr_time = time.time()
process_time = curr_time - prev_time
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': -1,
'process_time': process_time,})
self.log.info("StatisticsOp - Done")
class WriterOp(object):
def __init__(self, log, station, iring, ntime_gulp=1, fast=False, guarantee=True, core=None):
self.log = log
self.station = station
self.iring = iring
self.ntime_gulp = ntime_gulp
self.fast = fast
self.guarantee = guarantee
self.core = core
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update( {'nring':1, 'ring0':self.iring.name})
self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
def main(self):
global QUEUE
if self.core is not None:
cpu_affinity.set_core(self.core)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),})
for iseq in self.iring.read(guarantee=self.guarantee):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info("Writer: Start of new sequence: %s", str(ihdr))
# Setup the ring metadata and gulp sizes
time_tag = ihdr['time_tag']
navg = ihdr['navg']
nbl = ihdr['nbl']
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
chan_bw = ihdr['bw'] / nchan
npol = ihdr['npol']
pols = ['XX','XY','YX','YY']
igulp_size = self.ntime_gulp*nbl*nchan*npol*8 # ci32
ishape = (self.ntime_gulp,nbl,nchan,npol)
self.iring.resize(igulp_size, 10*igulp_size*(4 if self.fast else 1))
norm_factor = navg // (2*NCHAN)
first_gulp = True
was_active = False
prev_time = time.time()
iseq_spans = iseq.read(igulp_size)
for ispan in iseq_spans:
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## On our first span, update the pipeline lag for the queue
## so that we start recording at the right times
if first_gulp:
QUEUE.update_lag(LWATime(time_tag, format='timetag').datetime)
self.log.info("Current pipeline lag is %s", QUEUE.lag)
first_gulp = False
## Setup and load
idata = ispan.data_view('ci32').reshape(ishape)
idata = idata.view(numpy.int32)
idata = idata.reshape(ishape+(2,))
idata = idata[...,0] + 1j*idata[...,1]
idata /= norm_factor
idata = idata.astype(numpy.complex64)
## Determine what to do
if QUEUE.active is not None:
### Recording active - write
if not QUEUE.active.is_started:
self.log.info("Started operation - %s", QUEUE.active)
QUEUE.active.start(self.station, chan0, navg, nchan, chan_bw, npol, pols)
was_active = True
QUEUE.active.write(time_tag, idata)
elif was_active:
### Recording just finished
#### Clean
was_active = False
QUEUE.clean()
#### Close
self.log.info("Ended operation - %s", QUEUE.previous)
QUEUE.previous.stop()
time_tag += navg
curr_time = time.time()
process_time = curr_time - prev_time
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': -1,
'process_time': process_time,})
self.log.info("WriterOp - Done")
def main(argv):
global QUEUE
parser = argparse.ArgumentParser(
description="Data recorder for slow/fast visibility data"
)
parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
help='IP address to listen to')
parser.add_argument('-p', '--port', type=int, default=10000,
help='UDP port to receive data on')
parser.add_argument('-o', '--offline', action='store_true',
                        help='run in offline mode (use dummy data instead of a UDP capture)')
parser.add_argument('-c', '--cores', type=str, default='0,1,2,3,4,5',
help='comma separated list of cores to bind to')
parser.add_argument('-g', '--gulp-size', type=int, default=1,
help='gulp size for ring buffers')
parser.add_argument('-l', '--logfile', type=str,
help='file to write logging to')
parser.add_argument('-r', '--record-directory', type=str, default=os.path.abspath('.'),
help='directory to save recorded files to')
parser.add_argument('-t', '--record-directory-quota', type=quota_size, default=0,
help='quota for the recording directory, 0 disables the quota')
parser.add_argument('-q', '--quick', action='store_true',
                        help='run in fast visibility mode')
parser.add_argument('-i', '--nint-per-file', type=int, default=1,
help='number of integrations to write per measurement set')
parser.add_argument('-n', '--no-tar', action='store_true',
help='do not store the measurement sets inside a tar file')
parser.add_argument('-f', '--fork', action='store_true',
help='fork and run in the background')
args = parser.parse_args()
# Process the -q/--quick option
station = ovro
if args.quick:
args.nint_per_file = max([10, args.nint_per_file])
station = ovro.select_subset(list(range(1, 48+1)))
# Fork, if requested
if args.fork:
stderr = '/tmp/%s_%i.stderr' % (os.path.splitext(os.path.basename(__file__))[0], args.port)
daemonize(stdin='/dev/null', stdout='/dev/null', stderr=stderr)
# Setup logging
log = logging.getLogger(__name__)
logFormat = logging.Formatter('%(asctime)s [%(levelname)-8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logFormat.converter = time.gmtime
if args.logfile is None:
logHandler = logging.StreamHandler(sys.stdout)
else:
logHandler = LogFileHandler(args.logfile)
logHandler.setFormatter(logFormat)
log.addHandler(logHandler)
log.setLevel(logging.DEBUG)
log.info("Starting %s with PID %i", os.path.basename(__file__), os.getpid())
log.info("Cmdline args:")
for arg in vars(args):
log.info(" %s: %s", arg, getattr(args, arg))
# Setup the subsystem ID
mcs_id = 'drv'
if args.quick:
mcs_id += 'f'
else:
mcs_id += 's'
base_ip = int(args.address.split('.')[-1], 10)
base_port = args.port % 100
mcs_id += str(base_ip*100 + base_port)
# Setup the cores and GPUs to use
cores = [int(v, 10) for v in args.cores.split(',')]
log.info("CPUs: %s", ' '.join([str(v) for v in cores]))
# Setup the socket, if needed
isock = None
if not args.offline:
iaddr = Address(args.address, args.port)
isock = UDPSocket()
isock.bind(iaddr)
# Setup the rings
capture_ring = Ring(name="capture")
# Setup antennas
nant = len(station.antennas)
nbl = nant*(nant+1)//2
# Setup the recording directory, if needed
if not os.path.exists(args.record_directory):
status = os.system('mkdir -p %s' % args.record_directory)
if status != 0:
raise RuntimeError("Unable to create directory: %s" % args.record_directory)
else:
if not os.path.isdir(os.path.realpath(args.record_directory)):
raise RuntimeError("Cannot record to a non-directory: %s" % args.record_directory)
# Setup the blocks
ops = []
if args.offline:
ops.append(DummyOp(log, isock, capture_ring, (NPIPELINE//16)*nbl,
ntime_gulp=args.gulp_size, slot_ntime=(10 if args.quick else 6),
fast=args.quick, core=cores.pop(0)))
else:
ops.append(CaptureOp(log, isock, capture_ring, (NPIPELINE//16)*nbl, # two pipelines/recorder
ntime_gulp=args.gulp_size, slot_ntime=(10 if args.quick else 6),
fast=args.quick, core=cores.pop(0)))
if not args.quick:
ops.append(SpectraOp(log, mcs_id, capture_ring,
ntime_gulp=args.gulp_size, core=cores.pop(0)))
ops.append(BaselineOp(log, mcs_id, station, capture_ring,
ntime_gulp=args.gulp_size, core=cores.pop(0)))
ops.append(StatisticsOp(log, mcs_id, capture_ring,
ntime_gulp=args.gulp_size, core=cores.pop(0)))
ops.append(WriterOp(log, station, capture_ring,
ntime_gulp=args.gulp_size, fast=args.quick, core=cores.pop(0)))
ops.append(GlobalLogger(log, mcs_id, args, QUEUE, quota=args.record_directory_quota))
ops.append(VisibilityCommandProcessor(log, mcs_id, args.record_directory, QUEUE,
nint_per_file=args.nint_per_file,
is_tarred=not args.no_tar))
# Setup the threads
threads = [threading.Thread(target=op.main) for op in ops]
# Setup signal handling
shutdown_event = setup_signal_handling(ops)
ops[0].shutdown_event = shutdown_event
ops[-2].shutdown_event = shutdown_event
ops[-1].shutdown_event = shutdown_event
# Launch!
log.info("Launching %i thread(s)", len(threads))
for thread in threads:
#thread.daemon = True
thread.start()
t_now = LWATime(datetime.utcnow() + timedelta(seconds=15), format='datetime', scale='utc')
mjd_now = int(t_now.mjd)
mpm_now = int((t_now.mjd - mjd_now)*86400.0*1000.0)
c = Client()
r = c.send_command(mcs_id, 'start',
start_mjd=mjd_now, start_mpm=mpm_now)
print('III', r)
t_now = LWATime(datetime.utcnow() + timedelta(seconds=75), format='datetime', scale='utc')
mjd_now = int(t_now.mjd)
mpm_now = int((t_now.mjd - mjd_now)*86400.0*1000.0)
r = c.send_command(mcs_id, 'stop',
stop_mjd=mjd_now, stop_mpm=mpm_now)
print('III', r)
while not shutdown_event.is_set():
signal.pause()
log.info("Shutdown, waiting for threads to join")
for thread in threads:
thread.join()
log.info("All done")
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"logging.getLogger",
"datetime.datetime.utcfromtimestamp",
"logging.StreamHandler",
"numpy.sqrt",
"bifrost.packet_capture.PacketCaptureCallback",
"numpy.log10",
"bifrost.affinity.get_core",
"ctypes.create_string_buffer",
"time.sleep",
"numpy.isfinite",
"datetime.timedelta",
"mnc.mcs.Client",
"bifrost.ring.Ring",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"bifrost.packet_capture.UDPCapture",
"argparse.ArgumentParser",
"monitoring.GlobalLogger",
"numpy.where",
"json.dumps",
"numpy.max",
"ctypes.cast",
"os.getpid",
"numpy.min",
"bifrost.affinity.set_core",
"control.VisibilityCommandProcessor",
"numpy.abs",
"signal.pause",
"bifrost.udp_socket.UDPSocket",
"mnc.mcs.ImageMonitorPoint.from_image",
"threading.Thread",
"time.time",
"numpy.random.randn",
"operations.FileOperationsQueue",
"bifrost.address.Address",
"datetime.datetime.utcnow",
"logging.Formatter",
"os.path.join",
"threading.Event",
"os.path.realpath",
"numpy.zeros",
"os.path.basename",
"os.path.abspath",
"os.system",
"numpy.load"
] |
[((1240, 1261), 'operations.FileOperationsQueue', 'FileOperationsQueue', ([], {}), '()\n', (1259, 1261), False, 'from operations import FileOperationsQueue\n'), ((1203, 1228), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1218, 1228), False, 'import os\n'), ((30595, 30682), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Data recorder for slow/fast visibility data"""'}), "(description=\n 'Data recorder for slow/fast visibility data')\n", (30618, 30682), False, 'import argparse\n'), ((32872, 32899), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (32889, 32899), False, 'import logging\n'), ((32916, 33012), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)-8s] %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "('%(asctime)s [%(levelname)-8s] %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n", (32933, 33012), False, 'import logging\n'), ((34180, 34200), 'bifrost.ring.Ring', 'Ring', ([], {'name': '"""capture"""'}), "(name='capture')\n", (34184, 34200), False, 'from bifrost.ring import Ring\n'), ((36859, 36867), 'mnc.mcs.Client', 'Client', ([], {}), '()\n', (36865, 36867), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client\n'), ((2815, 2851), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['hdr_str'], {}), '(hdr_str)\n', (2842, 2851), False, 'import ctypes\n'), ((2878, 2918), 'ctypes.cast', 'ctypes.cast', (['header_buf', 'ctypes.c_void_p'], {}), '(header_buf, ctypes.c_void_p)\n', (2889, 2918), False, 'import ctypes\n'), ((3028, 3051), 'bifrost.packet_capture.PacketCaptureCallback', 'PacketCaptureCallback', ([], {}), '()\n', (3049, 3051), False, 'from bifrost.packet_capture import PacketCaptureCallback, UDPCapture, DiskReader\n'), ((8103, 8113), 'mnc.mcs.Client', 'Client', (['id'], {}), '(id)\n', (8109, 8113), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client\n'), ((10274, 10314), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(time_tag / FS)'], {}), '(time_tag / FS)\n', (10299, 10314), False, 'from datetime import datetime, timedelta\n'), ((10943, 10975), 'bifrost.affinity.set_core', 'cpu_affinity.set_core', (['self.core'], {}), '(self.core)\n', (10964, 10975), True, 'import bifrost.affinity as cpu_affinity\n'), ((15294, 15304), 'mnc.mcs.Client', 'Client', (['id'], {}), '(id)\n', (15300, 15304), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client\n'), ((15986, 16020), 'numpy.abs', 'numpy.abs', (['baselines[:, [0, 1, 3]]'], {}), '(baselines[:, [0, 1, 3]])\n', (15995, 16020), False, 'import numpy\n'), ((16035, 16055), 'numpy.min', 'numpy.min', (['baselines'], {}), '(baselines)\n', (16044, 16055), False, 'import numpy\n'), ((16073, 16093), 'numpy.max', 'numpy.max', (['baselines'], {}), '(baselines)\n', (16082, 16093), False, 'import numpy\n'), ((16208, 16223), 'numpy.max', 'numpy.max', (['dist'], {}), '(dist)\n', (16217, 16223), False, 'import numpy\n'), ((17551, 17591), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(time_tag / FS)'], {}), '(time_tag / FS)\n', (17576, 17591), False, 'from datetime import datetime, timedelta\n'), ((18266, 18298), 'bifrost.affinity.set_core', 'cpu_affinity.set_core', (['self.core'], {}), '(self.core)\n', (18287, 18298), True, 'import bifrost.affinity as cpu_affinity\n'), ((22159, 22169), 'mnc.mcs.Client', 'Client', (['id'], {}), '(id)\n', (22165, 22169), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, 
Client\n'), ((33130, 33163), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (33151, 33163), False, 'import logging\n'), ((33371, 33397), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (33387, 33397), False, 'import os\n'), ((33399, 33410), 'os.getpid', 'os.getpid', ([], {}), '()\n', (33408, 33410), False, 'import os\n'), ((34043, 34075), 'bifrost.address.Address', 'Address', (['args.address', 'args.port'], {}), '(args.address, args.port)\n', (34050, 34075), False, 'from bifrost.address import Address\n'), ((34092, 34103), 'bifrost.udp_socket.UDPSocket', 'UDPSocket', ([], {}), '()\n', (34101, 34103), False, 'from bifrost.udp_socket import UDPSocket\n'), ((34350, 34387), 'os.path.exists', 'os.path.exists', (['args.record_directory'], {}), '(args.record_directory)\n', (34364, 34387), False, 'import os\n'), ((34406, 34454), 'os.system', 'os.system', (["('mkdir -p %s' % args.record_directory)"], {}), "('mkdir -p %s' % args.record_directory)\n", (34415, 34454), False, 'import os\n'), ((35900, 35973), 'monitoring.GlobalLogger', 'GlobalLogger', (['log', 'mcs_id', 'args', 'QUEUE'], {'quota': 'args.record_directory_quota'}), '(log, mcs_id, args, QUEUE, quota=args.record_directory_quota)\n', (35912, 35973), False, 'from monitoring import GlobalLogger\n'), ((35990, 36124), 'control.VisibilityCommandProcessor', 'VisibilityCommandProcessor', (['log', 'mcs_id', 'args.record_directory', 'QUEUE'], {'nint_per_file': 'args.nint_per_file', 'is_tarred': '(not args.no_tar)'}), '(log, mcs_id, args.record_directory, QUEUE,\n nint_per_file=args.nint_per_file, is_tarred=not args.no_tar)\n', (36016, 36124), False, 'from control import VisibilityCommandProcessor\n'), ((36250, 36282), 'threading.Thread', 'threading.Thread', ([], {'target': 'op.main'}), '(target=op.main)\n', (36266, 36282), False, 'import threading\n'), ((37344, 37358), 'signal.pause', 'signal.pause', ([], {}), '()\n', (37356, 37358), False, 'import signal\n'), ((1702, 1719), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1717, 1719), False, 'import threading\n'), ((3122, 3267), 'bifrost.packet_capture.UDPCapture', 'UDPCapture', (['"""cor"""', 'self.sock', 'self.oring', 'self.nbl', '(1)', '(9000)', 'self.ntime_gulp', 'self.slot_ntime'], {'sequence_callback': 'seq_callback', 'core': 'self.core'}), "('cor', self.sock, self.oring, self.nbl, 1, 9000, self.ntime_gulp,\n self.slot_ntime, sequence_callback=seq_callback, core=self.core)\n", (3132, 3267), False, 'from bifrost.packet_capture import PacketCaptureCallback, UDPCapture, DiskReader\n'), ((3941, 3958), 'threading.Event', 'threading.Event', ([], {}), '()\n', (3956, 3958), False, 'import threading\n'), ((5594, 5647), 'numpy.zeros', 'numpy.zeros', (['(nbl, nchan, npol, 2)'], {'dtype': 'numpy.int32'}), '((nbl, nchan, npol, 2), dtype=numpy.int32)\n', (5605, 5647), False, 'import numpy\n'), ((6312, 6328), 'json.dumps', 'json.dumps', (['ohdr'], {}), '(ohdr)\n', (6322, 6328), False, 'import json\n'), ((6545, 6556), 'time.time', 'time.time', ([], {}), '()\n', (6554, 6556), False, 'import time\n'), ((9142, 9189), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""fonts"""', '"""helvB10.pil"""'], {}), "(BASE_PATH, 'fonts', 'helvB10.pil')\n", (9154, 9189), False, 'import os\n'), ((12155, 12166), 'time.time', 'time.time', ([], {}), '()\n', (12164, 12166), False, 'import time\n'), ((16384, 16431), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""fonts"""', '"""helvB10.pil"""'], {}), "(BASE_PATH, 'fonts', 'helvB10.pil')\n", (16396, 
16431), False, 'import os\n'), ((19646, 19657), 'time.time', 'time.time', ([], {}), '()\n', (19655, 19657), False, 'import time\n'), ((22735, 22767), 'bifrost.affinity.set_core', 'cpu_affinity.set_core', (['self.core'], {}), '(self.core)\n', (22756, 22767), True, 'import bifrost.affinity as cpu_affinity\n'), ((23856, 23867), 'time.time', 'time.time', ([], {}), '()\n', (23865, 23867), False, 'import time\n'), ((27000, 27032), 'bifrost.affinity.set_core', 'cpu_affinity.set_core', (['self.core'], {}), '(self.core)\n', (27021, 27032), True, 'import bifrost.affinity as cpu_affinity\n'), ((28172, 28183), 'time.time', 'time.time', ([], {}), '()\n', (28181, 28183), False, 'import time\n'), ((31570, 31590), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (31585, 31590), False, 'import os\n'), ((36691, 36708), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (36706, 36708), False, 'from datetime import datetime, timedelta\n'), ((36711, 36732), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(15)'}), '(seconds=15)\n', (36720, 36732), False, 'from datetime import datetime, timedelta\n'), ((37014, 37031), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (37029, 37031), False, 'from datetime import datetime, timedelta\n'), ((37034, 37055), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(75)'}), '(seconds=75)\n', (37043, 37055), False, 'from datetime import datetime, timedelta\n'), ((2599, 2614), 'json.dumps', 'json.dumps', (['hdr'], {}), '(hdr)\n', (2609, 2614), False, 'import json\n'), ((4983, 5010), 'numpy.load', 'numpy.load', (['"""utils/sky.npy"""'], {}), "('utils/sky.npy')\n", (4993, 5010), False, 'import numpy\n'), ((9527, 9546), 'numpy.arange', 'numpy.arange', (['nchan'], {}), '(nchan)\n', (9539, 9546), False, 'import numpy\n'), ((11066, 11089), 'bifrost.affinity.get_core', 'cpu_affinity.get_core', ([], {}), '()\n', (11087, 11089), True, 'import bifrost.affinity as cpu_affinity\n'), ((12336, 12347), 'time.time', 'time.time', ([], {}), '()\n', (12345, 12347), False, 'import time\n'), ((14574, 14585), 'time.time', 'time.time', ([], {}), '()\n', (14583, 14585), False, 'import time\n'), ((18389, 18412), 'bifrost.affinity.get_core', 'cpu_affinity.get_core', ([], {}), '()\n', (18410, 18412), True, 'import bifrost.affinity as cpu_affinity\n'), ((19554, 19577), 'numpy.where', 'numpy.where', (['(dist > 0.1)'], {}), '(dist > 0.1)\n', (19565, 19577), False, 'import numpy\n'), ((19827, 19838), 'time.time', 'time.time', ([], {}), '()\n', (19836, 19838), False, 'import time\n'), ((21477, 21488), 'time.time', 'time.time', ([], {}), '()\n', (21486, 21488), False, 'import time\n'), ((22858, 22881), 'bifrost.affinity.get_core', 'cpu_affinity.get_core', ([], {}), '()\n', (22879, 22881), True, 'import bifrost.affinity as cpu_affinity\n'), ((24073, 24084), 'time.time', 'time.time', ([], {}), '()\n', (24082, 24084), False, 'import time\n'), ((25675, 25686), 'time.time', 'time.time', ([], {}), '()\n', (25684, 25686), False, 'import time\n'), ((27123, 27146), 'bifrost.affinity.get_core', 'cpu_affinity.get_core', ([], {}), '()\n', (27144, 27146), True, 'import bifrost.affinity as cpu_affinity\n'), ((28389, 28400), 'time.time', 'time.time', ([], {}), '()\n', (28398, 28400), False, 'import time\n'), ((30171, 30182), 'time.time', 'time.time', ([], {}), '()\n', (30180, 30182), False, 'import time\n'), ((34607, 34646), 'os.path.realpath', 'os.path.realpath', (['args.record_directory'], {}), '(args.record_directory)\n', (34623, 34646), False, 'import os\n'), 
((5162, 5216), 'numpy.zeros', 'numpy.zeros', (['(nbl, nchan, npol)'], {'dtype': 'numpy.complex64'}), '((nbl, nchan, npol), dtype=numpy.complex64)\n', (5173, 5216), False, 'import numpy\n'), ((7467, 7478), 'time.time', 'time.time', ([], {}), '()\n', (7476, 7478), False, 'import time\n'), ((11989, 12008), 'numpy.arange', 'numpy.arange', (['nchan'], {}), '(nchan)\n', (12001, 12008), False, 'import numpy\n'), ((13229, 13261), 'mnc.mcs.ImageMonitorPoint.from_image', 'ImageMonitorPoint.from_image', (['im'], {}), '(im)\n', (13257, 13261), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client\n'), ((14445, 14456), 'time.time', 'time.time', ([], {}), '()\n', (14454, 14456), False, 'import time\n'), ((19345, 19364), 'numpy.arange', 'numpy.arange', (['nchan'], {}), '(nchan)\n', (19357, 19364), False, 'import numpy\n'), ((20716, 20748), 'mnc.mcs.ImageMonitorPoint.from_image', 'ImageMonitorPoint.from_image', (['im'], {}), '(im)\n', (20744, 20748), False, 'from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client\n'), ((21348, 21359), 'time.time', 'time.time', ([], {}), '()\n', (21357, 21359), False, 'import time\n'), ((24939, 24963), 'numpy.min', 'numpy.min', (['adata'], {'axis': '(1)'}), '(adata, axis=1)\n', (24948, 24963), False, 'import numpy\n'), ((24995, 25019), 'numpy.max', 'numpy.max', (['adata'], {'axis': '(1)'}), '(adata, axis=1)\n', (25004, 25019), False, 'import numpy\n'), ((25051, 25076), 'numpy.mean', 'numpy.mean', (['adata'], {'axis': '(1)'}), '(adata, axis=1)\n', (25061, 25076), False, 'import numpy\n'), ((25546, 25557), 'time.time', 'time.time', ([], {}), '()\n', (25555, 25557), False, 'import time\n'), ((2379, 2403), 'numpy.sqrt', 'numpy.sqrt', (['(8 * nsrc + 1)'], {}), '(8 * nsrc + 1)\n', (2389, 2403), False, 'import numpy\n'), ((6800, 6811), 'time.time', 'time.time', ([], {}), '()\n', (6809, 6811), False, 'import time\n'), ((7236, 7247), 'time.time', 'time.time', ([], {}), '()\n', (7245, 7247), False, 'import time\n'), ((8739, 8760), 'numpy.isfinite', 'numpy.isfinite', (['specs'], {}), '(specs)\n', (8753, 8760), False, 'import numpy\n'), ((8813, 8834), 'numpy.isfinite', 'numpy.isfinite', (['specs'], {}), '(specs)\n', (8827, 8834), False, 'import numpy\n'), ((12590, 12601), 'time.time', 'time.time', ([], {}), '()\n', (12599, 12601), False, 'import time\n'), ((20081, 20092), 'time.time', 'time.time', ([], {}), '()\n', (20090, 20092), False, 'import time\n'), ((24327, 24338), 'time.time', 'time.time', ([], {}), '()\n', (24336, 24338), False, 'import time\n'), ((32718, 32744), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (32734, 32744), False, 'import os\n'), ((5783, 5794), 'time.time', 'time.time', ([], {}), '()\n', (5792, 5794), False, 'import time\n'), ((6112, 6136), 'numpy.sqrt', 'numpy.sqrt', (['(8 * nsrc + 1)'], {}), '(8 * nsrc + 1)\n', (6122, 6136), False, 'import numpy\n'), ((7337, 7353), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (7347, 7353), False, 'import time\n'), ((7394, 7405), 'time.time', 'time.time', ([], {}), '()\n', (7403, 7405), False, 'import time\n'), ((13135, 13153), 'numpy.log10', 'numpy.log10', (['adata'], {}), '(adata)\n', (13146, 13153), False, 'import numpy\n'), ((7079, 7111), 'numpy.random.randn', 'numpy.random.randn', (['*odata.shape'], {}), '(*odata.shape)\n', (7097, 7111), False, 'import numpy\n')]
|
import copy
import numpy as np
import pandas as pd
import os
import contextlib
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
SEED = 0
NFOLDS = 4
KFOLD = StratifiedKFold(n_splits=NFOLDS, shuffle=True, random_state=SEED)
def skl_macro_f1(y_true, y_hat):
"""Early stopping by macro F1-score, callback function for LightGBM sklearn API."""
y_hat = np.where(y_hat > 0.5, 1, 0)
return 'f1', f1_score(y_true, y_hat, average='macro'), True
class SklearnWrapper(object):
"""Wapper object for Sklearn classifiers."""
def __init__(self, clf, seed=SEED, params=None, scale=False):
if scale:
if params is None:
self.clf = make_pipeline(StandardScaler(), clf)
else:
self.clf = make_pipeline(StandardScaler(), clf(**params))
else:
if params is None:
self.clf = clf
else:
self.clf = clf(**params)
self.clftype = type(clf)
def train(self, x_train, y_train, x_val=None, y_val=None):
self.clf.fit(X=x_train, y=y_train)
def predict(self, x):
return self.clf.predict_proba(x)[:, 1]
def __str__(self):
return str(self.clftype).split(".")[-1][:-2]
class LightGBMWrapper(object):
"""Wrapper object for LightGBMClassifier."""
def __init__(self, clf, seed=SEED, params=None):
params['feature_fraction_seed'] = seed
params['bagging_seed'] = seed
self.params = params
self.clf = clf(**params, n_estimators=10000)
def train(self, x_train, y_train, x_val, y_val):
self.clf.fit(X=x_train, y=y_train, eval_set=(x_val, y_val), verbose=0, early_stopping_rounds=250,
eval_metric=skl_macro_f1)
def predict(self, x):
return self.clf.predict_proba(x)[:, 1]
def __str__(self):
return str(type(self.clf)).split(".")[-1][:-2]
def get_oof(clf, x_train, y_train, x_test, y_test):
"""Get stacked out-of-fold predictions on training data and save classifiers
for future predictions."""
oof_train = np.zeros((x_train.shape[0],))
oof_test = np.zeros((x_test.shape[0],))
oof_test_skf = np.empty((NFOLDS, x_test.shape[0]))
models = []
for i, (train_index, val_index) in enumerate(KFOLD.split(x_train, y_train)):
x_train_fold = x_train[train_index, :]
y_train_fold = y_train[train_index]
x_val_fold = x_train[val_index, :]
y_val_fold = y_train[val_index]
clf.train(x_train_fold, y_train_fold, x_val_fold, y_val_fold)
train_pred = clf.predict(x_train_fold)
oof_pred = clf.predict(x_val_fold)
test_pred = clf.predict(x_test)
oof_train[val_index] = oof_pred
oof_test_skf[i, :] = test_pred
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
models.append(copy.deepcopy(clf))
train_f1 = f1_score(y_train_fold, np.round(train_pred), average='macro')
val_f1 = f1_score(y_val_fold, np.round(oof_pred), average='macro')
test_f1 = f1_score(y_test, np.round(test_pred), average='macro')
print(f'Fold {i + 1}/{NFOLDS}, {clf}, train macro-F1: {train_f1:.3f}, oof macro-F1: {val_f1:.3f}, '
              f'test macro-F1: {test_f1:.3f}')
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1).ravel(), oof_test.reshape(-1, 1).ravel(), models
class StackingEnsemble:
"""Stacking ensemble classifier.
To add classifiers, call 'add_to_ensemble' and provide a list of wrappers, a training set for oof predictions,
and test set for validation. The feature set needs a name when training parts of the ensemble on different sets.
After adding classifiers, 'train_meta_learner' needs to be called to train on out-of-fold training predictions.
    Predictions can be made on new data by providing a list of the same feature sets that were used when training the classifiers.
"""
def __init__(self):
self.initialised = False
self.ready_for_meta_learning = False
self.oof_train = pd.DataFrame()
self.oof_test = pd.DataFrame()
self.y_train = None
self.y_test = None
self.clf_count = 0
self.feature_set_count = 0
self.clf_feature_set_ids = []
self.feature_sets = dict()
self.models = []
self.metalearner = None
def add_to_ensemble(self, clf_wrapper_list, x_train, y_train, x_test, y_test, feature_set_name):
"""Train classifiers on provided feature set, add and save to ensemble object."""
print(f"\nAdding to ensemble, {len(clf_wrapper_list)} classifiers trained on input {x_train.shape}:\n")
if feature_set_name in self.feature_sets:
            feature_set_id = self.feature_sets[feature_set_name]
else:
feature_set_id = self.feature_set_count
            self.feature_sets[feature_set_name] = self.feature_set_count
self.feature_set_count += 1
if self.initialised:
assert (self.y_train == y_train).all() and (self.y_test == y_test).all(), "provided dataset is different to previously fitted set"
else:
self.initialised = True
self.y_train = y_train
self.y_test = y_test
for clf in clf_wrapper_list:
oof_train, oof_test, models = get_oof(clf, x_train, y_train, x_test, y_test)
self.oof_train[f'{self.feature_set_count}_{self.clf_count}'] = oof_train
self.oof_test[f'{self.feature_set_count}_{self.clf_count}'] = oof_test
self.models.append(models)
self.clf_count += 1
self.clf_feature_set_ids.append(feature_set_id)
self.ready_for_meta_learning = True
def train_meta_learner(self):
"""Train meta-learner on out-of-fold predictions.
Can only be called after having called 'add_to_ensemble'."""
assert self.ready_for_meta_learning is True
print(f"\nTraining meta-learner on ensemble of {self.clf_count} classifiers:")
self.metalearner = LogisticRegression()
self.metalearner.fit(self.oof_train, self.y_train)
preds = self.metalearner.predict(self.oof_train)
ac = accuracy_score(self.y_train, preds)
f1 = f1_score(self.y_train, preds, average='macro')
print(f"Train: accuracy {ac:0.3f}, macro-F1 {f1:0.3f}")
preds = self.metalearner.predict(self.oof_test)
ac = accuracy_score(self.y_test, preds)
f1 = f1_score(self.y_test, preds, average='macro')
print(f"Valid: accuracy {ac:0.3f}, macro-F1 {f1:0.3f} ")
def predict_proba(self, fs_list):
"""Predict probabilities on a list of feature sets, the same used when training the ensemble."""
assert self.metalearner is not None
basepreds = pd.DataFrame()
for i, clf_models in enumerate(self.models):
fs_id = self.clf_feature_set_ids[i]
clf_preds = np.zeros((fs_list[fs_id].shape[0],))
preds_skf = np.empty((NFOLDS, fs_list[fs_id].shape[0]))
for j, clf in enumerate(clf_models):
pred = clf.predict(fs_list[fs_id])
preds_skf[j, :] = pred
clf_preds[:] = preds_skf.mean(axis=0)
basepreds[i] = clf_preds
preds_prob = self.metalearner.predict_proba(basepreds)[:, 1]
return preds_prob
def predict(self, fs_list):
"""Predict binary classes for a list of feature sets, the same used when training the ensemble."""
assert self.metalearner is not None
basepreds = pd.DataFrame()
for i, clf_models in enumerate(self.models):
fs_id = self.clf_feature_set_ids[i]
clf_preds = np.zeros((fs_list[fs_id].shape[0],))
preds_skf = np.empty((NFOLDS, fs_list[fs_id].shape[0]))
for j, clf in enumerate(clf_models):
pred = clf.predict(fs_list[fs_id])
preds_skf[j, :] = pred
clf_preds[:] = preds_skf.mean(axis=0)
basepreds[i] = clf_preds
preds = self.metalearner.predict(basepreds)
return preds
def evaluate(self, fs_list, y):
"""Evaluate ensemble given a list of feature sets and labels."""
preds = self.predict(fs_list)
ac = accuracy_score(y, preds)
f1 = f1_score(y, preds, average='macro')
print(f"Evaluation: accuracy {ac:0.4f}, macro-F1 {f1:0.4f}")
|
[
"contextlib.redirect_stdout",
"sklearn.metrics.f1_score",
"numpy.where",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.StratifiedKFold",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"numpy.empty",
"copy.deepcopy",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score",
"numpy.round"
] |
[((358, 423), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'NFOLDS', 'shuffle': '(True)', 'random_state': 'SEED'}), '(n_splits=NFOLDS, shuffle=True, random_state=SEED)\n', (373, 423), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((559, 586), 'numpy.where', 'np.where', (['(y_hat > 0.5)', '(1)', '(0)'], {}), '(y_hat > 0.5, 1, 0)\n', (567, 586), True, 'import numpy as np\n'), ((2278, 2307), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0],)'], {}), '((x_train.shape[0],))\n', (2286, 2307), True, 'import numpy as np\n'), ((2323, 2351), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0],)'], {}), '((x_test.shape[0],))\n', (2331, 2351), True, 'import numpy as np\n'), ((2371, 2406), 'numpy.empty', 'np.empty', (['(NFOLDS, x_test.shape[0])'], {}), '((NFOLDS, x_test.shape[0]))\n', (2379, 2406), True, 'import numpy as np\n'), ((604, 644), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_hat'], {'average': '"""macro"""'}), "(y_true, y_hat, average='macro')\n", (612, 644), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((4263, 4277), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4275, 4277), True, 'import pandas as pd\n'), ((4302, 4316), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4314, 4316), True, 'import pandas as pd\n'), ((6262, 6282), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6280, 6282), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6413, 6448), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['self.y_train', 'preds'], {}), '(self.y_train, preds)\n', (6427, 6448), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((6462, 6508), 'sklearn.metrics.f1_score', 'f1_score', (['self.y_train', 'preds'], {'average': '"""macro"""'}), "(self.y_train, preds, average='macro')\n", (6470, 6508), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((6643, 6677), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['self.y_test', 'preds'], {}), '(self.y_test, preds)\n', (6657, 6677), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((6691, 6736), 'sklearn.metrics.f1_score', 'f1_score', (['self.y_test', 'preds'], {'average': '"""macro"""'}), "(self.y_test, preds, average='macro')\n", (6699, 6736), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((7011, 7025), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7023, 7025), True, 'import pandas as pd\n'), ((7788, 7802), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7800, 7802), True, 'import pandas as pd\n'), ((8500, 8524), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'preds'], {}), '(y, preds)\n', (8514, 8524), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((8538, 8573), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'preds'], {'average': '"""macro"""'}), "(y, preds, average='macro')\n", (8546, 8573), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((3004, 3033), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['f'], {}), '(f)\n', (3030, 3033), False, 'import contextlib\n'), ((3124, 3144), 'numpy.round', 'np.round', (['train_pred'], {}), '(train_pred)\n', (3132, 3144), True, 'import numpy as np\n'), ((3201, 3219), 'numpy.round', 'np.round', (['oof_pred'], {}), '(oof_pred)\n', (3209, 3219), True, 'import numpy as np\n'), ((3273, 3292), 'numpy.round', 'np.round', (['test_pred'], {}), '(test_pred)\n', (3281, 3292), True, 'import numpy as np\n'), ((7154, 7190), 
'numpy.zeros', 'np.zeros', (['(fs_list[fs_id].shape[0],)'], {}), '((fs_list[fs_id].shape[0],))\n', (7162, 7190), True, 'import numpy as np\n'), ((7215, 7258), 'numpy.empty', 'np.empty', (['(NFOLDS, fs_list[fs_id].shape[0])'], {}), '((NFOLDS, fs_list[fs_id].shape[0]))\n', (7223, 7258), True, 'import numpy as np\n'), ((7931, 7967), 'numpy.zeros', 'np.zeros', (['(fs_list[fs_id].shape[0],)'], {}), '((fs_list[fs_id].shape[0],))\n', (7939, 7967), True, 'import numpy as np\n'), ((7992, 8035), 'numpy.empty', 'np.empty', (['(NFOLDS, fs_list[fs_id].shape[0])'], {}), '((NFOLDS, fs_list[fs_id].shape[0]))\n', (8000, 8035), True, 'import numpy as np\n'), ((3061, 3079), 'copy.deepcopy', 'copy.deepcopy', (['clf'], {}), '(clf)\n', (3074, 3079), False, 'import copy\n'), ((890, 906), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (904, 906), False, 'from sklearn.preprocessing import StandardScaler\n'), ((972, 988), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (986, 988), False, 'from sklearn.preprocessing import StandardScaler\n')]
|
import numpy as np
import yt
from matplotlib import rc
fsize = 17
rc('text', usetex=False)
rc('font', size=fsize)#, ftype=42)
line_width = 3
point_size = 30
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from galaxy_analysis.particle_analysis import particle_types as pdef
def plot_dtd(ds):
data = ds.all_data()
snIa = pdef.snIa(ds, data)
WD = pdef.white_dwarfs(ds, data)
WD_death = data['dynamical_time'][WD] # + data['creation_time'][WD]
SNIa_death = data['dynamical_time'][snIa] # + data['creation_time'][snIa]
WD_death = list(WD_death.convert_to_units("Gyr").value)
SNIa_death = list(SNIa_death.convert_to_units("Gyr").value)
fig, ax = plt.subplots()
all = np.array( WD_death + SNIa_death)
hist, bins = np.histogram(all, bins = np.arange(0,14.25,0.5))
x = 0.5 * (bins[1:] + bins[:-1])
ax.plot(x, hist, lw = 3, color = 'black', ls = '-')
y = x**(-1.0* ds.parameters['IndividualStarDTDSlope'])
norm = hist[0] / y[0]
ax.plot(x, norm*y, lw = 3, color = 'black', ls='--')
ax.plot(x, hist[0]/((x[0])**(-1.01)) * x**(-1.01),lw =3, color = 'black',ls=':')
ax.set_xlabel(r'Time (Gyr)')
ax.set_ylabel(r'Binned SNIa (counts)')
ax.loglog()
fig.set_size_inches(8,8)
plt.tight_layout()
plt.minorticks_on()
fig.savefig('dtd.png')
plt.close()
return
if __name__ == "__main__":
ds = yt.load('DD0205/DD0205')
data = ds.all_data()
plot_dtd(ds)
|
[
"matplotlib.use",
"galaxy_analysis.particle_analysis.particle_types.snIa",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.close",
"numpy.array",
"galaxy_analysis.particle_analysis.particle_types.white_dwarfs",
"yt.load",
"matplotlib.rc",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((66, 90), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (68, 90), False, 'from matplotlib import rc\n'), ((91, 113), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': 'fsize'}), "('font', size=fsize)\n", (93, 113), False, 'from matplotlib import rc\n'), ((182, 196), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (189, 196), True, 'import matplotlib as mpl\n'), ((355, 374), 'galaxy_analysis.particle_analysis.particle_types.snIa', 'pdef.snIa', (['ds', 'data'], {}), '(ds, data)\n', (364, 374), True, 'from galaxy_analysis.particle_analysis import particle_types as pdef\n'), ((386, 413), 'galaxy_analysis.particle_analysis.particle_types.white_dwarfs', 'pdef.white_dwarfs', (['ds', 'data'], {}), '(ds, data)\n', (403, 413), True, 'from galaxy_analysis.particle_analysis import particle_types as pdef\n'), ((710, 724), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (722, 724), True, 'import matplotlib.pyplot as plt\n'), ((735, 766), 'numpy.array', 'np.array', (['(WD_death + SNIa_death)'], {}), '(WD_death + SNIa_death)\n', (743, 766), True, 'import numpy as np\n'), ((1282, 1300), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1298, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1324), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (1322, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1367), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1365, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1443), 'yt.load', 'yt.load', (['"""DD0205/DD0205"""'], {}), "('DD0205/DD0205')\n", (1426, 1443), False, 'import yt\n'), ((811, 835), 'numpy.arange', 'np.arange', (['(0)', '(14.25)', '(0.5)'], {}), '(0, 14.25, 0.5)\n', (820, 835), True, 'import numpy as np\n')]
|
"""Tests cac.models.classification.ClassificationModel"""
import os
from os.path import dirname, join, exists
from copy import deepcopy
import torch
import wandb
import unittest
from tqdm import tqdm
import numpy as np
from torch import optim
from cac.config import Config
from cac.utils.logger import set_logger, color
from cac.models.classification import ClassificationModel
class ClassificationModelTestCase(unittest.TestCase):
"""Class to check the creation of ClassificationModel"""
@classmethod
def setUpClass(cls):
version = 'default.yml'
cls.cfg = Config(version)
cls.cfg.data['dataset']['params']['val']['fraction'] = 0.1
cls.cfg.num_workers = 1 if torch.cuda.is_available() else 10
# def test_1_model_fitting(self):
# """Test model.fit()"""
# set_logger(join(self.cfg.log_dir, 'train.log'))
# tester_cfg = deepcopy(self.cfg)
# tester_cfg.model['epochs'] = 1
# classifier = ClassificationModel(tester_cfg)
# classifier.fit(debug=True, use_wandb=False)
def test_optimizer(self):
"""Test model.fit()"""
set_logger(join(self.cfg.log_dir, 'train.log'))
tester_cfg = deepcopy(self.cfg)
tester_cfg.model['epochs'] = 1
classifier = ClassificationModel(tester_cfg)
self.assertIsInstance(classifier.optimizer, optim.SGD)
self.assertIsInstance(
classifier.scheduler, optim.lr_scheduler.ReduceLROnPlateau)
def test_with_frames(self):
"""Test models/lassification.py with fixed frames"""
cfg = Config('defaults/with-frames.yml')
cfg.data['dataset']['params']['train']['fraction'] = 0.01
cfg.data['dataset']['params']['val']['fraction'] = 0.03
cfg.model['batch_size'] = 4 # to make it work on small CPU machines
cfg.num_workers = 1
set_logger(join(cfg.log_dir, 'train.log'))
tester_cfg = deepcopy(cfg)
tester_cfg.model['epochs'] = 1
classifier = ClassificationModel(tester_cfg)
classifier.fit(debug=True, use_wandb=False)
def test_with_label_smoothing(self):
"""Test model.fit() with label smoothing"""
tester_cfg = Config('defaults/label-smoothing-random.yml')
set_logger(join(tester_cfg.log_dir, 'train.log'))
tester_cfg.data['dataset']['params']['train']['fraction'] = 0.01
tester_cfg.data['dataset']['params']['val']['fraction'] = 0.03
tester_cfg.model['batch_size'] = 4 # to make it work on small CPU machines
tester_cfg.num_workers = 1
tester_cfg.model['epochs'] = 1
classifier = ClassificationModel(tester_cfg)
classifier.fit(use_wandb=False)
def test_get_unique_paths(self):
"""Tests getting unique paths with order preserved (Used in _aggregate_data())"""
# input paths
paths = ['b', 'b', 'a', 'a', 'c', 'c', 'c', 'c']
# expected unique outputs with preserved order
exp_output = np.array(['b', 'a', 'c'])
_, idx = np.unique(paths, return_index=True)
unique_paths = np.take(paths, np.sort(idx))
self.assertTrue((unique_paths == exp_output).all())
if __name__ == "__main__":
unittest.main()
|
[
"numpy.unique",
"numpy.sort",
"os.path.join",
"numpy.array",
"torch.cuda.is_available",
"copy.deepcopy",
"unittest.main",
"cac.models.classification.ClassificationModel",
"cac.config.Config"
] |
[((3251, 3266), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3264, 3266), False, 'import unittest\n'), ((587, 602), 'cac.config.Config', 'Config', (['version'], {}), '(version)\n', (593, 602), False, 'from cac.config import Config\n'), ((1202, 1220), 'copy.deepcopy', 'deepcopy', (['self.cfg'], {}), '(self.cfg)\n', (1210, 1220), False, 'from copy import deepcopy\n'), ((1281, 1312), 'cac.models.classification.ClassificationModel', 'ClassificationModel', (['tester_cfg'], {}), '(tester_cfg)\n', (1300, 1312), False, 'from cac.models.classification import ClassificationModel\n'), ((1587, 1621), 'cac.config.Config', 'Config', (['"""defaults/with-frames.yml"""'], {}), "('defaults/with-frames.yml')\n", (1593, 1621), False, 'from cac.config import Config\n'), ((1929, 1942), 'copy.deepcopy', 'deepcopy', (['cfg'], {}), '(cfg)\n', (1937, 1942), False, 'from copy import deepcopy\n'), ((2003, 2034), 'cac.models.classification.ClassificationModel', 'ClassificationModel', (['tester_cfg'], {}), '(tester_cfg)\n', (2022, 2034), False, 'from cac.models.classification import ClassificationModel\n'), ((2202, 2247), 'cac.config.Config', 'Config', (['"""defaults/label-smoothing-random.yml"""'], {}), "('defaults/label-smoothing-random.yml')\n", (2208, 2247), False, 'from cac.config import Config\n'), ((2628, 2659), 'cac.models.classification.ClassificationModel', 'ClassificationModel', (['tester_cfg'], {}), '(tester_cfg)\n', (2647, 2659), False, 'from cac.models.classification import ClassificationModel\n'), ((3010, 3035), 'numpy.array', 'np.array', (["['b', 'a', 'c']"], {}), "(['b', 'a', 'c'])\n", (3018, 3035), True, 'import numpy as np\n'), ((3062, 3097), 'numpy.unique', 'np.unique', (['paths'], {'return_index': '(True)'}), '(paths, return_index=True)\n', (3071, 3097), True, 'import numpy as np\n'), ((705, 730), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (728, 730), False, 'import torch\n'), ((1143, 1178), 'os.path.join', 'join', (['self.cfg.log_dir', '"""train.log"""'], {}), "(self.cfg.log_dir, 'train.log')\n", (1147, 1178), False, 'from os.path import dirname, join, exists\n'), ((1875, 1905), 'os.path.join', 'join', (['cfg.log_dir', '"""train.log"""'], {}), "(cfg.log_dir, 'train.log')\n", (1879, 1905), False, 'from os.path import dirname, join, exists\n'), ((2267, 2304), 'os.path.join', 'join', (['tester_cfg.log_dir', '"""train.log"""'], {}), "(tester_cfg.log_dir, 'train.log')\n", (2271, 2304), False, 'from os.path import dirname, join, exists\n'), ((3136, 3148), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (3143, 3148), True, 'import numpy as np\n')]
|
import numpy as np
from numba import jit
from numba.core import types
from numba.tests.support import TestCase, tag
import unittest
# Array overlaps involving a displacement
def array_overlap1(src, dest, k=1):
assert src.shape == dest.shape
dest[k:] = src[:-k]
def array_overlap2(src, dest, k=1):
assert src.shape == dest.shape
dest[:-k] = src[k:]
def array_overlap3(src, dest, k=1):
assert src.shape == dest.shape
dest[:,:-k] = src[:,k:]
def array_overlap4(src, dest, k=1):
assert src.shape == dest.shape
dest[:,k:] = src[:,:-k]
def array_overlap5(src, dest, k=1):
assert src.shape == dest.shape
dest[...,:-k] = src[...,k:]
def array_overlap6(src, dest, k=1):
assert src.shape == dest.shape
dest[...,k:] = src[...,:-k]
# Array overlaps involving an in-place reversal
def array_overlap11(src, dest):
assert src.shape == dest.shape
dest[::-1] = src
def array_overlap12(src, dest):
assert src.shape == dest.shape
dest[:] = src[::-1]
def array_overlap13(src, dest):
assert src.shape == dest.shape
dest[:,::-1] = src
def array_overlap14(src, dest):
assert src.shape == dest.shape
dest[:] = src[:,::-1]
def array_overlap15(src, dest):
assert src.shape == dest.shape
dest[...,::-1] = src
def array_overlap16(src, dest):
assert src.shape == dest.shape
dest[:] = src[...,::-1]
class TestArrayOverlap(TestCase):
def check_overlap(self, pyfunc, min_ndim, have_k_argument=False):
N = 4
def vary_layouts(orig):
yield orig.copy(order='C')
yield orig.copy(order='F')
a = orig[::-1].copy()[::-1]
assert not a.flags.c_contiguous and not a.flags.f_contiguous
yield a
def check(pyfunc, cfunc, pydest, cdest, kwargs):
pyfunc(pydest, pydest, **kwargs)
cfunc(cdest, cdest, **kwargs)
self.assertPreciseEqual(pydest, cdest)
cfunc = jit(nopython=True)(pyfunc)
# Check for up to 3d arrays
for ndim in range(min_ndim, 4):
shape = (N,) * ndim
orig = np.arange(0, N**ndim).reshape(shape)
# Note we cannot copy a 'A' layout array exactly (bitwise),
# so instead we call vary_layouts() twice
for pydest, cdest in zip(vary_layouts(orig), vary_layouts(orig)):
if have_k_argument:
for k in range(1, N):
check(pyfunc, cfunc, pydest, cdest, dict(k=k))
else:
check(pyfunc, cfunc, pydest, cdest, {})
def check_overlap_with_k(self, pyfunc, min_ndim):
self.check_overlap(pyfunc, min_ndim=min_ndim, have_k_argument=True)
def test_overlap1(self):
self.check_overlap_with_k(array_overlap1, min_ndim=1)
def test_overlap2(self):
self.check_overlap_with_k(array_overlap2, min_ndim=1)
def test_overlap3(self):
self.check_overlap_with_k(array_overlap3, min_ndim=2)
def test_overlap4(self):
self.check_overlap_with_k(array_overlap4, min_ndim=2)
def test_overlap5(self):
self.check_overlap_with_k(array_overlap5, min_ndim=1)
def test_overlap6(self):
self.check_overlap_with_k(array_overlap6, min_ndim=1)
def test_overlap11(self):
self.check_overlap(array_overlap11, min_ndim=1)
def test_overlap12(self):
self.check_overlap(array_overlap12, min_ndim=1)
def test_overlap13(self):
self.check_overlap(array_overlap13, min_ndim=2)
def test_overlap14(self):
self.check_overlap(array_overlap14, min_ndim=2)
def test_overlap15(self):
self.check_overlap(array_overlap15, min_ndim=1)
def test_overlap16(self):
self.check_overlap(array_overlap16, min_ndim=1)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numba.jit",
"numpy.arange"
] |
[((3823, 3838), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3836, 3838), False, 'import unittest\n'), ((1959, 1977), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1962, 1977), False, 'from numba import jit\n'), ((2113, 2136), 'numpy.arange', 'np.arange', (['(0)', '(N ** ndim)'], {}), '(0, N ** ndim)\n', (2122, 2136), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from copy import deepcopy
import warnings
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.externals.joblib import Parallel, delayed
from gravity_learn.utils import (force_array,
check_cv,
fit_model,
check_is_fitted)
__all__ = ['EnsemblerClassifier',
'QuickStackClassifier',
'FullStackClassifier']
class EnsemblerClassifier(BaseEstimator, TransformerMixin):
# TODO: require df? how to pass Yfactory in
"""
This is a class to ensemble a set of given base models. The assumption
is that those models are tuned (hyperparameters chosen). It works as
follows.
It accepts a dictionary of base models, the ensembler to combine them,
a number of folds (to be used in the cross validation strategy) and
a random state (to be used in the cross val strategy)
The fit method:
        The classifier iterates through the base models, doing two things:
        - determining out-of-sample predictions (so n_folds fit-predict
          combinations), which are used for fitting the ensembler next;
        - fitting the base model to the full data, which is used by the
          ensembler's predict method.
        Notice this implies we have n_folds + 1 fits for each base model.
        With these out-of-sample predictions, it determines the parameters
        of the ensembler.
The predict method:
Determines the predictions of each of the base models and then
combines them with the fitted ensembler.
"""
def __init__(self, base_models, ensembler_est, n_folds, random_state=0):
"""
Parameters
----------
base_models : a dictionary of model name/model pairs
        ensembler_est : an ensembler to combine the outputs of the base
            models
n_folds : the number of folds to use when estimating the parameters
of the ensemblers. Note: Ideally, n_folds should be high, because
it makes the size of the base model fit for predictions and the
base model fit for ensembler calibration more similar.
        random_state : the random state to use in the cross validation
strategy
"""
self.base_models = base_models
self.ensembler_est = ensembler_est
self.n_folds = n_folds
self.random_state = random_state
self.fitted_base_models = {}
self.model_order = []
warnings.warn('EnsemblerClassifier is deprecated, '
'please use FullStackClassifier instead',
DeprecationWarning)
def fit(self, X, y):
cv = StratifiedKFold(
n_splits=self.n_folds,
shuffle=True,
random_state=self.random_state
)
base_predictions = {}
for name, model in self.base_models.items():
# This is for determining the ensembler parameters
base_predictions[name] = cross_val_predict(
model, X, y, cv=cv, method='predict_proba'
)[:, 1]
# This for the ensembler.predict method
self.fitted_base_models[name] = model.fit(X, y)
self.model_order.append(name)
base_predictions = pd.DataFrame(
base_predictions,
index=X.index
)[self.model_order]
self.ensembler_est.fit(base_predictions, y)
return self
def predict_proba(self, X):
base_predictions = {}
for name, model in self.fitted_base_models.items():
base_predictions[name] = model.predict_proba(X)[:, 1]
base_predictions = pd.DataFrame(
base_predictions,
index=X.index
)[self.model_order]
return self.ensembler_est.predict_proba(base_predictions)
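# Illustrative usage of EnsemblerClassifier (a sketch; the base estimators and
# the DataFrame `X_df` / labels `y` are hypothetical placeholders -- note that
# fit and predict_proba expect a pandas DataFrame, since they use X.index):
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
#
#   base = {'rf': RandomForestClassifier(n_estimators=200),
#           'gbm': GradientBoostingClassifier()}
#   ens = EnsemblerClassifier(base, LogisticRegression(), n_folds=5)
#   ens.fit(X_df, y)
#   proba = ens.predict_proba(X_new_df)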
class QuickStackClassifier(BaseEstimator):
"""
This class has a similar stacking structure but also is scalable,
which means, it's objective to save computing run time on training
in-sample-fold and outputing out-of-fold predictions for fitting ensembler
Instead of doing K-fold training for each base model, it does only one-fold
To have a good performance, it requires ensembler to be a simple model with
only a few parameters to tune
Parameters
----------
    base_models : list of (string, base_model) tuples. The first
        element of each tuple is the name given to that base model.
ensembler : an ensembler to combine the outputs of the base models
    proba : bool, if True, base-model probabilities are passed to the
        ensembler; if False, they are first thresholded at 0.5
    full_train : bool, if True, the base models are re-trained on 100% of
        the data and those fits are used when predicting on new data.
        Default is True
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
"""
def __init__(self, base_models, ensembler, proba=True,
full_train=True, cv=None, n_jobs=1, verbose=0):
self.base_models = list(base_models)
self.ensembler = ensembler
self.proba = proba
self.full_train = full_train
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
if self.cv is None:
self.cv = KFold(n_splits=3, shuffle=True)
warnings.warn('QuickStackClassifier is deprecated, '
'please use FullStackClassifier instead',
DeprecationWarning)
def get_params(self, deep=True):
return self.ensembler.get_params(deep=deep)
def set_params(self, **params):
return self.ensembler.set_params(**params)
def _fit(self, X, y, *args, **kwargs):
"""
        Private method that trains all base models on the in-fold part of the last cv fold
"""
# get list of folds of indices
self.last_fold = list(check_cv(self.cv).split(X, y))[-1]
self.in_fold = self.last_fold[0]
self.out_of_fold = self.last_fold[-1]
        # Parallelization
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
if isinstance(X, pd.DataFrame):
if not isinstance(y, (pd.Series, pd.DataFrame)):
y = pd.DataFrame(y)
self.fitted_models = parallel(delayed(fit_model)(
model=deepcopy(model),
X=X.iloc[self.in_fold],
y=y.iloc[self.in_fold],
*args,
**kwargs
) for (_, model) in self.base_models
)
else: # X is not a dataframe
self.fitted_models = parallel(delayed(fit_model)(
model=deepcopy(model),
X=X[self.in_fold],
y=force_array(y)[self.in_fold],
*args,
**kwargs
) for (_, model) in self.base_models
)
# train model with full 100% data
if self.full_train:
self.full_fitted_models = parallel(delayed(fit_model)(
model=deepcopy(model),
X=X,
y=y,
*args,
**kwargs
) for (_, model) in self.base_models
)
def fit(self, X, y, *args, **kwargs):
"""
        fit method fits the ensembler; its training data is the set of
        out-of-fold predictions from base_models
"""
# call _fit
self._fit(X, y, *args, **kwargs)
        # generate out-of-sample predictions and preserve the same order
proba_dfs = []
if isinstance(X, pd.DataFrame):
for i, model in enumerate(self.fitted_models):
df_proba = pd.DataFrame(
{'proba_{}'.format(i): model.predict_proba(X.iloc[self.out_of_fold])[:, 1]}, # noqa
index=self.out_of_fold
)
proba_dfs.append(df_proba)
else: # X is not a dataframe
for i, model in enumerate(self.fitted_models):
df_proba = pd.DataFrame(
{'proba_{}'.format(i): model.predict_proba(X[self.out_of_fold])[:, 1]}, # noqa
index=self.out_of_fold
)
proba_dfs.append(df_proba)
        # horizontally concat dfs and restore the original order
df_out_of_fold_pred = pd.concat(proba_dfs, axis=1)
# if need to convert to predict
if not self.proba:
df_out_of_fold_pred = df_out_of_fold_pred >= 0.5
# Now train ensembler
if not isinstance(y, (pd.Series, pd.DataFrame)):
y = pd.DataFrame(y)
self.ensembler.fit(
X=df_out_of_fold_pred,
y=y.iloc[self.out_of_fold],
*args, **kwargs
)
# signal done fitting
self.fitted = True
return self
def predict_proba(self, X, *args, **kwargs):
check_is_fitted(self, 'fitted')
# use full_trained model or not
if self.full_train:
base_models_list = self.full_fitted_models
else:
base_models_list = self.fitted_models
# get pred from all base models
proba_dfs = []
for i, model in enumerate(base_models_list):
df_proba = pd.DataFrame(
{'proba_{}'.format(i): model.predict_proba(X)[:, 1]}
)
proba_dfs.append(df_proba)
# horizontal concat P1 from all base models
df_base_pred = pd.concat(proba_dfs, axis=1)
if not self.proba:
df_base_pred = df_base_pred >= 0.5
# ensembler make predictions
return self.ensembler.predict_proba(df_base_pred, *args, **kwargs)
def predict(self, X, *args, **kwargs):
df_proba = self.predict_proba(X, *args, **kwargs)[:, 1]
df_pred = df_proba >= 0.5
return force_array(df_pred)
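# Usage sketch for QuickStackClassifier (illustrative only -- the scikit-learn
# estimators and the X_train/y_train/X_test names below are assumptions, not
# part of this module):
#
#     from sklearn.ensemble import RandomForestClassifier
#     from sklearn.linear_model import LogisticRegression
#
#     base = [('rf', RandomForestClassifier(n_estimators=100)),
#             ('lr', LogisticRegression())]
#     stack = QuickStackClassifier(base_models=base,
#                                  ensembler=LogisticRegression(),
#                                  cv=5, n_jobs=-1)
#     stack.fit(X_train, y_train)
#     probas = stack.predict_proba(X_test)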
def _base_model_cross_val(model, X, y, cv=None, proba=True, *args, **kwargs):
"""
    A private function that trains the given base model on each fold
    and outputs the fitted models, their out-of-fold predictions,
    and the array of y (in the same order as the out-of-fold predictions)
    for fitting the ensembler
Parameters
----------
model : object, base model
X : array-like, or dataframe
y : array-like, or dataframe
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
    proba : bool, if True, out-of-fold probabilities are returned;
        if False, they are thresholded at 0.5 first
    Returns
    -------
    list of fitted models (one per fold), Xt (out-of-fold predictions),
    y (aligned with Xt)
"""
# get list of folds of indices
all_folds = list(check_cv(cv).split(X, y))
# check data type
if not isinstance(X, (pd.DataFrame, pd.Series)):
X = pd.DataFrame(force_array(X))
if not isinstance(y, (pd.DataFrame, pd.Series)):
y = pd.DataFrame(force_array(y))
# iterate each train-fold and fit base model
fitted_models = [
fit_model(
model=deepcopy(model),
X=X.iloc[train],
y=y.iloc[train],
*args,
**kwargs
) for train, test in all_folds
]
    # generate out-of-sample predictions and preserve the same order
proba_dfs = []
for i, (train, test) in enumerate(all_folds):
df_proba = pd.DataFrame(
{'proba': fitted_models[i].predict_proba(X.iloc[test])[:, 1]}, # noqa
index=test
)
proba_dfs.append(df_proba)
# concat dfs, sort index, and record index
df_out_of_sample = pd.concat(proba_dfs).sort_index()
idx = df_out_of_sample.index.values
# get pred_out_of_sample
pred_out_of_sample = \
force_array(df_out_of_sample).reshape((len(df_out_of_sample), 1))
# if need to convert to predict
if not proba:
pred_out_of_sample = pred_out_of_sample > 0.5
# get y matched with pred_out_of_sample
y_out_of_sample = y.iloc[idx]
return fitted_models, pred_out_of_sample, y_out_of_sample
class FullStackClassifier(BaseEstimator):
"""
    This class is the full version of QuickStackClassifier; in other words,
    QuickStackClassifier is a special case of FullStackClassifier.
    Its objective is to output out-of-fold predictions for fitting the ensembler.
    Instead of passing Xt, y with the same shape as the input to the ensembler,
    this class allows Xt, y whose shape is modified by the chosen CV strategy.
Parameters
----------
    base_models : list of (string, base_model) tuples. The first
        element of each tuple is the name of the base model.
    ensembler : an ensembler to combine the outputs of the base models
    proba : bool, if True, base-model probabilities are passed to the
        ensembler; if False, they are thresholded at 0.5 first
    full_train : bool, if True, the base models are re-trained on 100% of the
        data and those models are used to generate probabilities for new data.
        Default is True
    quick_stack : bool, if True, base models are trained and predict only on
        the last fold to output out-of-sample predictions for the ensembler.
        Default is False
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
"""
def __init__(self, base_models, ensembler, proba=True,
full_train=True, quick_stack=False,
cv=None, n_jobs=1, verbose=0):
self.base_models = list(base_models)
self.ensembler = ensembler
self.proba = proba
self.full_train = full_train
self.quick_stack = quick_stack
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def get_params(self, deep=True):
return self.ensembler.get_params(deep=deep)
def set_params(self, **params):
return self.ensembler.set_params(**params)
@property
def get_fitted_models_(self):
check_is_fitted(self, 'fitted')
if self.full_train:
fitted_models = self.full_fitted_models
else:
fitted_models = self.fitted_models
return fitted_models
@property
def get_fitted_ensembler_(self):
check_is_fitted(self, 'fitted')
return self.ensembler
def fit(self, X, y, *args, **kwargs):
"""
        fit method fits the ensembler; its training data is the set of
        out-of-fold predictions from base_models
"""
# cv has to be deterministic
cv = list(check_cv(self.cv).split(X, y))
# check quick_stack
if self.quick_stack:
cv = [cv[-1]]
# parallel iterating thru models to output out-of-fold pred
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
result = parallel(delayed(_base_model_cross_val)(
model=deepcopy(model),
X=X,
y=y,
cv=cv,
proba=self.proba,
*args, **kwargs
) for (_, model) in self.base_models
)
# post process
fitted_models, pred_out_of_sample, y_out_of_sample = zip(*result)
self.fitted_models = \
[
(self.base_models[i][0], models)
for i, models in enumerate(fitted_models)
]
# assume all y_out_of_sample are the same, which they should be
y_out_of_sample = y_out_of_sample[0]
# prepare out_of_sample to fit ensembler
pred_out_of_sample = np.hstack(pred_out_of_sample)
# Now train ensembler
self.ensembler.fit(
X=pred_out_of_sample,
y=y_out_of_sample,
*args, **kwargs
)
# check full_train
if self.full_train:
self.full_fitted_models = parallel(delayed(fit_model)(
model=deepcopy(model),
X=X,
y=y,
*args,
**kwargs
) for (_, model) in self.base_models
)
# post process
self.full_fitted_models = \
[
(self.base_models[i][0], models)
for i, models in enumerate(self.full_fitted_models)
]
# signal done fitting
self.fitted = True
return self
def predict_proba(self, X, *args, **kwargs):
check_is_fitted(self, 'fitted')
# use full_trained model or not
proba_dfs = []
if self.full_train:
for name, model in self.full_fitted_models:
df_proba = pd.DataFrame(
{'proba_{}'.format(name): model.predict_proba(X)[:, 1]}
)
proba_dfs.append(df_proba)
else:
for name, models in self.fitted_models:
avg_proba = np.average(
np.hstack(
[
model.predict_proba(X)[:, 1].reshape((len(X), 1))
for model in models
]
),
axis=1
)
df_proba = pd.DataFrame({'proba_{}'.format(name): avg_proba})
proba_dfs.append(df_proba)
# horizontal concat P1 from all base models
df_base_pred = pd.concat(proba_dfs, axis=1)
if not self.proba:
df_base_pred = df_base_pred > 0.5
# ensembler make predictions
return self.ensembler.predict_proba(df_base_pred, *args, **kwargs)
def predict(self, X, *args, **kwargs):
df_proba = self.predict_proba(X, *args, **kwargs)[:, 1]
df_pred = df_proba > 0.5
return force_array(df_pred)
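# Usage sketch for FullStackClassifier (illustrative only -- the estimator
# choices and data names are assumptions; quick_stack=True reproduces the
# one-fold behaviour of QuickStackClassifier):
#
#     from sklearn.ensemble import GradientBoostingClassifier
#     from sklearn.linear_model import LogisticRegression
#
#     base = [('gbm', GradientBoostingClassifier()),
#             ('lr', LogisticRegression())]
#     stack = FullStackClassifier(base_models=base,
#                                 ensembler=LogisticRegression(),
#                                 quick_stack=False, cv=5, n_jobs=-1)
#     stack.fit(X_train, y_train)
#     y_pred = stack.predict(X_test)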
|
[
"copy.deepcopy",
"sklearn.externals.joblib.delayed",
"pandas.DataFrame",
"numpy.hstack",
"gravity_learn.utils.force_array",
"sklearn.model_selection.StratifiedKFold",
"gravity_learn.utils.check_is_fitted",
"sklearn.externals.joblib.Parallel",
"sklearn.model_selection.cross_val_predict",
"gravity_learn.utils.check_cv",
"warnings.warn",
"sklearn.model_selection.KFold",
"pandas.concat"
] |
[((2660, 2780), 'warnings.warn', 'warnings.warn', (['"""EnsemblerClassifier is deprecated, please use FullStackClassifier instead"""', 'DeprecationWarning'], {}), "(\n 'EnsemblerClassifier is deprecated, please use FullStackClassifier instead'\n , DeprecationWarning)\n", (2673, 2780), False, 'import warnings\n'), ((2857, 2946), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.n_folds', 'shuffle': '(True)', 'random_state': 'self.random_state'}), '(n_splits=self.n_folds, shuffle=True, random_state=self.\n random_state)\n', (2872, 2946), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((6009, 6130), 'warnings.warn', 'warnings.warn', (['"""QuickStackClassifier is deprecated, please use FullStackClassifier instead"""', 'DeprecationWarning'], {}), "(\n 'QuickStackClassifier is deprecated, please use FullStackClassifier instead'\n , DeprecationWarning)\n", (6022, 6130), False, 'import warnings\n'), ((6716, 6766), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (6724, 6766), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((9010, 9038), 'pandas.concat', 'pd.concat', (['proba_dfs'], {'axis': '(1)'}), '(proba_dfs, axis=1)\n', (9019, 9038), True, 'import pandas as pd\n'), ((9562, 9593), 'gravity_learn.utils.check_is_fitted', 'check_is_fitted', (['self', '"""fitted"""'], {}), "(self, 'fitted')\n", (9577, 9593), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((10131, 10159), 'pandas.concat', 'pd.concat', (['proba_dfs'], {'axis': '(1)'}), '(proba_dfs, axis=1)\n', (10140, 10159), True, 'import pandas as pd\n'), ((10503, 10523), 'gravity_learn.utils.force_array', 'force_array', (['df_pred'], {}), '(df_pred)\n', (10514, 10523), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((15338, 15369), 'gravity_learn.utils.check_is_fitted', 'check_is_fitted', (['self', '"""fitted"""'], {}), "(self, 'fitted')\n", (15353, 15369), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((15600, 15631), 'gravity_learn.utils.check_is_fitted', 'check_is_fitted', (['self', '"""fitted"""'], {}), "(self, 'fitted')\n", (15615, 15631), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((16119, 16169), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (16127, 16169), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((16891, 16920), 'numpy.hstack', 'np.hstack', (['pred_out_of_sample'], {}), '(pred_out_of_sample)\n', (16900, 16920), True, 'import numpy as np\n'), ((17763, 17794), 'gravity_learn.utils.check_is_fitted', 'check_is_fitted', (['self', '"""fitted"""'], {}), "(self, 'fitted')\n", (17778, 17794), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((18699, 18727), 'pandas.concat', 'pd.concat', (['proba_dfs'], {'axis': '(1)'}), '(proba_dfs, axis=1)\n', (18708, 18727), True, 'import pandas as pd\n'), ((19069, 19089), 'gravity_learn.utils.force_array', 'force_array', (['df_pred'], {}), '(df_pred)\n', (19080, 19089), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((3453, 3498), 'pandas.DataFrame', 'pd.DataFrame', (['base_predictions'], {'index': 
'X.index'}), '(base_predictions, index=X.index)\n', (3465, 3498), True, 'import pandas as pd\n'), ((3843, 3888), 'pandas.DataFrame', 'pd.DataFrame', (['base_predictions'], {'index': 'X.index'}), '(base_predictions, index=X.index)\n', (3855, 3888), True, 'import pandas as pd\n'), ((5969, 6000), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)', 'shuffle': '(True)'}), '(n_splits=3, shuffle=True)\n', (5974, 6000), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((9270, 9285), 'pandas.DataFrame', 'pd.DataFrame', (['y'], {}), '(y)\n', (9282, 9285), True, 'import pandas as pd\n'), ((11773, 11787), 'gravity_learn.utils.force_array', 'force_array', (['X'], {}), '(X)\n', (11784, 11787), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((11867, 11881), 'gravity_learn.utils.force_array', 'force_array', (['y'], {}), '(y)\n', (11878, 11881), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((12545, 12565), 'pandas.concat', 'pd.concat', (['proba_dfs'], {}), '(proba_dfs)\n', (12554, 12565), True, 'import pandas as pd\n'), ((12683, 12712), 'gravity_learn.utils.force_array', 'force_array', (['df_out_of_sample'], {}), '(df_out_of_sample)\n', (12694, 12712), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((3173, 3234), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['model', 'X', 'y'], {'cv': 'cv', 'method': '"""predict_proba"""'}), "(model, X, y, cv=cv, method='predict_proba')\n", (3190, 3234), False, 'from sklearn.model_selection import cross_val_predict\n'), ((6888, 6903), 'pandas.DataFrame', 'pd.DataFrame', (['y'], {}), '(y)\n', (6900, 6903), True, 'import pandas as pd\n'), ((11647, 11659), 'gravity_learn.utils.check_cv', 'check_cv', (['cv'], {}), '(cv)\n', (11655, 11659), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((11991, 12006), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (11999, 12006), False, 'from copy import deepcopy\n'), ((15918, 15935), 'gravity_learn.utils.check_cv', 'check_cv', (['self.cv'], {}), '(self.cv)\n', (15926, 15935), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((16196, 16226), 'sklearn.externals.joblib.delayed', 'delayed', (['_base_model_cross_val'], {}), '(_base_model_cross_val)\n', (16203, 16226), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((6549, 6566), 'gravity_learn.utils.check_cv', 'check_cv', (['self.cv'], {}), '(self.cv)\n', (6557, 6566), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n'), ((6946, 6964), 'sklearn.externals.joblib.delayed', 'delayed', (['fit_model'], {}), '(fit_model)\n', (6953, 6964), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((7280, 7298), 'sklearn.externals.joblib.delayed', 'delayed', (['fit_model'], {}), '(fit_model)\n', (7287, 7298), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((7654, 7672), 'sklearn.externals.joblib.delayed', 'delayed', (['fit_model'], {}), '(fit_model)\n', (7661, 7672), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((16246, 16261), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (16254, 16261), False, 'from copy import deepcopy\n'), ((17184, 17202), 'sklearn.externals.joblib.delayed', 'delayed', (['fit_model'], {}), '(fit_model)\n', (17191, 17202), False, 'from 
sklearn.externals.joblib import Parallel, delayed\n'), ((6988, 7003), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (6996, 7003), False, 'from copy import deepcopy\n'), ((7322, 7337), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (7330, 7337), False, 'from copy import deepcopy\n'), ((7696, 7711), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (7704, 7711), False, 'from copy import deepcopy\n'), ((17226, 17241), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (17234, 17241), False, 'from copy import deepcopy\n'), ((7392, 7406), 'gravity_learn.utils.force_array', 'force_array', (['y'], {}), '(y)\n', (7403, 7406), False, 'from gravity_learn.utils import force_array, check_cv, fit_model, check_is_fitted\n')]
|
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils import check_array
import faiss
def _default_index(d):
index = faiss.index_factory(d, "IVF2048,Flat", faiss.METRIC_INNER_PRODUCT)
index.nprobe = 256
return index
class ApproximateClassifierMixin(LinearClassifierMixin):
def decision_function(self, X):
if not hasattr(self, 'coef_') or self.coef_ is None:
raise NotFittedError("This %(name)s instance is not fitted "
"yet" % {'name': type(self).__name__})
self._train_index()
X = check_array(X, accept_sparse=False)
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
D, I = self.index_.search(X.astype(np.float32), 1)
return D, I
def _train_index(self):
if not hasattr(self, 'index_'):
self.index_ = _default_index(self.coef_.shape[1])
self.coef_ = np.ascontiguousarray(self.coef_, dtype=np.float32)
self.index_.train(self.coef_)
self.index_.add(self.coef_)
return self
def fast(cls):
assert LinearClassifierMixin in cls.mro(), "Can only speed up linear classifiers"
return type(cls.__name__, (ApproximateClassifierMixin,) + cls.__bases__, dict(cls.__dict__))
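# Usage sketch for fast() (illustrative only -- the training data names are
# assumptions; the wrapped class must be a linear classifier so that its coef_
# rows can be indexed by faiss as done above):
#
#     FastSGD = fast(SGDClassifier)
#     clf = FastSGD().fit(X_train, y_train)
#     D, I = clf.decision_function(X_query)  # inner-product scores and row ids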
|
[
"faiss.index_factory",
"sklearn.utils.check_array",
"numpy.ascontiguousarray"
] |
[((261, 327), 'faiss.index_factory', 'faiss.index_factory', (['d', '"""IVF2048,Flat"""', 'faiss.METRIC_INNER_PRODUCT'], {}), "(d, 'IVF2048,Flat', faiss.METRIC_INNER_PRODUCT)\n", (280, 327), False, 'import faiss\n'), ((713, 748), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': '(False)'}), '(X, accept_sparse=False)\n', (724, 748), False, 'from sklearn.utils import check_array\n'), ((1196, 1246), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.coef_'], {'dtype': 'np.float32'}), '(self.coef_, dtype=np.float32)\n', (1216, 1246), True, 'import numpy as np\n')]
|
"""
Created on Thursday Mar 26 2020
<NAME>
based on
https://www.kaggle.com/bardor/covid-19-growing-rate
https://github.com/CSSEGISandData/COVID-19
https://github.com/imdevskp
https://www.kaggle.com/yamqwe/covid-19-status-israel
"""
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots
import folium
import plotly
import os
import time
import matplotlib.dates as mdates
plt.style.use('dark_background')
# Tee writer: send output to several streams (e.g. the console and a log file)
class MyWriter:
def __init__(self, *writers):
self.writers = writers
def write(self, text):
for w in self.writers:
w.write(text)
def flush(self):
for w in self.writers:
w.flush()
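# Usage sketch for MyWriter (illustrative tee-logging pattern; the file name is
# an assumption):
#
#     import sys
#     log_file = open('run_log.txt', 'w')
#     sys.stdout = MyWriter(sys.stdout, log_file)  # print() now writes to both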
# bar plot
def bar_country_plot(full_data, groupby='Date', inputs=['Confirmed', 'Active', 'Recovered', 'Deaths'],
fname='_cases_bars', log=False):
# Confirmed vs Recovered and Death
if isinstance(full_data.Date.max(), str):
day = datetime.datetime.strptime(full_data.Date.max(), '%m/%d/%y').strftime('%d%m%y')
else:
day = full_data.Date.max().strftime('%d%m%y')
    title_string = full_data.State + ' Cases' + ' for ' + day
with open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), day + '_' + full_data.State + '_' + fname + '.html'), 'a') as ff:
fig = px.bar(full_data, x=groupby, y=inputs, color=inputs, template='ggplot2', log_y=True,
title=title_string, hover_name=inputs)
fig.layout.template = 'plotly_dark'
# fig.show()
ff.write(fig.to_html(full_html=False, include_plotlyjs='cdn', default_width='100%'))
f = plt.figure(figsize=(9, 7))
colors = ['blue', 'green', 'cyan', 'magenta', 'cyan', 'red', 'black']
alphas = [1, 0.75, 0.75, 1]
title_string = str()
for cnt in range(len(inputs)):
k = inputs[cnt]
plt.bar(full_data[groupby], full_data[k], label=k, alpha=alphas[cnt], log=log, color=colors[cnt])
title_string = title_string + k + ' vs '
plt.xlabel('Date')
plt.ylabel("Count")
plt.legend(frameon=True, fontsize=12)
plt.title(title_string[:-4], fontsize=30)
f.autofmt_xdate()
plt.show()
plt.savefig(os.path.join(os.getcwd(), day + '_' + str(full_data['Country'].unique().values) + '.png'))
return f
##############################################################################################
# Normalise
def normalise_func(input_data, inputs=['Confirmed', 'Deaths', 'Recovered', 'Active'], name='NormPop',
normaliseTo='Population', factor=1e6, toRound=False):
for cnt in range(len(inputs)):
k = inputs[cnt]
new_name = name+k
input_data.loc[:, new_name] = 0
# Normalise to Population with factor of 1M
input_data.loc[:, new_name] = (input_data[k].values * factor / (input_data[normaliseTo].values + 1e-6)).clip(0)
if toRound:
input_data.loc[input_data.loc[:, new_name] > 1, new_name] = input_data.loc[input_data.loc[:, new_name] > 1, new_name].astype(int)
return input_data
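# Worked example for normalise_func (made-up numbers): with the default
# factor=1e6, a row with Confirmed=5000 and Population=9,000,000 gets
# NormPopConfirmed = 5000 * 1e6 / 9e6 ~= 555 cases per 1M inhabitants
# (truncated to int when toRound=True).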
############################################################################################################
# Events
def add_events(input_data, events):
input_data.loc[:, 'Event'] = ''
for cnt in range(events.shape[0]):
input_data.loc[input_data['Date'] == events.Date[cnt], 'Event'] = events.Event[cnt]
return input_data
######################################################################################################
# Growth
def growth_func(input_data, inputs, numDays=1, name='Growth', normalise=True, prediction_Range=1):
for cnt in range(len(inputs)):
k = inputs[cnt]
input_data.loc[:, name+k] = 0
if normalise:
input_data.loc[:, name+k] = ((input_data[k] / input_data[k].shift(numDays)) ** prediction_Range - 1) * 100.0 # .clip(0)
input_data.loc[input_data[k].shift(-numDays) == 0, name+k] = 0
else:
input_data[name+k] = (input_data[k] - input_data[k].shift(numDays)) # .clip(0)
return input_data
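# Worked example for growth_func (made-up numbers): with numDays=1 and
# normalise=True, a Confirmed series [100, 120, 150] yields GrowthConfirmed
# [NaN, 20.0, 25.0], i.e. the day-over-day percentage change.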
############################################################################################################
# add the population and age columns for the given data
def add_pop_age_data(input_data, world_population):
world_pop = None
input_data.loc[:, 'Population'] = np.nan
input_data.loc[:, 'Age'] = np.nan
for val in input_data.Country.unique():
curr = world_population[world_population['Country'] == val]
cntries = input_data.Country == val
try:
input_data.loc[cntries, 'Population'] = curr['Population'].values
input_data.loc[cntries, 'Age'] = curr['Age'].values
if world_pop is not None:
world_pop = pd.concat([world_pop, curr], axis=0, sort=False)
else:
world_pop = curr
except ValueError:
pass
return input_data, world_pop
#########################################################################################
# extract data according to the group columns (Date and State); if add_value is True, prepend the country name to the State string
def group_extract_data(full_data, world_population, groupby=['Date', 'State', 'Country'], inputs=['Confirmed'],
threshould=5000, add_value=True):
sorted_data = full_data.sort_values(groupby)
group = sorted_data[groupby[1]].unique()
latest = sorted_data[sorted_data.Date == sorted_data.Date.max()]
remain_data = latest[latest[inputs] > threshould][groupby[1]].unique()
relevant = sorted_data.copy()
for val in group:
if (remain_data != val).all():
relevant = relevant[relevant[groupby[1]].str.endswith(val) != True]
elif not relevant[groupby[2]].str.endswith(val).any() and add_value:
relevant.loc[relevant[groupby[1]].str.endswith(val), groupby[1]] = \
relevant.loc[relevant[groupby[1]].str.endswith(val), groupby[2]].values[0] + \
'_' + val
relevant, world_pop = add_pop_age_data(relevant, world_population)
return relevant, world_pop
################################################################################################
# Create Sum Table
def create_table(indata, day, inputs=['Confirmed', 'Deaths', 'Recovered', 'Active'],
h_columns=['Current Day', 'Total', 'Max Value'], title_string='', height='100%',
fname='_World_Daily_Situation_Summarise_Table'):
head = indata[inputs].keys().values.tolist()
head.insert(0, h_columns[0])
body = [h_columns[1:]]
for cnt in range(len(inputs)):
body.append(indata[inputs[cnt]].values)
with open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), day.strftime('%d%m%y') + fname + '.html'), 'a') as f:
fig = go.Figure(data=[go.Table(header=dict(values=head, height=35, align=['left', 'center']),
cells=dict(values=body, height=28, align='left'))])
fig.layout.template = 'plotly_dark'
fig.layout.title = day.strftime('%d/%m/%y ') + title_string
# fig.show()
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn', default_height=height))
########################################################################################################
# Create countries bar
def countries_bar(indata, day, groupby=['Country'], inputs=None, count=30, fname='_World_Daily_Situation'):
if inputs is None:
inputs = indata.keys()[1:].values
with open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), day.strftime('%d%m%y') + fname + '.html'), 'a') as f:
for cnt in range(len(inputs)-1, -1, -1):
k = inputs[cnt]
cur_data = indata.sort_values(k, ascending=0).reset_index()
cur_data = cur_data[:count]
if k == 'Population' or k == 'Age':
add_str = ''
else:
add_str = ' Cases'
if cnt in range(4):
f_str = 'Total '
else:
f_str = ''
title_string = f_str + k + add_str + ' for ' + day.strftime('%d/%m/%y') + ': ' + str(count) \
+ ' countries from ' + str(indata.shape[0])
fig = px.bar(cur_data, x=groupby[0], y=k, color=groupby[0], text=k, template='ggplot2', log_y=True,
title=title_string) # , hover_name=groupby[0])
fig.layout.template = 'plotly_dark'
fig.update_traces(texttemplate='%{text:.2s}', textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
# fig.show()
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
# Create World Map
def create_map(data, world_pop, location=[31, 35]):
    # Default map start location: Israel
    # Affected places on the world map, including Confirmed, Active, Deaths and Recovered
worldmap = folium.Map(location=location, zoom_start=4, tiles='Stamen Terrain')
for lat, long, country, state, conf, death, recover, active in zip(data['Lat'], data['Long'], data['Country'],
data['State'], data['Confirmed'], data['Deaths'],
data['Recovered'], data['Active']):
cur_pop = world_pop[world_pop['Country'] == country].reset_index()
if isinstance(state, str) and state != country or not cur_pop.sum().any():
popup_str = str(country) + '<br>' + 'State: ' + str(state) + '<br>' +\
'PositiveCases:' + str(conf) + '<br>' +\
'Active:' + str(int(active)) + '<br>' +\
'Recovered:' + str(int(recover)) + '<br>' +\
'Deaths:' + str(death) + '<br>'
elif np.isnan(cur_pop['Age'][0]):
popup_str = str(country) + ' Population:' + str(cur_pop['Population'][0]) + '<br>'\
'Positive:' + str(conf) + '<br>' + \
'Active:' + str(int(active)) + '<br>' + \
'Recovered:' + str(int(recover)) + '<br>' + \
'Deaths:' + str(death) + '<br>'
else:
popup_str = str(country) + ' Population:' + str(cur_pop['Population'][0]) + \
' Median Age:' + str(int(cur_pop['Age'][0])) + '<br>' + \
'Positive:' + str(conf) + '<br>' + \
'Active:' + str(int(active)) + '<br>' + \
'Recovered:' + str(int(recover)) + '<br>' + \
'Deaths:' + str(death) + '<br>'
folium.CircleMarker([lat, long], radius=5, color='red', popup=popup_str, fill_color='red',
fill_opacity=0.7).add_to(worldmap)
# in IPython Notebook, Jupyter
worldmap
day = data.Date.max().strftime('%d%m%y')
worldmap.save(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), day + '_WorldMap.html'))
###################################################################################################
# bar plot according to cases
def case_groupby_bar(full_data, world_population, groupby=['Date', 'State', 'Country'],
inputs=['Confirmed', 'Recovered', 'Deaths', 'Active'], threshould=[10000, 1000, 100, 10000],
normalise=True, fname='_Cases_WorldData_Bars', factor=1e6):
daily = full_data.sort_values(groupby)
states = daily[groupby[1]].unique()
day = full_data.Date.max().strftime('%d/%m/%y')
array_relevant = []
for cnt in range(len(inputs)):
k = inputs[cnt]
with open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), full_data.Date.max().strftime('%d%m%y') + '_' + k + fname + '.html'), 'a') as f:
relevant, world_pop = group_extract_data(daily, world_population, groupby, k, threshould[cnt])
array_relevant.append(relevant)
srelevant = relevant.sort_values([groupby[0], groupby[1], k], ascending=[1, 1, 0])
srelevant.Date = [datetime.datetime.strftime(d, '%d/%m/%Y') for d in srelevant.Date]
num_contries = len(relevant[groupby[1]].unique())
title_string = k + ' Cases' + ' over ' + str(threshould[cnt]) + ' for ' + day + ': ' \
+ str(num_contries) + ' items from ' + str(len(states))
fig = px.bar(srelevant, y=groupby[1], x=k, color=groupby[1], template='ggplot2', orientation='h',
log_x=True, title=title_string, hover_name=groupby[1], animation_frame=groupby[0],
animation_group=groupby[1])
fig.layout.template = 'plotly_dark'
# soup = BeautifulSoup(ff)
height = str(np.max([100, num_contries/25 * 100])) + '%'
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn', default_width='100%', default_height=height))
# in IPython Notebook, Jupyter, etc
# fig.show()
# Another way to save
# fig.write_html(os.path.join(os.getcwd(), full_data.Date.max().strftime('%d%m%y') + '_WorldData.html'))
del fig
if normalise:
# Normalise to Population with factor of 1M
norm_srelevant = srelevant.copy()
norm_srelevant.loc[:, k] = (norm_srelevant[k].values * factor /
norm_srelevant['Population'].values).clip(0)
norm_srelevant.loc[norm_srelevant.loc[:, k] > 1, k] = norm_srelevant.loc[norm_srelevant.loc[:, k] > 1, k].astype(int)
num_contries = len(relevant[groupby[1]].unique())
title_string = k + ' Cases' + ' over ' + str(threshould[cnt]) + ' Normalized to ' + str(int(factor/1e6)) \
+ 'M population' + ' for ' + day + ': ' + str(num_contries) + ' items from ' \
+ str(len(states))
fig = px.bar(norm_srelevant, y=groupby[1], x=k, color=groupby[1], template='ggplot2', log_x=True,
orientation='h', title=title_string, hover_name=groupby[1], animation_frame=groupby[0],
animation_group=groupby[1])
fig.layout.template = 'plotly_dark'
height = str(np.max([100, num_contries/25 * 100])) + '%'
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn', default_width='100%', default_height=height))
del fig
# Normalised to inputs[0]: Confirmed
if cnt > 0:
                # probability of dying/recovering if infected by the virus (%)
norm_srelevant = srelevant.copy()
norm_srelevant.loc[:, k] = (norm_srelevant[k].values /
(norm_srelevant[inputs[0]].values + 1e-6)).clip(0)
norm_srelevant.loc[norm_srelevant[k] > 1, k] = 1
num_contries = len(relevant[groupby[1]].unique())
title_string = k + ' Cases' + ' over ' + str(threshould[cnt]) + ' Normalized to ' + inputs[0] \
+ ' for ' + day + ': ' + str(num_contries) + ' items from ' + str(len(states))\
+ '<br>"Probability" of ' + k + ' If Infected by the Virus'
fig = px.bar(norm_srelevant, y=groupby[1], x=k, color=groupby[1], template='ggplot2',
orientation='h', title=title_string, hover_name=groupby[1], animation_frame=groupby[0],
animation_group=groupby[1])
fig.layout.template = 'plotly_dark'
height = str(np.max([100, num_contries/25 * 100])) + '%'
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn', default_width='100%', default_height=height))
del fig
#################################################################################################
# scatter plot
def scatter_country_plot(full_data, inputs=['Confirmed', 'Recovered', 'Deaths', 'Active'], base='Date', prefix='',
fname=' Total Cases ', add_growth_rates=False, num_days_for_rate=14, annotations=None,
add_events_text=False, factor=1.0, mat_plt=False, day=''):
if not day:
if isinstance(full_data.Date.max(), str):
day = datetime.datetime.strptime(full_data.Date.max(), '%m/%d/%y').strftime('%d%m%y')
else:
day = full_data.Date.max().strftime('%d/%m/%y')
try:
not_country = 0
country = full_data['Country'].unique()
state = full_data['State'].unique()
except:
not_country = 1
if not_country or country.shape[0] > 1:
title_string = day + fname + 'Various Cases'
save_string = full_data.Date.max().strftime('%d%m%y') + fname + '.png'
elif state != country:
title_string = country[0] + ' -- ' + state[0] + ' - ' + day + ' ' + fname
save_string = full_data.Date.max().strftime('%d%m%y') + '_' + country[0] + '_' + state[0] + '_' +\
fname.replace(' ', '_') +'.png'
else:
title_string = state[0] + ' - ' + day + ' - ' + fname
save_string = full_data.Date.max().strftime('%d%m%y') + '_' + state[0] + '_' + fname.replace(' ', '_') +'.png'
# colors = plotly.colors.DEFAULT_PLOTLY_COLORS
colors = plotly.colors.qualitative.Light24
if '#FED4C4' in colors:
colors.remove('#FED4C4')
fig = make_subplots(rows=1, cols=2, subplot_titles=("Linear Plot", "Log Plot"))
fig_cnt = -1
customdata = None
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
y = (full_data[k] * factor).fillna(0)
# y[np.isinf(y)] = 0
if base != 'Date':
customdata = full_data.Date
if add_events_text:
trace = go.Scatter(x=full_data[base], y=y, mode="markers+lines+text", name=case_k, customdata=customdata,
text=full_data.Event, marker=dict(size=8, color=colors[cnt]))
else:
trace = go.Scatter(x=full_data[base], y=y, mode="markers+lines", name=case_k, customdata=customdata,
marker=dict(size=8, color=colors[cnt]))
fig.add_trace(trace, row=1, col=1)
fig_cnt +=1
fig.add_trace(trace, row=1, col=2)
fig_cnt += 1
if fig_cnt % 2 == 1:
fig.data[fig_cnt-1].update(showlegend=False)
fig.update_traces(mode="markers+lines", hovertemplate=None)
if base != 'Date':
fig.update_traces(hovertemplate='%{y}<br>%{customdata| %_d %b %Y}')
if add_growth_rates:
len_rate = full_data[k].shape[0]
grows_rate = full_data['Growth' + base].fillna(0).values / 100.0
grows_rate[np.isinf(grows_rate)] = 0
vec = np.arange(0, round(len_rate*1/3))
one_third = grows_rate[vec].mean()
if one_third > 0:
grow_one_third = one_third * full_data[base] + full_data[k][vec[0]] * factor
add_trace1 = go.Scatter(x=full_data[base], y=grow_one_third, mode="lines",
name='Linear estimation: ' + str(full_data[k][vec[0]]) + ' + '
+ str(round(one_third, 3)) + '*' + base + '<br>' + str(round(one_third, 3))
+ ' - estim on first onethird of ' + base,
line=dict(dash="dash", width=3))
fig.add_trace(add_trace1, row=1, col=1)
fig.add_trace(add_trace1, row=1, col=2)
# estimation for two last weeks
vec = np.arange(np.max([1, len_rate-num_days_for_rate]), len_rate)
last_week = (full_data[k][vec[-1]] - full_data[k][vec[0]]) \
/ np.max([1e-6, (full_data[base][vec[-1]] - full_data[base][vec[0]])])
if not np.isinf(last_week) and last_week > 0:
bias = int(full_data[k][vec[-1]] - full_data[base][vec[-1]] * last_week)
grow_one_third = last_week * full_data[base] + bias * factor
add_trace2 = go.Scatter(x=full_data[base][round(len_rate*1/3):], y=grow_one_third[round(len_rate*1/3):],
mode="lines", name='Linear estimation: ' + str(bias) + ' + '
+ str(round(last_week, 3)) + '*' + base + '<br>'
+ str(round(last_week, 3)) + ' - estim on '
+ str(num_days_for_rate) + ' last days from ' + base,
line=dict(dash="dash", width=3))
fig.add_trace(add_trace2, row=1, col=1)
fig.add_trace(add_trace2, row=1, col=2)
fig.update_yaxes(range=[full_data[k][0], full_data[k][len_rate-1]], row=1, col=1)
if annotations is not None:
fig.update_annotations(annotations)
fig.update_layout(template='plotly_dark', hovermode="x", title=title_string,
yaxis=dict(title=fname), xaxis=dict(title=base), yaxis2=dict(title=fname, type='log'),
xaxis2=dict(title=base))
# fig.show()
if mat_plt:
fig_mat, ax = plt.subplots(figsize=(8, 6))
colors = ['blue', 'green', 'yellow', 'magenta', 'cyan', 'red', 'black']
max_values = []
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
full_data[k] = full_data[k].fillna(0)
ax = sns.scatterplot(x=base, y=k, data=full_data, color=colors[cnt])
plt.plot(full_data[base], full_data[k], zorder=1, color=colors[cnt], label=k)
if not np.isinf(max(full_data[k])):
max_values.append(max(full_data[k]))
ax.set_xlim([full_data['Date'].iloc[0], full_data['Date'].iloc[-1] + datetime.timedelta(days=1)])
if max(full_data[prefix + inputs[0]]) > 1:
max_value = max(max_values) + np.diff(full_data[k]).max()
min_value = -1
else:
max_value = max(max_values) + np.diff(full_data[k]).max()
min_value = 0
ax.set_ylim([min_value, max_value])
plt.legend(frameon=True, fontsize=12)
plt.grid()
plt.ylabel(fname)
plt.title(title_string, fontsize=16)
fig_mat.autofmt_xdate()
plt.savefig(os.path.join(os.getcwd(), save_string))
return fig
###################################################################################################################
# country analysis script
def country_analysis(clean_db, world_pop, country='China', state='Hubei', plt=False, fromFirstConfirm=False,
events=None, num_days_for_rate=14):
if isinstance(clean_db.Date.max(), str):
day = datetime.datetime.strptime(clean_db.Date.max(), '%m%d%y').strftime('%d%m%y')
else:
day = clean_db.Date.max().strftime('%d%m%y')
data = clean_db[clean_db['Country'] == country]
data = data.sort_values(by='Date', ascending=1)
today = data.Date.iloc[-1].strftime('%d.%m.%y')
if state:
data = data[data['State'] == state]
elif (data.State.unique() == country).any():
data = data[data['State'] == country]
else:
data = data.groupby(['Date', 'Country']).sum()
if fromFirstConfirm:
data = (data.loc[data.loc[:, 'Confirmed'] > 0, :]).reset_index()
else:
data = data.reset_index()
data['Active'] = (data['Confirmed'] - data['Recovered'] - data['Deaths']).astype(int) # .clip(0)
inputs = ['Confirmed', 'Recovered', 'Deaths', 'Active']
data = growth_func(data, inputs, numDays=1, name='New', normalise=False)
data = growth_func(data, inputs, numDays=1, name='Growth', normalise=True)
cur_pop_data = world_pop[world_pop['Country'] == country].reset_index()
data.loc[:, 'Population'] = cur_pop_data['Population'].values[0]
data.loc[:, 'Age'] = cur_pop_data['Age'].values[0]
data = normalise_func(data, name='NormPop', normaliseTo='Population', factor=1e6, toRound=True)
data = normalise_func(data, inputs=['Deaths', 'Recovered', 'Active'], name='NormConfirm', normaliseTo='Confirmed',
factor=1, toRound=True)
add_event = False
if events is not None:
data = add_events(data, events)
add_event = True
# Growth Rate
# last_days = data['Confirmed'].shift()[-3:]
# gr = data['Confirmed'][-3:] / last_days
# gr[last_days == 0] = 0
growth_rate = (data['Confirmed'][-3:] / data['Confirmed'].shift()[-3:]).fillna(0).mean()
growth_death = (data['Deaths'][-3:] / data['Deaths'].shift()[-3:]).fillna(0).mean()
growth_recovered = (data['Recovered'][-3:] / data['Recovered'].shift()[-3:]).fillna(0).mean()
prediction_cnfm = 0
prediction_dth = 0
prediction_rcv = 0
expected_cnfrm = 0
expected_dth = 0
expected_rcv = 0
if growth_rate != 0 and growth_rate != 1 and not np.isinf(growth_rate):
prediction_cnfm = (np.log(2)/np.log(growth_rate)).clip(0).astype(int)
expected_cnfrm = (data['Confirmed'].iloc[-1] * growth_rate).astype(int)
if growth_death != 0 and growth_death != 1 and not np.isinf(growth_death):
prediction_dth = (np.log(2)/np.log(growth_death)).clip(0).astype(int)
expected_dth = (data['Deaths'].iloc[-1] * growth_death).astype(int)
if growth_recovered != 0 and growth_recovered != 1 and not np.isinf(growth_recovered):
prediction_rcv = (np.log(2)/np.log(growth_recovered)).clip(0).astype(int)
expected_rcv = (data['Recovered'].iloc[-1] * growth_recovered).astype(int)
print('\n', country)
print('Mean Growth Rate for 3 last days : Confirmed %.2f%%, Deaths %.2f%%, Recovered %.2f%%'
% (round((growth_rate-1)*100.0, 2), round((growth_death-1)*100.0, 2), round((growth_recovered-1)*100.0, 2)))
print('Today\'s %s [confirmed, death, recovered] : %d, %d, %d ' % (today, data['Confirmed'].iloc[-1],
data['Deaths'].iloc[-1], data['Recovered'].iloc[-1]))
print('Expected Tomorrow [confirmed, death, recovered] : %d, %d, %d ' %
(expected_cnfrm, expected_dth, expected_rcv))
# logarithm of x to the given base, calculated as log(x)/log(base)
days = [prediction_cnfm, prediction_dth, prediction_rcv]
print('Twice the number of cases given the current growth rate in %s days' % days)
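    # Doubling-time sketch: with a mean daily growth factor g, cases double
    # after log(2)/log(g) days; e.g. g = 1.10 (10% daily growth) gives
    # log(2)/log(1.1) ~= 7.3 days (truncated to 7 by the astype(int) above).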
annot = dict(xref='paper', yref='paper', x=0.2, y=0.95, align='left', font=dict(size=12),
text='Mean Growth Rate for 3 last days: Confirmed ' + str(round((growth_rate-1)*100.0, 2))
+ '%, Deaths ' + str(round((growth_death-1)*100.0, 2)) + '%, Recovered '
+ str(round((growth_recovered-1)*100.0, 2))
+ '%<br>Today\'s ' + str(today) + ' [confirmed, death, recovered] : '
+ str(data['Confirmed'].iloc[-1]) + ' ' + str(data['Deaths'].iloc[-1]) + ' '
+ str(data['Recovered'].iloc[-1].astype(int))
+ '<br>Expected Tomorrow [confirmed, death, recovered] : '
+ str(expected_cnfrm) + ' ' + str(expected_dth) + ' ' + str(expected_rcv)
+ '<br>Twice the number of cases given the current growth rate in '
+ str(prediction_cnfm) + ' ' + str(prediction_dth) + ' ' + str(prediction_rcv) + ' days')
if plt:
if country[-1] == '*':
country = country[:-1]
with open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), day + '_' + country + '_Various_Cases.html'), 'a') as f:
fsc1 = scatter_country_plot(data, add_events_text=add_event)
fsc2 = scatter_country_plot(data, prefix='New', fname='Daily New Cases', add_events_text=add_event)
fsc3 = scatter_country_plot(data, prefix='NormPop', fname='Total Cases Normalised for 1M Population',
add_events_text=add_event)
fsc4 = scatter_country_plot(data, inputs=['Deaths', 'Recovered', 'Active'], prefix='NormConfirm',
factor=100.0, add_events_text=add_event,
fname='Normalised for Total Confirmed Cases - '
'Probability to Case If infected by the virus (%)')
fsc5 = scatter_country_plot(data, prefix='Growth', add_events_text=add_event,
fname='Growing rate in % a day', annotations=annot)
fsc6 = scatter_country_plot(data, inputs=['Deaths'], add_events_text=add_event, base='Recovered',
add_growth_rates=True, num_days_for_rate=num_days_for_rate,
fname='Cases Ratio: Deaths vs Recovered')
f.write(fsc1.to_html(full_html=False, include_plotlyjs='cdn'))
f.write(fsc2.to_html(full_html=False, include_plotlyjs='cdn'))
f.write(fsc3.to_html(full_html=False, include_plotlyjs='cdn'))
f.write(fsc4.to_html(full_html=False, include_plotlyjs='cdn'))
f.write(fsc5.to_html(full_html=False, include_plotlyjs='cdn'))
f.write(fsc6.to_html(full_html=False, include_plotlyjs='cdn'))
return data
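# Usage sketch for country_analysis (illustrative only -- clean_db, world_pop
# and events_df are assumed to follow the column layout used in this script):
#
#     israel = country_analysis(clean_db, world_pop, country='Israel', state='',
#                               plt=True, events=events_df, num_days_for_rate=14)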
###########################################################################################################
# plot with thresholds on cases
def case_thresh_plot(full_data, threshDays=[10, 10], inputs=['Confirmed', 'Deaths'], prefix='', ref_cntry='Israel',
base='Date', factor=1.0, fname=' Corona virus situation since the ', annotations=[], log=False,
add_growth_rates=False, threshValues=[1, 1]):
if isinstance(full_data.Date.max(), str):
day = datetime.datetime.strptime(full_data.Date.max(), '%m/%d/%y').strftime('%d%m%y')
else:
day = full_data.Date.max().strftime('%d%m%y')
countries = full_data.Country.unique()
today = full_data.Date.iloc[-1].strftime('%d.%m.%y')
title_string = full_data.Date.max().strftime('%d/%m/%y') + ' - ' + str(len(countries)) + ' ' + fname
colors = plotly.colors.qualitative.Light24
if '#FED4C4' in colors:
colors.remove('#FED4C4')
ref_db = full_data[full_data.Country == ref_cntry]
ref_db = ref_db.sort_values([base])
fig = make_subplots(rows=1, cols=2, subplot_titles=(prefix + ' ' + inputs[0] + ' Cases',
prefix + ' ' + inputs[1] + ' Cases'))
showlegend = True
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
threshDay = threshDays[cnt]
threshValue = threshValues[cnt]
max_value = []
customdata = None
if cnt % 2:
showlegend = False
for cntry in range(len(countries)):
curr = full_data[full_data.Country == countries[cntry]]
thresh_data = curr.loc[curr.loc[:, k] * factor > threshValue, :]
thresh_data = thresh_data[threshDay:]
if thresh_data.values.any():
thresh_data = thresh_data.sort_values([base, k])
max_value.append(thresh_data[k].max())
customdata = thresh_data[base]
since_days = np.arange(0, thresh_data.shape[0])
trace = go.Scatter(x=since_days, y=thresh_data[k], mode="markers+lines", name=countries[cntry],
marker=dict(size=10, color=colors[cntry]), showlegend=showlegend, customdata=customdata)
fig.add_trace(trace, row=1, col=cnt+1)
fig.update_traces(hovertemplate=None)
fig.update_traces(hovertemplate='%{y}<br>%{customdata| %_d %b %Y}')
if add_growth_rates:
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
threshDay = threshDays[cnt]
threshValue = threshValues[cnt]
showlegend = True
if cnt % 2:
showlegend = False
threshed_ref_db = ref_db.loc[ref_db.loc[:, k] * factor > threshValue, :]
threshed_ref_db = threshed_ref_db[threshDay:]
if threshed_ref_db.values.any():
if 'Growth' + k not in threshed_ref_db.keys():
threshed_ref_db = growth_func(threshed_ref_db, [k])
grows_rate = threshed_ref_db['Growth' + k].fillna(0).values / 100.0 + 1
grows_rate[np.isinf(grows_rate)] = 0
growth_rate_mean = grows_rate[-3:].mean()
else:
threshed_ref_db = thresh_data.copy()
growth_rate_mean = (threshed_ref_db[k][-3:] / threshed_ref_db[k].shift()[-3:]).fillna(0).mean() # .clip(0)
if growth_rate_mean != 0 and growth_rate_mean != 1 and not np.isinf(growth_rate_mean) and not np.isnan(growth_rate_mean):
gr_days = (np.log(2) / np.log(growth_rate_mean)).astype(int)
prev_value = threshed_ref_db[k].iloc[-2].astype(int)
next_value = (threshed_ref_db[k].iloc[-1] * growth_rate_mean).astype(int)
else:
gr_days = 0
prev_value = 0
next_value = 0
growth_rate_mean = 0
if gr_days:
annot = dict(xref='paper', yref='paper', x=0.2 + cnt*0.55, y=0.87, align='left', font=dict(size=13),
text='Mean Growth Rate for 3 last days in ' + threshed_ref_db.Country.values[0] + ' : '
+ str(round((growth_rate_mean - 1) * 100.0, 2))
+ '%<br>Today\'s ' + str(today) + ' ' + inputs[cnt] + ': ' + str(prev_value)
+ '<br>Expected Tomorrow: ' + str(next_value)
+ '<br>Twice the number of cases given the current growth rate in ' + str(gr_days)
+ ' days')
fig.add_annotation(annot)
num_dates = threshed_ref_db[base].shape[0]
if num_dates:
since_days = np.arange(0, threshed_ref_db.shape[0])
max_value.append(threshed_ref_db[k].max())
thresh = threshed_ref_db[k].values[0]
grow15 = np.clip(thresh * (1.15 ** (np.linspace(1, num_dates, num_dates, endpoint=True))), 0, max(max_value)).astype(int)
fig.add_trace(go.Scatter(x=since_days, y=grow15, mode="lines", name='Grows 15% a day',
line=dict(dash="dash", width=3, color=colors[cntry+1]), showlegend=showlegend),
row=1, col=cnt+1) # threshed_ref_db[base]
grow08 = np.clip(thresh * (1.08 ** (np.linspace(1, num_dates, num_dates, endpoint=True))), 0, max(max_value)).astype(int)
fig.add_trace(go.Scatter(x=since_days, y=grow08, mode="lines", name='Grows 8% a day',
line=dict(dash="dashdot", width=3, color=colors[cntry+2]), showlegend=showlegend),
row=1, col=cnt+1)
if growth_rate_mean:
cur_value = threshed_ref_db[k].values[-3]
if cur_value > 0.8*max(max_value):
cur_value = min(max_value)
grow_cur = np.clip(cur_value * (growth_rate_mean ** (np.linspace(1, num_dates, num_dates, endpoint=True))), 0, max(max_value)).astype(int)
gr = int((growth_rate_mean - 1) * 100.0)
fig.add_trace(go.Scatter(x=since_days, y=grow_cur, mode="lines",
name='Grows ' + str(gr) + '% a day from last 3 days', showlegend=showlegend,
line=dict(dash="dot", width=3, color=colors[cntry+3])), row=1, col=cnt+1)
xaxis2 = 'Days since the ' + str(threshDays[1]) + 'th from the ' + str(threshValues[1]) + 'th case value'
xaxis1 = 'Days since the ' + str(threshDays[0]) + 'th from the ' + str(threshValues[0]) + 'th case value'
if log:
fig.update_layout(hovermode="x", title=title_string, template='plotly_dark',
xaxis=dict(title=xaxis1), xaxis2=dict(title=xaxis2),
yaxis=dict(title=prefix + ' ' + inputs[0] + ' Cases', type='log'),
yaxis2=dict(title=prefix + ' ' + inputs[1] + ' Cases', type='log'))
else:
fig.update_layout(hovermode="x", title=title_string, template='plotly_dark',
xaxis=dict(title=xaxis1), xaxis2=dict(title=xaxis2),
yaxis=dict(title=prefix + ' ' + inputs[0] + ' Cases'),
yaxis2=dict(title=prefix + ' ' + inputs[1] + ' Cases'))
return fig
###################################################################################################################
# line plot
def line_country_plot(full_data, inputs=['Confirmed', 'Recovered', 'Deaths', 'Active'], base='Date', prefixes=[''],
fname=' Total Cases ', add_growth_rates=False, annotations=None, add_events_text=False,
factor=1.0, mat_plt=False, day=''):
if not day:
if isinstance(full_data.Date.max(), str):
day = datetime.datetime.strptime(full_data.Date.max(), '%m/%d/%y').strftime('%d%m%y')
else:
day = full_data.Date.max().strftime('%d/%m/%y')
try:
not_country = 0
country = full_data['Country'].unique()
state = full_data['State'].unique()
except:
not_country = 1
if not_country or country.shape[0] > 1:
title_string = day + fname + 'Various Cases'
save_string = full_data.Date.max().strftime('%d%m%y') + fname + '.png'
elif state != country:
title_string = country[0] + ' -- ' + state[0] + ' - ' + day + ' ' + fname
save_string = full_data.Date.max().strftime('%d%m%y') + '_' + country[0] + '_' + state[0] + '_' +\
fname.replace(' ', '_') +'.png'
else:
title_string = state[0] + ' - ' + day + ' - ' + fname
save_string = full_data.Date.max().strftime('%d%m%y') + '_' + state[0] + '_' + fname.replace(' ', '_') +'.png'
fig = make_subplots(rows=1, cols=2, subplot_titles=("Linear Plot", "Log Plot"))
fig_cnt = -1
customdata = None
for pr_cnt in range(len(prefixes)):
prefix = prefixes[pr_cnt]
if prefix:
colors = ['blue', 'yellow', 'green', 'magenta', 'cyan', 'red', 'black']
else:
colors = plotly.colors.DEFAULT_PLOTLY_COLORS
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
if k in full_data.keys():
y = (full_data[k] * factor).fillna(0)
# y[np.isinf(y)] = 0
if base != 'Date':
customdata = full_data.Date
if add_events_text:
trace = go.Scatter(x=full_data[base], y=y, mode="markers+lines+text", name=k, customdata=customdata,
text=full_data.Event, marker=dict(size=4, color=colors[cnt]))
else:
trace = go.Scatter(x=full_data[base], y=y, mode="markers+lines", name=k, customdata=customdata,
marker=dict(size=4, color=colors[cnt]))
fig.add_trace(trace, row=1, col=1)
fig_cnt +=1
fig.add_trace(trace, row=1, col=2)
fig_cnt += 1
if fig_cnt % 2 == 1:
fig.data[fig_cnt-1].update(showlegend=False)
fig.update_traces(mode="markers+lines", hovertemplate=None)
if base != 'Date':
fig.update_traces(hovertemplate='%{y}<br>%{customdata| %_d %b %Y}')
if add_growth_rates:
grows_rate = full_data['Growth' + base].fillna(0).values / 100.0
grows_rate[np.isinf(grows_rate)] = 0
len_rate = len(grows_rate)
vec = np.arange(0, round(len_rate*1/3))
one_third = grows_rate[vec].mean()
if one_third > 0:
grow_one_third = one_third * full_data[base] + full_data[k][vec[0]] * factor
add_trace1 = go.Scatter(x=full_data[base], y=grow_one_third, mode="lines",
name='Linear estimation: ' + str(full_data[k][vec[0]]) + ' + '
+ str(round(one_third, 2)) + '*' + base + '<br>' + str(round(one_third, 2))
+ ' - estim on first onethird of ' + base,
line=dict(dash="dash", width=3))
fig.add_trace(add_trace1, row=1, col=1)
fig.add_trace(add_trace1, row=1, col=2)
grows_rate = full_data['GrowthConfirmed'].fillna(0).values / 100.0
grows_rate[np.isinf(grows_rate)] = 0
len_rate = len(grows_rate)
vec = np.arange(round(0.9*len_rate), len_rate)
one_third = grows_rate[vec].mean()
if one_third > 0:
grow_one_third = one_third * full_data[base] + full_data[k][vec[0]-round(0.1*len_rate)] * factor
add_trace2 = go.Scatter(x=full_data[base][round(len_rate*1/3):], y=grow_one_third[round(len_rate*1/3):],
mode="lines", name='Linear estimation: '
+ str(full_data[k][vec[0]-round(0.1*len_rate)]) + ' + '
+ str(round(one_third, 2)) + '*' + base + '<br>'
+ str(round(one_third, 2)) + ' - estim on 0.1 last from Confirmed',
line=dict(dash="dash", width=3))
fig.add_trace(add_trace2, row=1, col=1)
fig.add_trace(add_trace2, row=1, col=2)
fig.update_yaxes(range=[full_data[k][0], full_data[k][len_rate-1]], row=1, col=1)
if annotations is not None:
fig.update_annotations(annotations)
fig.update_layout(template='plotly_dark', hovermode="x", title=title_string,
yaxis=dict(title=fname), xaxis=dict(title=base), yaxis2=dict(title=fname, type='log'),
xaxis2=dict(title=base))
if mat_plt:
fig_mat, ax = plt.subplots(figsize=(8, 6))
colors = ['blue', 'green', 'yellow', 'magenta', 'cyan', 'red', 'black']
max_values = []
for cnt in range(len(inputs)):
case_k = inputs[cnt]
k = prefix + case_k
full_data[k] = full_data[k].fillna(0)
ax = sns.scatterplot(x=base, y=k, data=full_data, color=colors[cnt])
plt.plot(full_data[base], full_data[k], zorder=1, color=colors[cnt], label=k)
if not np.isinf(max(full_data[k])):
max_values.append(max(full_data[k]))
ax.set_xlim([full_data['Date'].iloc[0], full_data['Date'].iloc[-1] + datetime.timedelta(days=1)])
if max(full_data[prefix + inputs[0]]) > 1:
max_value = max(max_values) + np.diff(full_data[k]).max()
min_value = -1
else:
max_value = max(max_values) + np.diff(full_data[k]).max()
min_value = 0
ax.set_ylim([min_value, max_value])
plt.legend(frameon=True, fontsize=12)
plt.grid()
plt.ylabel(fname)
plt.title(title_string, fontsize=16)
fig_mat.autofmt_xdate()
plt.savefig(os.path.join(os.getcwd(), save_string))
return fig
###################################################################################################################
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.log",
"seaborn.scatterplot",
"datetime.timedelta",
"folium.CircleMarker",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.diff",
"folium.Map",
"numpy.max",
"numpy.linspace",
"numpy.isinf",
"plotly.subplots.make_subplots",
"numpy.isnan",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"plotly.express.bar",
"time.strftime",
"os.getcwd",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"datetime.datetime.strftime",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((554, 586), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (567, 586), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1839), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (1823, 1839), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (2211, 2219), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2244), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (2235, 2244), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2287), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'fontsize': '(12)'}), '(frameon=True, fontsize=12)\n', (2260, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2334), 'matplotlib.pyplot.title', 'plt.title', (['title_string[:-4]'], {'fontsize': '(30)'}), '(title_string[:-4], fontsize=30)\n', (2302, 2334), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2371, 2373), True, 'import matplotlib.pyplot as plt\n'), ((9303, 9370), 'folium.Map', 'folium.Map', ([], {'location': 'location', 'zoom_start': '(4)', 'tiles': '"""Stamen Terrain"""'}), "(location=location, zoom_start=4, tiles='Stamen Terrain')\n", (9313, 9370), False, 'import folium\n'), ((18121, 18194), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'subplot_titles': "('Linear Plot', 'Log Plot')"}), "(rows=1, cols=2, subplot_titles=('Linear Plot', 'Log Plot'))\n", (18134, 18194), False, 'from plotly.subplots import make_subplots\n'), ((31423, 31547), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'subplot_titles': "(prefix + ' ' + inputs[0] + ' Cases', prefix + ' ' + inputs[1] + ' Cases')"}), "(rows=1, cols=2, subplot_titles=(prefix + ' ' + inputs[0] +\n ' Cases', prefix + ' ' + inputs[1] + ' Cases'))\n", (31436, 31547), False, 'from plotly.subplots import make_subplots\n'), ((39469, 39542), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'subplot_titles': "('Linear Plot', 'Log Plot')"}), "(rows=1, cols=2, subplot_titles=('Linear Plot', 'Log Plot'))\n", (39482, 39542), False, 'from plotly.subplots import make_subplots\n'), ((1495, 1622), 'plotly.express.bar', 'px.bar', (['full_data'], {'x': 'groupby', 'y': 'inputs', 'color': 'inputs', 'template': '"""ggplot2"""', 'log_y': '(True)', 'title': 'title_string', 'hover_name': 'inputs'}), "(full_data, x=groupby, y=inputs, color=inputs, template='ggplot2',\n log_y=True, title=title_string, hover_name=inputs)\n", (1501, 1622), True, 'import plotly.express as px\n'), ((2046, 2148), 'matplotlib.pyplot.bar', 'plt.bar', (['full_data[groupby]', 'full_data[k]'], {'label': 'k', 'alpha': 'alphas[cnt]', 'log': 'log', 'color': 'colors[cnt]'}), '(full_data[groupby], full_data[k], label=k, alpha=alphas[cnt], log=\n log, color=colors[cnt])\n', (2053, 2148), True, 'import matplotlib.pyplot as plt\n'), ((22081, 22109), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (22093, 22109), True, 'import matplotlib.pyplot as plt\n'), ((23078, 23115), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'fontsize': '(12)'}), '(frameon=True, fontsize=12)\n', (23088, 23115), True, 'import matplotlib.pyplot as plt\n'), ((23125, 23135), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (23133, 
23135), True, 'import matplotlib.pyplot as plt\n'), ((23145, 23162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['fname'], {}), '(fname)\n', (23155, 23162), True, 'import matplotlib.pyplot as plt\n'), ((23172, 23208), 'matplotlib.pyplot.title', 'plt.title', (['title_string'], {'fontsize': '(16)'}), '(title_string, fontsize=16)\n', (23181, 23208), True, 'import matplotlib.pyplot as plt\n'), ((43865, 43893), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (43877, 43893), True, 'import matplotlib.pyplot as plt\n'), ((44862, 44899), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(True)', 'fontsize': '(12)'}), '(frameon=True, fontsize=12)\n', (44872, 44899), True, 'import matplotlib.pyplot as plt\n'), ((44909, 44919), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (44917, 44919), True, 'import matplotlib.pyplot as plt\n'), ((44929, 44946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['fname'], {}), '(fname)\n', (44939, 44946), True, 'import matplotlib.pyplot as plt\n'), ((44956, 44992), 'matplotlib.pyplot.title', 'plt.title', (['title_string'], {'fontsize': '(16)'}), '(title_string, fontsize=16)\n', (44965, 44992), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2417), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2415, 2417), False, 'import os\n'), ((8615, 8733), 'plotly.express.bar', 'px.bar', (['cur_data'], {'x': 'groupby[0]', 'y': 'k', 'color': 'groupby[0]', 'text': 'k', 'template': '"""ggplot2"""', 'log_y': '(True)', 'title': 'title_string'}), "(cur_data, x=groupby[0], y=k, color=groupby[0], text=k, template=\n 'ggplot2', log_y=True, title=title_string)\n", (8621, 8733), True, 'import plotly.express as px\n'), ((10238, 10265), 'numpy.isnan', 'np.isnan', (["cur_pop['Age'][0]"], {}), "(cur_pop['Age'][0])\n", (10246, 10265), True, 'import numpy as np\n'), ((11359, 11370), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11368, 11370), False, 'import os\n'), ((11372, 11395), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (11385, 11395), False, 'import time\n'), ((12838, 13048), 'plotly.express.bar', 'px.bar', (['srelevant'], {'y': 'groupby[1]', 'x': 'k', 'color': 'groupby[1]', 'template': '"""ggplot2"""', 'orientation': '"""h"""', 'log_x': '(True)', 'title': 'title_string', 'hover_name': 'groupby[1]', 'animation_frame': 'groupby[0]', 'animation_group': 'groupby[1]'}), "(srelevant, y=groupby[1], x=k, color=groupby[1], template='ggplot2',\n orientation='h', log_x=True, title=title_string, hover_name=groupby[1],\n animation_frame=groupby[0], animation_group=groupby[1])\n", (12844, 13048), True, 'import plotly.express as px\n'), ((22392, 22455), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'base', 'y': 'k', 'data': 'full_data', 'color': 'colors[cnt]'}), '(x=base, y=k, data=full_data, color=colors[cnt])\n', (22407, 22455), True, 'import seaborn as sns\n'), ((22469, 22546), 'matplotlib.pyplot.plot', 'plt.plot', (['full_data[base]', 'full_data[k]'], {'zorder': '(1)', 'color': 'colors[cnt]', 'label': 'k'}), '(full_data[base], full_data[k], zorder=1, color=colors[cnt], label=k)\n', (22477, 22546), True, 'import matplotlib.pyplot as plt\n'), ((25917, 25938), 'numpy.isinf', 'np.isinf', (['growth_rate'], {}), '(growth_rate)\n', (25925, 25938), True, 'import numpy as np\n'), ((26156, 26178), 'numpy.isinf', 'np.isinf', (['growth_death'], {}), '(growth_death)\n', (26164, 26178), True, 'import numpy as np\n'), ((26400, 26426), 'numpy.isinf', 'np.isinf', (['growth_recovered'], {}), '(growth_recovered)\n', 
(26408, 26426), True, 'import numpy as np\n'), ((44176, 44239), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'base', 'y': 'k', 'data': 'full_data', 'color': 'colors[cnt]'}), '(x=base, y=k, data=full_data, color=colors[cnt])\n', (44191, 44239), True, 'import seaborn as sns\n'), ((44253, 44330), 'matplotlib.pyplot.plot', 'plt.plot', (['full_data[base]', 'full_data[k]'], {'zorder': '(1)', 'color': 'colors[cnt]', 'label': 'k'}), '(full_data[base], full_data[k], zorder=1, color=colors[cnt], label=k)\n', (44261, 44330), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1387), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1385, 1387), False, 'import os\n'), ((1389, 1412), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (1402, 1412), False, 'import time\n'), ((5038, 5086), 'pandas.concat', 'pd.concat', (['[world_pop, curr]'], {'axis': '(0)', 'sort': '(False)'}), '([world_pop, curr], axis=0, sort=False)\n', (5047, 5086), True, 'import pandas as pd\n'), ((7023, 7034), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7032, 7034), False, 'import os\n'), ((7036, 7059), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (7049, 7059), False, 'import time\n'), ((7878, 7889), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7887, 7889), False, 'import os\n'), ((7891, 7914), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (7904, 7914), False, 'import time\n'), ((11076, 11188), 'folium.CircleMarker', 'folium.CircleMarker', (['[lat, long]'], {'radius': '(5)', 'color': '"""red"""', 'popup': 'popup_str', 'fill_color': '"""red"""', 'fill_opacity': '(0.7)'}), "([lat, long], radius=5, color='red', popup=popup_str,\n fill_color='red', fill_opacity=0.7)\n", (11095, 11188), False, 'import folium\n'), ((12503, 12544), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['d', '"""%d/%m/%Y"""'], {}), "(d, '%d/%m/%Y')\n", (12529, 12544), False, 'import datetime\n'), ((14448, 14665), 'plotly.express.bar', 'px.bar', (['norm_srelevant'], {'y': 'groupby[1]', 'x': 'k', 'color': 'groupby[1]', 'template': '"""ggplot2"""', 'log_x': '(True)', 'orientation': '"""h"""', 'title': 'title_string', 'hover_name': 'groupby[1]', 'animation_frame': 'groupby[0]', 'animation_group': 'groupby[1]'}), "(norm_srelevant, y=groupby[1], x=k, color=groupby[1], template=\n 'ggplot2', log_x=True, orientation='h', title=title_string, hover_name=\n groupby[1], animation_frame=groupby[0], animation_group=groupby[1])\n", (14454, 14665), True, 'import plotly.express as px\n'), ((19501, 19521), 'numpy.isinf', 'np.isinf', (['grows_rate'], {}), '(grows_rate)\n', (19509, 19521), True, 'import numpy as np\n'), ((20419, 20460), 'numpy.max', 'np.max', (['[1, len_rate - num_days_for_rate]'], {}), '([1, len_rate - num_days_for_rate])\n', (20425, 20460), True, 'import numpy as np\n'), ((20571, 20638), 'numpy.max', 'np.max', (['[1e-06, full_data[base][vec[-1]] - full_data[base][vec[0]]]'], {}), '([1e-06, full_data[base][vec[-1]] - full_data[base][vec[0]]])\n', (20577, 20638), True, 'import numpy as np\n'), ((23276, 23287), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (23285, 23287), False, 'import os\n'), ((32394, 32428), 'numpy.arange', 'np.arange', (['(0)', 'thresh_data.shape[0]'], {}), '(0, thresh_data.shape[0])\n', (32403, 32428), True, 'import numpy as np\n'), ((35288, 35326), 'numpy.arange', 'np.arange', (['(0)', 'threshed_ref_db.shape[0]'], {}), '(0, threshed_ref_db.shape[0])\n', (35297, 35326), True, 'import numpy as np\n'), ((45060, 45071), 'os.getcwd', 'os.getcwd', ([], {}), 
'()\n', (45069, 45071), False, 'import os\n'), ((12104, 12115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12113, 12115), False, 'import os\n'), ((12117, 12140), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (12130, 12140), False, 'import time\n'), ((13208, 13246), 'numpy.max', 'np.max', (['[100, num_contries / 25 * 100]'], {}), '([100, num_contries / 25 * 100])\n', (13214, 13246), True, 'import numpy as np\n'), ((15893, 16097), 'plotly.express.bar', 'px.bar', (['norm_srelevant'], {'y': 'groupby[1]', 'x': 'k', 'color': 'groupby[1]', 'template': '"""ggplot2"""', 'orientation': '"""h"""', 'title': 'title_string', 'hover_name': 'groupby[1]', 'animation_frame': 'groupby[0]', 'animation_group': 'groupby[1]'}), "(norm_srelevant, y=groupby[1], x=k, color=groupby[1], template=\n 'ggplot2', orientation='h', title=title_string, hover_name=groupby[1],\n animation_frame=groupby[0], animation_group=groupby[1])\n", (15899, 16097), True, 'import plotly.express as px\n'), ((20660, 20679), 'numpy.isinf', 'np.isinf', (['last_week'], {}), '(last_week)\n', (20668, 20679), True, 'import numpy as np\n'), ((22729, 22755), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (22747, 22755), False, 'import datetime\n'), ((28534, 28545), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (28543, 28545), False, 'import os\n'), ((28547, 28570), 'time.strftime', 'time.strftime', (['"""%d%m%Y"""'], {}), "('%d%m%Y')\n", (28560, 28570), False, 'import time\n'), ((33972, 33998), 'numpy.isinf', 'np.isinf', (['growth_rate_mean'], {}), '(growth_rate_mean)\n', (33980, 33998), True, 'import numpy as np\n'), ((34007, 34033), 'numpy.isnan', 'np.isnan', (['growth_rate_mean'], {}), '(growth_rate_mean)\n', (34015, 34033), True, 'import numpy as np\n'), ((41250, 41270), 'numpy.isinf', 'np.isinf', (['grows_rate'], {}), '(grows_rate)\n', (41258, 41270), True, 'import numpy as np\n'), ((42294, 42314), 'numpy.isinf', 'np.isinf', (['grows_rate'], {}), '(grows_rate)\n', (42302, 42314), True, 'import numpy as np\n'), ((44513, 44539), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (44531, 44539), False, 'import datetime\n'), ((14799, 14837), 'numpy.max', 'np.max', (['[100, num_contries / 25 * 100]'], {}), '([100, num_contries / 25 * 100])\n', (14805, 14837), True, 'import numpy as np\n'), ((22855, 22876), 'numpy.diff', 'np.diff', (['full_data[k]'], {}), '(full_data[k])\n', (22862, 22876), True, 'import numpy as np\n'), ((22969, 22990), 'numpy.diff', 'np.diff', (['full_data[k]'], {}), '(full_data[k])\n', (22976, 22990), True, 'import numpy as np\n'), ((33611, 33631), 'numpy.isinf', 'np.isinf', (['grows_rate'], {}), '(grows_rate)\n', (33619, 33631), True, 'import numpy as np\n'), ((44639, 44660), 'numpy.diff', 'np.diff', (['full_data[k]'], {}), '(full_data[k])\n', (44646, 44660), True, 'import numpy as np\n'), ((44753, 44774), 'numpy.diff', 'np.diff', (['full_data[k]'], {}), '(full_data[k])\n', (44760, 44774), True, 'import numpy as np\n'), ((16248, 16286), 'numpy.max', 'np.max', (['[100, num_contries / 25 * 100]'], {}), '([100, num_contries / 25 * 100])\n', (16254, 16286), True, 'import numpy as np\n'), ((25968, 25977), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (25974, 25977), True, 'import numpy as np\n'), ((25978, 25997), 'numpy.log', 'np.log', (['growth_rate'], {}), '(growth_rate)\n', (25984, 25997), True, 'import numpy as np\n'), ((26207, 26216), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (26213, 26216), True, 'import numpy as np\n'), ((26217, 
26237), 'numpy.log', 'np.log', (['growth_death'], {}), '(growth_death)\n', (26223, 26237), True, 'import numpy as np\n'), ((26455, 26464), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (26461, 26464), True, 'import numpy as np\n'), ((26465, 26489), 'numpy.log', 'np.log', (['growth_recovered'], {}), '(growth_recovered)\n', (26471, 26489), True, 'import numpy as np\n'), ((34063, 34072), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (34069, 34072), True, 'import numpy as np\n'), ((34075, 34099), 'numpy.log', 'np.log', (['growth_rate_mean'], {}), '(growth_rate_mean)\n', (34081, 34099), True, 'import numpy as np\n'), ((35495, 35546), 'numpy.linspace', 'np.linspace', (['(1)', 'num_dates', 'num_dates'], {'endpoint': '(True)'}), '(1, num_dates, num_dates, endpoint=True)\n', (35506, 35546), True, 'import numpy as np\n'), ((35936, 35987), 'numpy.linspace', 'np.linspace', (['(1)', 'num_dates', 'num_dates'], {'endpoint': '(True)'}), '(1, num_dates, num_dates, endpoint=True)\n', (35947, 35987), True, 'import numpy as np\n'), ((36564, 36615), 'numpy.linspace', 'np.linspace', (['(1)', 'num_dates', 'num_dates'], {'endpoint': '(True)'}), '(1, num_dates, num_dates, endpoint=True)\n', (36575, 36615), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
# https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains all functions to generate the training and testing datasets for the Data Fusion Contest
data generated with Beefrost
"""
import os
import glob
import argparse
import numpy as np
import h5py
import rasterio
from numba import njit
@njit()
def compute_mask(disp_map, mask_ref, mask_sec, patch_size):
"""
    Masks invalid pixels: pixels that fall outside the epipolar images
:param disp_map: disparity map
:type disp_map: 2D numpy array
    :param mask_ref: left epipolar image mask, with the convention that 0 marks a valid pixel in the epipolar image
:type mask_ref: 2D numpy array
    :param mask_sec: right epipolar image mask, with the convention that 0 marks a valid pixel in the epipolar image
:type mask_sec: 2D numpy array
:param patch_size: patch size
:type patch_size: int
:return: the disparity map with invalid pixels = -9999
:rtype: 2D numpy array
"""
radius = int(patch_size / 2)
nb_row, nb_col = disp_map.shape
for row in range(radius, nb_row - radius):
for col in range(radius, nb_col - radius):
disp = disp_map[row, col]
# Matching in the right image
match = int(col + disp)
# Negative matching for training, with maximum negative displacement for creating negative example
neg_match = match - 6
# If negative example is inside right epipolar image
if radius < neg_match < (nb_col - radius) and radius < neg_match < (nb_row - radius):
patch_ref = mask_ref[(row - radius) : (row + radius + 1), (col - radius) : (col + radius + 1)]
patch_sec = mask_sec[(row - radius) : (row + radius + 1), (match - radius) : (match + radius + 1)]
# Invalid patch : outside left epipolar image
if np.sum(patch_ref != 0) != 0:
disp_map[row, col] = -9999
# Invalid patch : outside right epipolar image
if np.sum(patch_sec != 0) != 0:
disp_map[row, col] = -9999
neg_patch_sec = mask_sec[
(row - radius) : (row + radius + 1), (neg_match - radius) : (neg_match + radius + 1)
]
# Invalid patch : outside right epipolar image
if np.sum(neg_patch_sec != 0) != 0:
disp_map[row, col] = -9999
# Negative example cannot be created
else:
disp_map[row, col] = -9999
return disp_map
def save_dataset(img, sample, img_name, img_file, sample_file):
"""
    Save the sample in hdf5 files:
    - images are saved in the img_file file: creation of a dataset for each image pair
    - samples are saved in the sample_file file: creation of a dataset containing the valid pixels
    The dataset name is the ground truth file (for example: JAX_004_009_007_LEFT_DSP.tif)
:param img: images
:type img: np.array (2, 1024, 1024, 3) ( 2 = left image, right image)
:param sample: samples of the image
:type sample: np.array(number of valid pixels for all the images, 4).
The last dimension is : number of the image, row, col, disparity for the pixel p(row, col)
    :param img_name: name of the current image pair (name of the ground truth disparity file)
:type img_name: string
:param img_file: image database file
:type img_file: hdf5 file
:param sample_file: training or testing database file
:type sample_file: hdf5 file
"""
sample_file.create_dataset(img_name, data=sample)
img_file.create_dataset(img_name, data=img)
def fusion_contest(input_dir, output):
"""
Preprocess and create data fusion contest hdf5 database
:param input_dir: path to the input directory
:type input_dir: string
:param output: output directory
:type output: string
"""
img_file = h5py.File(os.path.join(output, "images_training_dataset_fusion_contest.hdf5"), "w")
training_file = h5py.File(os.path.join(output, "training_dataset_fusion_contest.hdf5"), "w")
img_testing_file = h5py.File(os.path.join(output, "images_testing_dataset_fusion_contest.hdf5"), "w")
testing_file = h5py.File(os.path.join(output, "testing_dataset_fusion_contest.hdf5"), "w")
gt = glob.glob(input_dir + "/*/left_epipolar_disp.tif")
nb_img = len(gt)
# Shuffle the file list
indices = np.arange(nb_img)
np.random.seed(0)
np.random.shuffle(indices)
gt = [gt[i] for i in indices]
# 90 % Training, 10 % Testing
end_training = int(nb_img * 0.9)
for num_image in range(nb_img):
name_image = gt[num_image].split(input_dir)[1].split("/")[1]
path_image = gt[num_image].split("left_epipolar_disp.tif")[0]
# Read images
left = rasterio.open(os.path.join(path_image, "left_epipolar_image.tif")).read(1)
left_mask = rasterio.open(os.path.join(path_image, "left_epipolar_mask.tif")).read(1)
right = rasterio.open(os.path.join(path_image, "right_epipolar_image.tif")).read(1)
right_mask = rasterio.open(os.path.join(path_image, "right_epipolar_mask.tif")).read(1)
dsp = rasterio.open(gt[num_image]).read(1)
mask_dsp = rasterio.open(os.path.join(path_image, "left_epipolar_disp_mask.tif")).read(1)
cross_checking = rasterio.open(os.path.join(path_image, "valid_disp.tif")).read(1)
# Mask disparities
mask_disp = compute_mask(dsp, left_mask, right_mask, 11)
# Remove invalid pixels : invalidated by cross-checking mask and with invalid disparity
mask_disp[np.where(cross_checking == 255)] = -9999
mask_disp[np.where(mask_dsp == 255)] = -9999
# Change the disparity convention to ref(x,y) = sec(x-d,y)
mask_disp *= -1
        # Keep only pixels with a valid disparity (invalid pixels were set to 9999 by the sign flip above)
valid_row, valid_col = np.where(mask_disp != 9999)
# Red band selection
left = np.squeeze(left[0, :, :])
right = np.squeeze(right[0, :, :])
# Normalization
valid_left = np.where(left_mask == 0)
valid_right = np.where(right_mask == 0)
left[valid_left] = (left[valid_left] - left[valid_left].mean()) / left[valid_left].std()
right[valid_right] = (right[valid_right] - right[valid_right].mean()) / right[valid_right].std()
# data np.array of shape ( number of valid pixels the current image, 4 )
# 4 = number of the image, row, col, disparity for the pixel p(row, col)
valid_disp = np.column_stack(
(np.zeros_like(valid_row) + num_image, valid_row, valid_col, mask_disp[valid_row, valid_col])
).astype(np.float32)
        # img of shape (2, nb_row, nb_col): stacked left and right red-band images
img = np.stack((left, right), axis=0)
if num_image > end_training:
save_dataset(img, valid_disp, name_image, img_testing_file, testing_file)
else:
save_dataset(img, valid_disp, name_image, img_file, training_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Script for creating the training data fusion contest database. "
"it will create the following files: "
"- training_dataset_fusion_contest.hdf5, which contains training"
" coordinates of the valid pixels and their disparity."
"- testing_dataset_fusion_contest.hdf5, which contains testing "
"coordinates of the valid pixels and their disparity."
"- images_training_dataset_fusion_contest.hdf5, which contains the red"
" band normalized training images"
"- images_testing_dataset_fusion_contest.hdf5, which contains the red"
" band normalized testing images"
)
parser.add_argument("input_data", help="Path to the input directory containing the data")
parser.add_argument("output_dir", help="Path to the output directory ")
args = parser.parse_args()
fusion_contest(args.input_data, args.output_dir)
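# Example invocation (sketch; the script filename below is hypothetical). The input
# directory is expected to contain one sub-directory per image pair holding the
# epipolar images, masks and disparity maps opened above:
#     python create_fusion_contest_dataset.py /path/to/beefrost/output /path/to/hdf5/output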
|
[
"argparse.ArgumentParser",
"numpy.arange",
"numpy.where",
"rasterio.open",
"numba.njit",
"os.path.join",
"numpy.squeeze",
"numpy.stack",
"numpy.sum",
"numpy.random.seed",
"numpy.zeros_like",
"glob.glob",
"numpy.random.shuffle"
] |
[((987, 993), 'numba.njit', 'njit', ([], {}), '()\n', (991, 993), False, 'from numba import njit\n'), ((4961, 5011), 'glob.glob', 'glob.glob', (["(input_dir + '/*/left_epipolar_disp.tif')"], {}), "(input_dir + '/*/left_epipolar_disp.tif')\n", (4970, 5011), False, 'import glob\n'), ((5076, 5093), 'numpy.arange', 'np.arange', (['nb_img'], {}), '(nb_img)\n', (5085, 5093), True, 'import numpy as np\n'), ((5098, 5115), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5112, 5115), True, 'import numpy as np\n'), ((5120, 5146), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (5137, 5146), True, 'import numpy as np\n'), ((7669, 8247), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for creating the training data fusion contest database. it will create the following files: - training_dataset_fusion_contest.hdf5, which contains training coordinates of the valid pixels and their disparity.- testing_dataset_fusion_contest.hdf5, which contains testing coordinates of the valid pixels and their disparity.- images_training_dataset_fusion_contest.hdf5, which contains the red band normalized training images- images_testing_dataset_fusion_contest.hdf5, which contains the red band normalized testing images"""'}), "(description=\n 'Script for creating the training data fusion contest database. it will create the following files: - training_dataset_fusion_contest.hdf5, which contains training coordinates of the valid pixels and their disparity.- testing_dataset_fusion_contest.hdf5, which contains testing coordinates of the valid pixels and their disparity.- images_training_dataset_fusion_contest.hdf5, which contains the red band normalized training images- images_testing_dataset_fusion_contest.hdf5, which contains the red band normalized testing images'\n )\n", (7692, 8247), False, 'import argparse\n'), ((4579, 4646), 'os.path.join', 'os.path.join', (['output', '"""images_training_dataset_fusion_contest.hdf5"""'], {}), "(output, 'images_training_dataset_fusion_contest.hdf5')\n", (4591, 4646), False, 'import os\n'), ((4683, 4743), 'os.path.join', 'os.path.join', (['output', '"""training_dataset_fusion_contest.hdf5"""'], {}), "(output, 'training_dataset_fusion_contest.hdf5')\n", (4695, 4743), False, 'import os\n'), ((4783, 4849), 'os.path.join', 'os.path.join', (['output', '"""images_testing_dataset_fusion_contest.hdf5"""'], {}), "(output, 'images_testing_dataset_fusion_contest.hdf5')\n", (4795, 4849), False, 'import os\n'), ((4885, 4944), 'os.path.join', 'os.path.join', (['output', '"""testing_dataset_fusion_contest.hdf5"""'], {}), "(output, 'testing_dataset_fusion_contest.hdf5')\n", (4897, 4944), False, 'import os\n'), ((6523, 6550), 'numpy.where', 'np.where', (['(mask_disp != 9999)'], {}), '(mask_disp != 9999)\n', (6531, 6550), True, 'import numpy as np\n'), ((6596, 6621), 'numpy.squeeze', 'np.squeeze', (['left[0, :, :]'], {}), '(left[0, :, :])\n', (6606, 6621), True, 'import numpy as np\n'), ((6638, 6664), 'numpy.squeeze', 'np.squeeze', (['right[0, :, :]'], {}), '(right[0, :, :])\n', (6648, 6664), True, 'import numpy as np\n'), ((6711, 6735), 'numpy.where', 'np.where', (['(left_mask == 0)'], {}), '(left_mask == 0)\n', (6719, 6735), True, 'import numpy as np\n'), ((6758, 6783), 'numpy.where', 'np.where', (['(right_mask == 0)'], {}), '(right_mask == 0)\n', (6766, 6783), True, 'import numpy as np\n'), ((7379, 7410), 'numpy.stack', 'np.stack', (['(left, right)'], {'axis': '(0)'}), '((left, right), axis=0)\n', (7387, 7410), True, 
'import numpy as np\n'), ((6271, 6302), 'numpy.where', 'np.where', (['(cross_checking == 255)'], {}), '(cross_checking == 255)\n', (6279, 6302), True, 'import numpy as np\n'), ((6330, 6355), 'numpy.where', 'np.where', (['(mask_dsp == 255)'], {}), '(mask_dsp == 255)\n', (6338, 6355), True, 'import numpy as np\n'), ((5838, 5866), 'rasterio.open', 'rasterio.open', (['gt[num_image]'], {}), '(gt[num_image])\n', (5851, 5866), False, 'import rasterio\n'), ((2522, 2544), 'numpy.sum', 'np.sum', (['(patch_ref != 0)'], {}), '(patch_ref != 0)\n', (2528, 2544), True, 'import numpy as np\n'), ((2681, 2703), 'numpy.sum', 'np.sum', (['(patch_sec != 0)'], {}), '(patch_sec != 0)\n', (2687, 2703), True, 'import numpy as np\n'), ((3006, 3032), 'numpy.sum', 'np.sum', (['(neg_patch_sec != 0)'], {}), '(neg_patch_sec != 0)\n', (3012, 3032), True, 'import numpy as np\n'), ((5481, 5532), 'os.path.join', 'os.path.join', (['path_image', '"""left_epipolar_image.tif"""'], {}), "(path_image, 'left_epipolar_image.tif')\n", (5493, 5532), False, 'import os\n'), ((5576, 5626), 'os.path.join', 'os.path.join', (['path_image', '"""left_epipolar_mask.tif"""'], {}), "(path_image, 'left_epipolar_mask.tif')\n", (5588, 5626), False, 'import os\n'), ((5666, 5718), 'os.path.join', 'os.path.join', (['path_image', '"""right_epipolar_image.tif"""'], {}), "(path_image, 'right_epipolar_image.tif')\n", (5678, 5718), False, 'import os\n'), ((5763, 5814), 'os.path.join', 'os.path.join', (['path_image', '"""right_epipolar_mask.tif"""'], {}), "(path_image, 'right_epipolar_mask.tif')\n", (5775, 5814), False, 'import os\n'), ((5908, 5963), 'os.path.join', 'os.path.join', (['path_image', '"""left_epipolar_disp_mask.tif"""'], {}), "(path_image, 'left_epipolar_disp_mask.tif')\n", (5920, 5963), False, 'import os\n'), ((6012, 6054), 'os.path.join', 'os.path.join', (['path_image', '"""valid_disp.tif"""'], {}), "(path_image, 'valid_disp.tif')\n", (6024, 6054), False, 'import os\n'), ((7200, 7224), 'numpy.zeros_like', 'np.zeros_like', (['valid_row'], {}), '(valid_row)\n', (7213, 7224), True, 'import numpy as np\n')]
|
import numpy as np
# NOTE: these coefficients all assume a 1000 Hz sample rate and (roughly) zero-centered input
BUTTER2_45_55_NOTCH = [[0.95654323, -1.82035157, 0.95654323, 1., -1.84458768, 0.9536256 ],
[1. , -1.90305207, 1. , 1., -1.87701816, 0.95947072]]
BUTTER4_45_55_NOTCH = [[0.92117099, -1.75303637, 0.92117099, 1., -1.83993124, 0.94153282],
[1. , -1.90305207, 1. , 1., -1.85827897, 0.94562794],
[1. , -1.90305207, 1. , 1., -1.85916949, 0.9741553 ],
[1. , -1.90305207, 1. , 1., -1.89861232, 0.9783552 ]]
BUTTER8_45_55_NOTCH = [[0.85123494, -1.61994442, 0.85123494, 1., -1.84135423, 0.93909556],
[1. , -1.90305207, 1. , 1., -1.85081373, 0.94130689],
[1. , -1.90305207, 1. , 1., -1.84098214, 0.94640431],
[1. , -1.90305207, 1. , 1., -1.86712758, 0.95177517],
[1. , -1.90305207, 1. , 1., -1.85070766, 0.96298756],
[1. , -1.90305207, 1. , 1., -1.88761855, 0.96842656],
[1. , -1.90305207, 1. , 1., -1.86966575, 0.98667654],
[1. , -1.90305207, 1. , 1., -1.90969867, 0.98897339]]
BUTTER2_55_65_NOTCH = [[0.95654323, -1.77962093, 0.95654323, 1., -1.80093517, 0.95415195],
[1. , -1.860471 , 1. , 1., -1.83739919, 0.95894143]]
BUTTER4_55_65_NOTCH = [[0.92117099, -1.71381192, 0.92117099, 1., -1.79756457, 0.94190374],
[1. , -1.860471 , 1. , 1., -1.81789764, 0.94525555],
[1. , -1.860471 , 1. , 1., -1.81413419, 0.97453194],
[1. , -1.860471 , 1. , 1., -1.8595667 , 0.97797707]]
BUTTER8_55_65_NOTCH = [[0.85123494, -1.58369793, 0.85123494, 1., -1.799555 , 0.93929634],
[1. , -1.860471 , 1. , 1., -1.81000016, 0.94110568],
[1. , -1.860471 , 1. , 1., -1.79799514, 0.94688937],
[1. , -1.860471 , 1. , 1., -1.82714508, 0.95128761],
[1. , -1.860471 , 1. , 1., -1.80636275, 0.96347614],
[1. , -1.860471 , 1. , 1., -1.84831785, 0.96793547],
[1. , -1.860471 , 1. , 1., -1.82397995, 0.98688239],
[1. , -1.860471 , 1. , 1., -1.87082063, 0.9887671 ]]
class ButterworthFilter():
def __init__(self, coeffs):
self.order = len(coeffs)
self.coeffs = np.array(coeffs)
self.z = np.array([[0.0]*2]*self.order) # order x 2 array of zeros
def next_sample(self, xn):
for s in range(self.order):
xn_tmp = xn # make a temp copy
xn = self.coeffs[s, 0] * xn_tmp + self.z[s, 0]
self.z[s, 0] = (self.coeffs[s, 1] * xn_tmp - self.coeffs[s, 4] * xn + self.z[s, 1])
self.z[s, 1] = (self.coeffs[s, 2] * xn_tmp - self.coeffs[s, 5] * xn)
return xn
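# Usage sketch (not part of the original module): each row of the coefficient tables
# above is one second-order section laid out as [b0, b1, b2, a0, a1, a2] with a0 == 1,
# the same layout scipy.signal uses for `sos` arrays. The filter is applied
# sample-by-sample, which suits streaming data at the assumed 1000 Hz rate:
#     notch_50hz = ButterworthFilter(BUTTER4_45_55_NOTCH)
#     filtered = [notch_50hz.next_sample(x) for x in raw_samples]  # raw_samples: iterable of floats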
|
[
"numpy.array"
] |
[((2752, 2768), 'numpy.array', 'np.array', (['coeffs'], {}), '(coeffs)\n', (2760, 2768), True, 'import numpy as np\n'), ((2786, 2820), 'numpy.array', 'np.array', (['([[0.0] * 2] * self.order)'], {}), '([[0.0] * 2] * self.order)\n', (2794, 2820), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 08:23:58 2020
@author: sumanth
"""
import numpy as np
import cv2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
def pre_dect(frame,faceNet,model):
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),(104.0, 177.0, 123.0))
faceNet.setInput(blob)
detections = faceNet.forward()
faces = []
locs = []
preds = []
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence >= 0.168:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
faces.append(face)
locs.append((startX, startY, endX, endY))
for k in faces:
preds.append(model.predict(k))
return (locs, preds)
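# Usage sketch (hypothetical file names, not part of the original module): `faceNet`
# is an OpenCV DNN face detector and `model` a Keras mask/no-mask classifier.
#     faceNet = cv2.dnn.readNetFromCaffe("deploy.prototxt", "res10_300x300_ssd.caffemodel")
#     model = tensorflow.keras.models.load_model("mask_detector.model")
#     locs, preds = pre_dect(frame, faceNet, model)  # frame: BGR image, e.g. from cv2.VideoCapture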
|
[
"cv2.dnn.blobFromImage",
"numpy.array",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"cv2.cvtColor",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.resize"
] |
[((341, 409), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1.0)', '(300, 300)', '(104.0, 177.0, 123.0)'], {}), '(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))\n', (362, 409), False, 'import cv2\n'), ((976, 1013), 'cv2.cvtColor', 'cv2.cvtColor', (['face', 'cv2.COLOR_BGR2RGB'], {}), '(face, cv2.COLOR_BGR2RGB)\n', (988, 1013), False, 'import cv2\n'), ((1034, 1062), 'cv2.resize', 'cv2.resize', (['face', '(224, 224)'], {}), '(face, (224, 224))\n', (1044, 1062), False, 'import cv2\n'), ((1083, 1101), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['face'], {}), '(face)\n', (1095, 1101), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((1122, 1144), 'tensorflow.keras.applications.mobilenet_v2.preprocess_input', 'preprocess_input', (['face'], {}), '(face)\n', (1138, 1144), False, 'from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n'), ((1165, 1193), 'numpy.expand_dims', 'np.expand_dims', (['face'], {'axis': '(0)'}), '(face, axis=0)\n', (1179, 1193), True, 'import numpy as np\n'), ((689, 711), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (697, 711), True, 'import numpy as np\n')]
|
import shutil
import subprocess # nosec # have to use subprocess
import warnings
from collections import Counter
from copy import deepcopy
from os import listdir, makedirs
from os.path import abspath, basename, dirname, exists, isfile, join
from subprocess import PIPE # nosec # have to use subprocess
from tempfile import mkdtemp
import f90nml
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from openscm_units import unit_registry
from scmdata import run_append
from .config import _wine_installed, config
from .errors import InvalidTemporalResError, NoReaderWriterError
from .io import MAGICCData, read_cfg_file
from .io.utils import _get_openscm_var_from_filepath
from .scenarios import zero_emissions
from .utils import get_date_time_string
IS_WINDOWS = config["is_windows"]
class WineNotInstalledError(Exception):
"""Exception raised if wine is not installed but is required"""
def _copy_files(source, target, recursive=False):
"""
Copy all the files in source directory to target.
If ``recursive``, include subdirectories, otherwise ignores subdirectories.
"""
if recursive:
shutil.copytree(source, target)
return
source_files = listdir(source)
if not exists(target):
makedirs(target)
for filename in source_files:
full_filename = join(source, filename)
if isfile(full_filename):
shutil.copy(full_filename, target)
def _clean_value(v):
if isinstance(v, str):
return v.strip()
elif isinstance(v, list):
if isinstance(v[0], str):
return [i.replace("\0", "").strip().replace("\n", "") for i in v]
return v
class MAGICCBase(object):
"""
Provides access to the MAGICC binary and configuration.
To enable multiple MAGICC 'setups' to be configured independently,
the MAGICC directory containing the input files, configuration
and binary is copied to a new folder. The configuration in this
MAGICC copy can then be edited without impacting other instances or your
original MAGICC distribution.
    A ``MAGICC`` instance first has to be set up by calling
``create_copy``. If many model runs are being performed this step only has
to be performed once. The ``run`` method can then be called many times
without re-copying the files each time. Between each call to ``run``, the
configuration files can be updated to perform runs with different
configurations.
Parameters
----------
root_dir : str
If ``root_dir`` is supplied, an existing MAGICC 'setup' is
used.
"""
version = None
_scen_file_name = "SCENARIO.SCEN7"
def __init__(self, root_dir=None, strict=True):
"""
Initialise
Parameters
----------
root_dir : str
Root directory of the MAGICC package. If ``None``, a temporary
            copy of MAGICC is made based on the result of
            ``self.get_executable()``.
strict: bool
If True, enforce the configuration checks, otherwise a warning
is raised if any invalid configuration is found and the run is
continued. Setting ``strict=False`` is only recommended for
experienced users of MAGICC.
"""
self.root_dir = root_dir
self.config = None
self.executable = self.get_executable()
self.strict = strict
if root_dir is not None:
self.is_temp = False
else:
# Create a temp directory
self.is_temp = True
def __enter__(self):
if self.is_temp and self.run_dir is None:
self.create_copy()
return self
def __exit__(self, *args, **kwargs):
self.remove_temp_copy()
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
The root folder and ``bin`` folders are copied (not recursively). The
``run`` folder is copied recursively.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
if self.root_dir is not None:
raise AssertionError(
"A temp copy for this instance has already been created"
)
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin"]
dirs_to_copy_recursive = ["run"]
# Check that the executable is in a valid sub directory
if exec_dir not in dirs_to_copy + dirs_to_copy_recursive:
raise AssertionError("binary must be in bin/ or run/ directory")
for d in dirs_to_copy + dirs_to_copy_recursive:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(
source_dir,
join(self.root_dir, d),
recursive=d in dirs_to_copy_recursive,
)
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config()
@property
def binary_name(self):
"""
Name of the MAGICC binary file
Returns
-------
str
Name of the binary file
"""
return basename(self.executable)
@property
def original_dir(self):
"""
Directory of the MAGICC package.
This is the directory which contains the ``run`` and ``out`` folders.
Returns
-------
str
Path of the MAGICC package
"""
return dirname(self.executable)
@property
def run_dir(self):
"""
Run directory of the MAGICC package.
This path always ends in ``run``.
Returns
-------
str
Path of the run directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "run")
@property
def out_dir(self):
"""
Output directory of the MAGICC package.
This path always ends in ``out``.
Returns
-------
str
Path of the output directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "out")
@property
def default_config(self):
"""
Default configuration for a run
Returns
-------
:obj:`f90nml.Namelist`
Namelist object containing the default configuration
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
def _deep_update(b, o):
for k, v in o.items():
if isinstance(v, dict):
_deep_update(b[k], v)
else:
b.update(o)
_deep_update(self._default_config, user)
return self._default_config
def run(self, scenario=None, only=None, debug=False, **kwargs):
"""
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
        Any logged output from running MAGICC will be in ``output.metadata["stderr"]``.
        For MAGICC7 and above, the level of logging can be controlled with the
``debug`` argument.
Any subannual files output by MAGICC will be ignored by this function. These
files can be read in manually using :class:`pymagicc.io.MAGICCData` directly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
            Scenario to run. If None, MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
debug: {True, False, "verbose"}
If true, MAGICC will run in debug mode with the maximum amount of logging.
If "verbose", MAGICC will be run in verbose mode.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
subprocess.CalledProcessError
If MAGICC fails to run. Check the 'stderr' key of the result's `metadata`
attribute to inspect the results output from MAGICC.
ValueError
The user attempts to use ``debug`` with MAGICC6
"""
if not exists(self.root_dir):
raise FileNotFoundError(self.root_dir)
if self.executable is None:
raise ValueError(
"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format(
self.version
)
)
if scenario is not None:
kwargs = self.set_emission_scenario_setup(scenario, kwargs)
yr_config = {}
if "startyear" in kwargs:
yr_config["startyear"] = kwargs.pop("startyear")
if "endyear" in kwargs:
yr_config["endyear"] = kwargs.pop("endyear")
if yr_config:
self.set_years(**yr_config)
# should be able to do some other nice metadata stuff re how magicc was run
# etc. here
kwargs.setdefault("rundate", get_date_time_string())
self.update_config(**kwargs)
self.check_config()
exec_dir = basename(self.original_dir)
command = [join(self.root_dir, exec_dir, self.binary_name)]
if self.version >= 7:
if debug == "verbose":
command.append("--verbose")
elif debug:
command.append("--debug")
elif debug:
raise ValueError("MAGICC6 has no debug capability")
if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover
if not _wine_installed:
raise WineNotInstalledError(
"Wine is not installed but is required to run `.exe` binaries"
)
command.insert(0, "wine")
try:
res = subprocess.run( # nosec # on Windows shell=True is required
command,
check=True,
# thank you https://stackoverflow.com/a/53209196 for Python 3.6 hack
stdout=PIPE,
stderr=PIPE,
cwd=self.run_dir,
shell=IS_WINDOWS,
)
except subprocess.CalledProcessError as exc:
print("stderr:\n{}".format(exc.stderr.decode()))
raise exc
outfiles = self._get_output_filenames()
read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
if scenario is not None:
read_cols["model"] = scenario["model"].unique().tolist()
read_cols["scenario"] = scenario["scenario"].unique().tolist()
else:
read_cols.setdefault("model", ["unspecified"])
read_cols.setdefault("scenario", ["unspecified"])
mdata = []
for filepath in outfiles:
if filepath.startswith("DAT_VOLCANIC_RF.") or "SUBANN" in filepath:
warnings.warn(
"Not reading file: {}. Monthly data are not read in automatically by `run`. "
"Use `MAGICCData` instead.".format(filepath)
)
continue
try:
openscm_var = _get_openscm_var_from_filepath(filepath)
if only is None or openscm_var in only:
tempdata = MAGICCData(
join(self.out_dir, filepath), columns=deepcopy(read_cols)
)
mdata.append(tempdata)
except (NoReaderWriterError, InvalidTemporalResError):
# TODO: something like warnings.warn("Could not read {}".format(filepath))
continue
if not mdata and only is not None:
raise ValueError("No output found for only={}".format(only))
if not mdata:
if self.strict:
raise ValueError("No output found. Check configuration")
else:
# No data was loaded return an empty MAGICCData object
mdata = MAGICCData(
data={},
columns={
"model": [],
"unit": [],
"variable": [],
"region": [],
"scenario": [],
},
)
else:
mdata = run_append(mdata)
try:
run_paras = self.read_parameters()
self.config = run_paras
mdata.metadata["parameters"] = run_paras
except FileNotFoundError:
pass
mdata.metadata["stderr"] = res.stderr.decode("ascii")
levels_to_warn = ["WARNING", "ERROR", "FATAL"]
for level in levels_to_warn:
if "<{}>".format(level) in mdata.metadata["stderr"]:
warnings.warn(
"magicc logged a {} message. Check the 'stderr' key of the "
"result's `metadata` attribute.".format(level)
)
return mdata
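    # Sketch of a typical call (assumes ``magicc`` is a set-up instance and ``scen`` a
    # ``MAGICCData`` scenario): restrict the output that is read back to surface
    # temperature and keep MAGICC's log for inspection.
    #     res = magicc.run(scen, only=["Surface Temperature"], out_temperature=1)
    #     log = res.metadata["stderr"]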
def _get_output_filenames(self):
outfiles = [f for f in listdir(self.out_dir) if f != "PARAMETERS.OUT"]
bin_out = [
f.split(".")[0]
for f in outfiles
if f.startswith("DAT_") and f.endswith(".BINOUT")
]
extras = []
for f in outfiles:
var_name, ext = f.split(".")
if ext != "BINOUT" and var_name not in bin_out:
extras.append(f)
return [f + ".BINOUT" for f in bin_out] + extras
def _check_failed(self, msg):
if self.strict:
raise ValueError(msg)
else:
warnings.warn(msg)
def check_config(self):
"""Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC
For further detail about why this is required, please see :ref:`MAGICC flags`.
Raises
------
ValueError
If we are not certain that the config written by PYMAGICC will overwrite
all other config i.e. that there will be no unexpected behaviour. A
ValueError will also be raised if the user tries to use more than one
scenario file.
"""
cfg_error_msg = (
"PYMAGICC is not the only tuning model that will be used by "
"`MAGCFG_USER.CFG`: your run is likely to fail/do odd things"
)
emisscen_error_msg = (
"You have more than one `FILE_EMISSCEN_X` flag set. Using more than "
"one emissions scenario is hard to debug and unnecessary with "
"Pymagicc's Dataframe scenario input. Please combine all your "
"scenarios into one Dataframe with Pymagicc and Pandas, then feed "
"this single Dataframe into Pymagicc's run API."
)
nml_to_check = "nml_allcfgs"
usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG"))
for k in usr_cfg[nml_to_check]:
if k.startswith("file_tuningmodel"):
first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"]
if first_tuningmodel:
if usr_cfg[nml_to_check][k] != "PYMAGICC":
self._check_failed(cfg_error_msg)
elif usr_cfg[nml_to_check][k] not in ["USER", ""]:
self._check_failed(cfg_error_msg)
elif k.startswith("file_emisscen_"):
if usr_cfg[nml_to_check][k] not in ["NONE", ""]:
self._check_failed(emisscen_error_msg)
self._check_config()
def write(self, mdata, name):
"""Write an input file to disk
Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
A MAGICCData instance with the data to write
name : str
The name of the file to write. The file will be written to the MAGICC
instance's run directory i.e. ``self.run_dir``
"""
mdata.write(join(self.run_dir, name), self.version)
def read_parameters(self):
"""
Read a parameters.out file
Returns
-------
dict
A dictionary containing all the configuration used by MAGICC
"""
param_fname = join(self.out_dir, "PARAMETERS.OUT")
if not exists(param_fname):
raise FileNotFoundError("No PARAMETERS.OUT found")
with open(param_fname) as nml_file:
parameters = dict(f90nml.read(nml_file))
for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
parameters[group] = dict(parameters[group])
for k, v in parameters[group].items():
parameters[group][k] = _clean_value(v)
parameters[group.replace("nml_", "")] = parameters.pop(group)
self.config = parameters
return parameters
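    # Sketch: if the previous run was configured with ``out_parameters=1``, the
    # effective configuration can be inspected afterwards (group names have the
    # ``nml_`` prefix stripped, as done above):
    #     params = magicc.read_parameters()
    #     cs = params["allcfgs"]["core_climatesensitivity"]  # flag name used as an example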
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None
def set_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="<KEY>",
conflict="warn",
**kwargs,
):
"""
Create a configuration file for MAGICC.
Writes a fortran namelist in run_dir.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._check_and_format_config(kwargs)
fname = join(self.run_dir, filename)
conf = {top_level_key: kwargs}
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
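    # Example (sketch): write a single tuning flag into MAGTUNE_PYMAGICC.CFG under the
    # default namelist; ``core_climatesensitivity`` is a standard MAGICC flag.
    #     magicc.set_config(core_climatesensitivity=3.0)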
def update_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="<KEY>",
conflict="warn",
**kwargs,
):
"""Updates a configuration file for MAGICC
Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._check_and_format_config(kwargs)
fname = join(self.run_dir, filename)
if exists(fname):
conf = f90nml.read(fname)
else:
conf = {top_level_key: {}}
conf[top_level_key].update(kwargs)
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
def _fix_legacy_keys(self, conf, conflict="warn"):
"""
Go through config and fix any keys which are misnamed.
For example, fix any keys which have been renamed between MAGICC versions to
match the new names.
Parameters
----------
conf :obj:`f90nml.Namelist`
Configuration to check
conflict : {'warn', 'ignore'}
If 'warn', when a conflict is found, a warning is raised. If 'ignore', no
warning is raised when a conflict is found.
Returns
-------
:obj:`f90nml.Namelist`
Configuration with updated keys
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
valid_conflicts = ["warn", "ignore"]
if conflict not in valid_conflicts:
raise ValueError("`conflict` must be one of: {}".format(valid_conflicts))
cfg_key = "<KEY>"
if cfg_key not in conf:
return conf
new_conf = deepcopy(conf)
for wrong_key, right_key in self._config_renamings.items():
if wrong_key in new_conf[cfg_key]:
new_conf[cfg_key][right_key] = new_conf[cfg_key].pop(wrong_key)
if conflict == "warn":
warnings.warn(
"Altering config flag {} to {}".format(wrong_key, right_key)
)
return new_conf
def set_zero_config(self):
"""Set config such that radiative forcing and temperature output will be zero
        This method is intended as a convenience only; it does not handle everything in
        an obvious way. Adjusting the parameter settings still requires great care and
        may behave unexpectedly.
"""
# zero_emissions is imported from scenarios module
# TODO: setup MAGICC6 so it puts extra variables in right place and hence
# warning about ignoring some data disappears
zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)
time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
"time"
].values
no_timesteps = len(time)
# value doesn't actually matter as calculations are done from difference but
# chose sensible value nonetheless
co2_conc_pi = 722
co2_conc = co2_conc_pi * np.ones(no_timesteps)
co2_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CO2",
"unit": "ppm",
"todo": "SET",
"region": "World",
"value": co2_conc,
}
)
co2_conc_writer = MAGICCData(co2_conc_df)
co2_conc_filename = "HIST_CONSTANT_CO2_CONC.IN"
co2_conc_writer.metadata = {
"header": "Constant pre-industrial CO2 concentrations"
}
co2_conc_writer.write(join(self.run_dir, co2_conc_filename), self.version)
ch4_conc_pi = 722
ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
ch4_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CH4",
"unit": "ppb",
"todo": "SET",
"region": "World",
"value": ch4_conc,
}
)
ch4_conc_writer = MAGICCData(ch4_conc_df)
ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
ch4_conc_writer.metadata = {
"header": "Constant pre-industrial CH4 concentrations"
}
ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)
fgas_conc_pi = 0
fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
varname = "FGAS_CONC"
fgas_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": varname,
"unit": "ppt",
"todo": "SET",
"region": "World",
"value": fgas_conc,
}
)
fgas_conc_writer = MAGICCData(fgas_conc_df)
fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
fgas_conc_writer.metadata = {"header": "Zero concentrations"}
fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)
def_config = self.default_config
tmp_nml = f90nml.Namelist({"nml_allcfgs": {"fgas_files_conc": 1}})
fgas_files_conc_flag = list(
self._fix_legacy_keys(tmp_nml, conflict="ignore")["nml_allcfgs"].keys()
)[0]
fgas_conc_files = [fgas_conc_filename] * len(
def_config["nml_allcfgs"][fgas_files_conc_flag]
)
self.set_config(
conflict="ignore",
file_emisscen=self._scen_file_name,
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=10000,
file_co2i_emis="",
file_co2b_emis="",
file_co2_conc=co2_conc_filename,
co2_switchfromconc2emis_year=10000,
file_ch4i_emis="",
file_ch4b_emis="",
file_ch4n_emis="",
file_ch4_conc=ch4_conc_filename,
ch4_switchfromconc2emis_year=10000,
file_n2oi_emis="",
file_n2ob_emis="",
file_n2on_emis="",
file_n2o_conc="",
n2o_switchfromconc2emis_year=1750,
file_noxi_emis="",
file_noxb_emis="",
file_noxi_ot="",
file_noxb_ot="",
file_noxt_rf="",
file_soxnb_ot="",
file_soxi_ot="",
file_soxt_rf="",
file_soxi_emis="",
file_soxb_emis="",
file_soxn_emis="",
file_oci_emis="",
file_ocb_emis="",
file_oci_ot="",
file_ocb_ot="",
file_oci_rf="",
file_ocb_rf="",
file_bci_emis="",
file_bcb_emis="",
file_bci_ot="",
file_bcb_ot="",
file_bci_rf="",
file_bcb_rf="",
bcoc_switchfromrf2emis_year=1750,
file_nh3i_emis="",
file_nh3b_emis="",
file_nmvoci_emis="",
file_nmvocb_emis="",
file_coi_emis="",
file_cob_emis="",
file_mineraldust_rf="",
file_landuse_rf="",
file_bcsnow_rf="",
# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines
fgas_switchfromconc2emis_year=10000,
rf_mhalosum_scale=0,
stratoz_o3scale=0,
rf_volcanic_scale=0,
rf_solar_scale=0,
mhalo_switchfromconc2emis_year=1750,
fgas_files_conc=fgas_conc_files,
)
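    # Sketch: after zeroing the configuration, a run should give (near-)zero radiative
    # forcing and temperature response, which makes a useful baseline check.
    #     magicc.set_zero_config()
    #     baseline = magicc.run(only=["Radiative Forcing", "Surface Temperature"])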
def _check_and_format_config(self, config_dict):
self._check_for_duplicate_keys(config_dict)
config_dict = self._convert_out_config_flags_to_integers(config_dict)
return config_dict
@staticmethod
def _check_for_duplicate_keys(config_dict):
keys_lower = [v.lower() for v in config_dict.keys()]
counts = Counter(keys_lower)
if any([v > 1 for v in counts.values()]):
duplicate_keys = [
[ck for ck in config_dict.keys() if ck.lower() == k.lower()]
for k, v in counts.items()
if v > 1
]
error_msg = (
"The following configuration keys clash because configs are "
"case insensitive: {}".format(
", ".join([str(v) for v in duplicate_keys])
)
)
raise ValueError(error_msg)
@staticmethod
def _convert_out_config_flags_to_integers(config_dict):
valid_out_flags = [
"out_emissions",
"out_gwpemissions",
"out_sum_gwpemissions",
"out_concentrations",
"out_carboncycle",
"out_forcing",
"out_forcing_subannual",
"out_temperature",
"out_temperature_subannual",
"out_sealevel",
"out_parameters",
"out_misc",
"out_lifetimes",
"out_timeseriesmix",
"out_rcpdata",
"out_summaryidx",
"out_tempoceanlayers",
"out_oceanarea",
"out_heatuptake",
"out_warnings",
"out_precipinput",
"out_aogcmtuning",
"out_ccycletuning",
"out_observationaltuning",
"out_keydata_1",
"out_keydata_2",
"out_inverseemis",
"out_surfaceforcing",
"out_permafrost",
"out_allowanydynamicvars",
]
for key in valid_out_flags:
if key in config_dict:
# MAGICC expects 1 and 0 instead of True/False
config_dict[key] = 1 if config_dict[key] else 0
return config_dict
def set_years(self, startyear=1765, endyear=2100):
"""
Set the start and end dates of the simulations.
Parameters
----------
startyear : int
Start year of the simulation
endyear : int
End year of the simulation
Returns
-------
dict
The contents of the namelist
"""
# TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
return self.set_config(
"MAGCFG_NMLYEARS.CFG",
"nml_years",
endyear=endyear,
startyear=startyear,
stepsperyear=12,
)
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
"""Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If true, MAGICC is configured to write output files as human readable ascii files.
write_binary : bool
If true, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human readable.
**kwargs:
List of variables to write out. A list of possible options are as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
"""
if not (write_ascii or write_binary):
raise AssertionError("write_binary and/or write_ascii must be configured")
if write_binary and write_ascii:
ascii_binary = "BOTH"
elif write_ascii:
ascii_binary = "ASCII"
else:
ascii_binary = "BINARY"
# defaults
outconfig = {
"out_emissions": 0,
"out_gwpemissions": 0,
"out_sum_gwpemissions": 0,
"out_concentrations": 0,
"out_carboncycle": 0,
"out_forcing": 0,
"out_surfaceforcing": 0,
"out_permafrost": 0,
"out_temperature": 0,
"out_sealevel": 0,
"out_parameters": 0,
"out_misc": 0,
"out_timeseriesmix": 0,
"out_rcpdata": 0,
"out_summaryidx": 0,
"out_inverseemis": 0,
"out_tempoceanlayers": 0,
"out_heatuptake": 0,
"out_ascii_binary": ascii_binary,
"out_warnings": 0,
"out_precipinput": 0,
"out_aogcmtuning": 0,
"out_ccycletuning": 0,
"out_observationaltuning": 0,
"out_keydata_1": 0,
"out_keydata_2": 0,
}
if self.version == 7:
outconfig["out_oceanarea"] = 0
outconfig["out_lifetimes"] = 0
for kw in kwargs:
val = 1 if kwargs[kw] else 0 # convert values to 0/1 instead of booleans
outconfig["out_" + kw.lower()] = val
self.update_config(**outconfig)
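    # Hypothetical usage sketch: write only temperature and forcing output, as ascii,
    # leaving every other out_* flag at its 0 default:
    #   magicc.set_output_variables(write_ascii=True, temperature=True, forcing=True)
    # which is equivalent to update_config with out_temperature=1, out_forcing=1,
    # out_ascii_binary="ASCII" and all remaining out_* flags set to 0.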
def get_executable(self):
"""
Get path to MAGICC executable being used
Returns
-------
str
Path to MAGICC executable being used
"""
return config["executable_{}".format(self.version)]
def diagnose_tcr_ecs_tcre(self, **kwargs):
"""
Diagnose TCR, ECS and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
TCR; "tcre" - the diagnosed TCRE; "timeseries" - the relevant model input
and output timeseries used in the experiment i.e. atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
global-mean surface temperature
"""
ecs_res = self.diagnose_ecs(**kwargs)
tcr_tcre_res = self.diagnose_tcr_tcre(**kwargs)
out = {**ecs_res, **tcr_tcre_res}
out["timeseries"] = run_append(
[ecs_res["timeseries"], tcr_tcre_res["timeseries"]]
)
return out
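    # Hypothetical usage sketch (context-manager usage and parameter value assumed):
    #   with MAGICC6() as magicc:
    #       res = magicc.diagnose_tcr_ecs_tcre(core_climatesensitivity=3.0)
    #       print(res["tcr"], res["ecs"], res["tcre"])
    # res["timeseries"] holds the appended abrupt-2xCO2 and 1pctCO2 runs.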
def diagnose_ecs(self, **kwargs):
"""
Diagnose ECS
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "timeseries" - the
relevant model input and output timeseries used in the experiment i.e.
atmospheric |CO2| concentrations, inverse |CO2| emissions, total radiative
forcing and global-mean surface temperature
"""
self._diagnose_ecs_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"Radiative Forcing",
"Surface Temperature",
],
)
timeseries["scenario"] = "abrupt-2xCO2"
ecs = self.get_ecs_from_diagnosis_results(timeseries)
return {"ecs": ecs, "timeseries": timeseries}
def diagnose_tcr_tcre(self, **kwargs):
"""
Diagnose TCR and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "tcr" - the diagnosed TCR; "tcre" - the diagnosed
TCRE; "timeseries" - the relevant model input and output timeseries used
in the experiment i.e. atmospheric |CO2| concentrations, inverse |CO2|
emissions, total radiative forcing and global-mean surface temperature
"""
self._diagnose_tcr_tcre_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"INVERSEEMIS",
"Radiative Forcing",
"Surface Temperature",
],
)
# drop all the irrelevant inverse emissions
timeseries = timeseries.filter(
variable="Inverse Emissions*", level=1, keep=False
)
        # drop the final year as concs stay constant for some reason,
# MAGICC bug...
timeseries = timeseries.filter(time=timeseries["time"].max(), keep=False)
timeseries["scenario"] = "1pctCO2"
tcr, tcre = self.get_tcr_tcre_from_diagnosis_results(timeseries)
return {"tcr": tcr, "tcre": tcre, "timeseries": timeseries}
def _diagnose_ecs_config_setup(self, **kwargs):
self.set_years(
startyear=1750, endyear=4200
        ) # 4200 seems to be the max I can push to without an error
self.update_config(
FILE_CO2_CONC="ABRUPT2XCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=2000,
**kwargs,
)
def _diagnose_tcr_tcre_config_setup(self, **kwargs):
self.set_years(startyear=1750, endyear=2020)
self.update_config(
FILE_CO2_CONC="1PCTCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=3000,
OUT_INVERSEEMIS=1,
**kwargs,
)
def get_ecs_from_diagnosis_results(self, results_ecs_run):
"""
Diagnose ECS from the results of the abrupt-2xCO2 experiment
Parameters
----------
results_ecs_run : :obj:`ScmRun`
Results of the abrupt-2xCO2 experiment, must contain atmospheric |CO2|
concentrations, total radiative forcing and surface temperature.
Returns
-------
ecs : :obj:`pint.quantity.Quantity`
ECS diagnosed from ``results_ecs_run``
"""
global_co2_concs = results_ecs_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
ecs_time, ecs_start_time = self._get_ecs_ecs_start_yr_from_CO2_concs(
global_co2_concs
)
global_total_rf = results_ecs_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_ecs_total_RF(global_total_rf, jump_time=ecs_start_time)
global_temp = results_ecs_run.filter(
variable="Surface Temperature", region="World"
)
self._check_ecs_temp(global_temp)
ecs = float(global_temp.filter(time=ecs_time).values.squeeze())
unit = global_temp.get_unique_meta("unit", no_duplicates=True)
ecs = ecs * unit_registry(unit)
return ecs
def get_tcr_tcre_from_diagnosis_results(self, results_tcr_tcre_run):
"""
Diagnose TCR and TCRE from the results of the 1pctCO2 experiment
Parameters
----------
results_tcr_tcre_run : :obj:`ScmRun`
Results of the 1pctCO2 experiment, must contain atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
surface temperature.
Returns
-------
tcr, tcre : :obj:`pint.quantity.Quantity`, :obj:`pint.quantity.Quantity`
TCR and TCRE diagnosed from ``results_tcr_tcre_run``
"""
global_co2_concs = results_tcr_tcre_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
(tcr_time, tcr_start_time,) = self._get_tcr_tcr_start_yr_from_CO2_concs(
global_co2_concs
)
if tcr_time.year != tcr_start_time.year + 70: # pragma: no cover # emergency
raise AssertionError("Has the definition of TCR and TCRE changed?")
global_inverse_co2_emms = results_tcr_tcre_run.filter(
variable="Inverse Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
)
global_total_rf = results_tcr_tcre_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_tcr_tcre_total_RF(global_total_rf, tcr_time=tcr_time)
global_temp = results_tcr_tcre_run.filter(
variable="Surface Temperature", region="World"
)
self._check_tcr_tcre_temp(global_temp)
tcr = float(global_temp.filter(time=tcr_time).values.squeeze())
tcr_unit = global_temp.get_unique_meta("unit", no_duplicates=True)
tcr = tcr * unit_registry(tcr_unit)
tcre_cumulative_emms = float(
global_inverse_co2_emms.filter(
year=range(tcr_start_time.year, tcr_time.year)
).values.sum()
)
emms_unit = global_inverse_co2_emms.get_unique_meta("unit", no_duplicates=True)
years = global_inverse_co2_emms["year"].values.squeeze()
if not np.all((years[1:] - years[:-1]) == 1): # pragma: no cover
raise AssertionError(
"TCR/TCRE diagnosis assumed to be on annual timestep. Please "
"raise an issue at "
"https://github.com/openscm/pymagicc/issues to discuss "
"your use case"
)
# can now safely assume that our simple sum has done the right thing
tcre_cumulative_emms_unit = unit_registry(emms_unit) * unit_registry("yr")
tcre_cumulative_emms = tcre_cumulative_emms * tcre_cumulative_emms_unit
tcre = tcr / tcre_cumulative_emms
return tcr, tcre
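    # Unit-handling sketch (illustrative numbers only): with tcr = 1.8 K and
    # cumulative inverse emissions of 1000 GtC over the 70-year window,
    # tcre = 1.8 K / (1000 GtC) = 0.0018 K / GtC; the pint quantities returned
    # above carry these units automatically.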
def _get_ecs_ecs_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
ecs_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0]
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x < ecs_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant before they start rising"
)
co2_conc_final = 2 * co2_conc_0
eqm_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: ecs_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not np.isclose(eqm_co2_concs, co2_conc_final).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant after doubling"
)
ecs_time = df_co2_concs["time"].iloc[-1]
return ecs_time, ecs_start_time
def _get_tcr_tcr_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
tcr_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0] - relativedelta(years=1)
tcr_time = tcr_start_time + relativedelta(years=70)
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The TCR/TCRE CO2 concs look wrong, they are not constant before they start rising"
)
actual_rise_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: tcr_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_co2_concs = co2_conc_0 * 1.01 ** np.arange(
len(actual_rise_co2_concs)
)
rise_co2_concs_correct = np.isclose(
actual_rise_co2_concs, expected_rise_co2_concs
).all()
if not rise_co2_concs_correct:
raise ValueError("The TCR/TCRE CO2 concs look wrong during the rise period")
return tcr_time, tcr_start_time
def _check_ecs_total_RF(self, df_total_rf, jump_time):
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
t_start = total_rf.columns.min()
t_end = total_rf.columns.max()
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x < jump_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
eqm_rf = (
_filter_time_range(df_total_rf, lambda x: jump_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not (eqm_rf == total_rf_max).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not constant after concentrations double"
)
def _check_tcr_tcre_total_RF(self, df_total_rf, tcr_time):
total_rf = df_total_rf.timeseries()
t_start = total_rf.columns.min()
tcr_start_time = tcr_time - relativedelta(years=70)
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
rf_vls = total_rf.values.squeeze()
rf_minus_previous_yr = rf_vls[1:] - rf_vls[:-1]
if not np.all(rf_minus_previous_yr >= 0):
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not rising after concentrations start rising"
)
def _check_ecs_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The ECS surface temperature looks wrong, it decreases"
)
def _check_tcr_tcre_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The TCR/TCRE surface temperature looks wrong, it decreases"
)
def _check_tcr_ecs_tcre_temp(self, df_temp, message):
tmp_vls = df_temp.timeseries().values.squeeze()
tmp_minus_previous_yr = tmp_vls[1:] - tmp_vls[:-1]
if not np.all(tmp_minus_previous_yr >= 0):
raise ValueError(message)
def set_emission_scenario_setup(self, scenario, config_dict):
"""Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
"""
self.write(scenario, self._scen_file_name)
emis_flag = list(
self._fix_legacy_keys(
f90nml.Namelist({"nml_allcfgs": {"file_emisscen": "junk"}}),
conflict="ignore",
)["nml_allcfgs"].keys()
)[0]
config_dict[emis_flag] = self._scen_file_name
return config_dict
def _check_config(self):
"""
Check config above and beyond those checked by ``self.check_config``
"""
pass
class MAGICC6(MAGICCBase):
version = 6
_scen_file_name = "SCENARIO.SCEN"
_config_renamings = {
"file_emisscen": "file_emissionscenario",
"fgas_files_conc": "file_fgas_conc",
"mhalo_switchfromconc2emis_year": "mhalo_switch_conc2emis_yr",
}
@property
def default_config(self):
"""
Default configuration to use in a run
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL_69.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
self._default_config.update(user)
return self._default_config
def _check_tcr_ecs_tcre_total_RF(self, df_total_rf, tcr_time, ecs_time):
super()._check_tcr_ecs_tcre_total_RF(df_total_rf, tcr_time, ecs_time)
# can be more careful with checks MAGICC6 only has logarithmic CO2 forcing
# i.e. linear rise in forcing
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
tcre_start_time = tcr_time - relativedelta(years=70)
actual_rise_rf = (
_filter_time_range(df_total_rf, lambda x: tcre_start_time <= x <= tcr_time)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_rf = total_rf_max / 70.0 * np.arange(71)
rise_rf_correct = np.isclose(actual_rise_rf, expected_rise_rf).all()
if not rise_rf_correct:
raise ValueError(
"The TCR/ECS/TCRE total radiative forcing looks wrong during the rise period"
)
def _check_config(self):
cfg = self.update_config()
if "file_emissionscenario" in cfg["nml_allcfgs"]:
if cfg["nml_allcfgs"]["file_emissionscenario"].endswith("SCEN7"):
self._check_failed("MAGICC6 cannot run SCEN7 files")
class MAGICC7(MAGICCBase):
version = 7
_config_renamings = {
"file_emissionscenario": "file_emisscen",
"file_fgas_conc": "fgas_files_conc",
"mhalo_switch_conc2emis_yr": "mhalo_switchfromconc2emis_year",
}
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
This will also overwrite the value of all ``file_tuningmodel_x`` flags to
ensure that Pymagicc's configurations will be read. If ``self.strict``, this
will also overwrite the value of all ``file_emisscen_x`` flags to ensure that
only Pymagicc's scenario input is used. This overwrite behaviour can be
removed once the MAGICC7 binary is publicly released as we can then create a
Pymagicc specific MAGCFG_USER.CFG rather than relying on whatever is in the
user's current copy.
"""
super(MAGICC7, self).create_copy()
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_tuningmodel_1": "PYMAGICC",
"file_tuningmodel_2": "USER",
"file_tuningmodel_3": "USER",
"file_tuningmodel_4": "USER",
"file_tuningmodel_5": "USER",
"file_tuningmodel_6": "USER",
"file_tuningmodel_7": "USER",
"file_tuningmodel_8": "USER",
"file_tuningmodel_9": "USER",
"file_tuningmodel_10": "USER",
},
)
if self.strict:
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_emisscen_2": "NONE",
"file_emisscen_3": "NONE",
"file_emisscen_4": "NONE",
"file_emisscen_5": "NONE",
"file_emisscen_6": "NONE",
"file_emisscen_7": "NONE",
"file_emisscen_8": "NONE",
},
)
def _diagnose_tcr_ecs_tcre_config_setup(self, **kwargs):
super()._diagnose_tcr_ecs_tcre_config_setup(**kwargs)
# also need to lock CH4 and N2O in case OLBL forcing mode is being used
self.update_config(
FILE_CH4_CONC="TCRECS_CH4_CONC.IN",
CH4_SWITCHFROMCONC2EMIS_YEAR=30000,
FILE_N2O_CONC="TCRECS_N2O_CONC.IN",
N2O_SWITCHFROMCONC2EMIS_YEAR=30000,
)
def _check_config(self):
pass
def _filter_time_range(scmdf, filter_func):
# TODO: move into openscm
tdf = scmdf.timeseries()
tdf = tdf.iloc[:, tdf.columns.map(filter_func)]
return MAGICCData(tdf)
|
[
"f90nml.write",
"dateutil.relativedelta.relativedelta",
"f90nml.Namelist",
"copy.deepcopy",
"numpy.arange",
"os.path.exists",
"os.listdir",
"subprocess.run",
"pandas.DataFrame",
"warnings.warn",
"numpy.ones",
"openscm_units.unit_registry",
"os.path.isfile",
"os.path.dirname",
"scmdata.run_append",
"tempfile.mkdtemp",
"shutil.copy",
"f90nml.read",
"numpy.isclose",
"os.makedirs",
"os.path.join",
"shutil.copytree",
"collections.Counter",
"os.path.basename",
"shutil.rmtree",
"numpy.all"
] |
[((1232, 1247), 'os.listdir', 'listdir', (['source'], {}), '(source)\n', (1239, 1247), False, 'from os import listdir, makedirs\n'), ((1164, 1195), 'shutil.copytree', 'shutil.copytree', (['source', 'target'], {}), '(source, target)\n', (1179, 1195), False, 'import shutil\n'), ((1260, 1274), 'os.path.exists', 'exists', (['target'], {}), '(target)\n', (1266, 1274), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((1284, 1300), 'os.makedirs', 'makedirs', (['target'], {}), '(target)\n', (1292, 1300), False, 'from os import listdir, makedirs\n'), ((1360, 1382), 'os.path.join', 'join', (['source', 'filename'], {}), '(source, filename)\n', (1364, 1382), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((1394, 1415), 'os.path.isfile', 'isfile', (['full_filename'], {}), '(full_filename)\n', (1400, 1415), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((4600, 4620), 'os.path.exists', 'exists', (['self.run_dir'], {}), '(self.run_dir)\n', (4606, 4620), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((4791, 4818), 'os.path.basename', 'basename', (['self.original_dir'], {}), '(self.original_dir)\n', (4799, 4818), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((6202, 6227), 'os.path.basename', 'basename', (['self.executable'], {}), '(self.executable)\n', (6210, 6227), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((6514, 6538), 'os.path.dirname', 'dirname', (['self.executable'], {}), '(self.executable)\n', (6521, 6538), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((6845, 6871), 'os.path.join', 'join', (['self.root_dir', '"""run"""'], {}), "(self.root_dir, 'run')\n", (6849, 6871), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((7184, 7210), 'os.path.join', 'join', (['self.root_dir', '"""out"""'], {}), "(self.root_dir, 'out')\n", (7188, 7210), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((7618, 7632), 'copy.deepcopy', 'deepcopy', (['base'], {}), '(base)\n', (7626, 7632), False, 'from copy import deepcopy\n'), ((10896, 10923), 'os.path.basename', 'basename', (['self.original_dir'], {}), '(self.original_dir)\n', (10904, 10923), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((17967, 18003), 'os.path.join', 'join', (['self.out_dir', '"""PARAMETERS.OUT"""'], {}), "(self.out_dir, 'PARAMETERS.OUT')\n", (17971, 18003), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((20164, 20192), 'os.path.join', 'join', (['self.run_dir', 'filename'], {}), '(self.run_dir, filename)\n', (20168, 20192), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((20302, 20339), 'f90nml.write', 'f90nml.write', (['conf', 'fname'], {'force': '(True)'}), '(conf, fname, force=True)\n', (20314, 20339), False, 'import f90nml\n'), ((21739, 21767), 'os.path.join', 'join', (['self.run_dir', 'filename'], {}), '(self.run_dir, filename)\n', (21743, 21767), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((21780, 21793), 'os.path.exists', 'exists', (['fname'], {}), '(fname)\n', (21786, 21793), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((22000, 22037), 'f90nml.write', 'f90nml.write', (['conf', 'fname'], {'force': '(True)'}), '(conf, fname, force=True)\n', (22012, 
22037), False, 'import f90nml\n'), ((23188, 23202), 'copy.deepcopy', 'deepcopy', (['conf'], {}), '(conf)\n', (23196, 23202), False, 'from copy import deepcopy\n'), ((24604, 24839), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable':\n 'Atmospheric Concentrations|CO2', 'unit': 'ppm', 'todo': 'SET',\n 'region': 'World', 'value': co2_conc}"], {}), "({'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable':\n 'Atmospheric Concentrations|CO2', 'unit': 'ppm', 'todo': 'SET',\n 'region': 'World', 'value': co2_conc})\n", (24616, 24839), True, 'import pandas as pd\n'), ((25416, 25651), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable':\n 'Atmospheric Concentrations|CH4', 'unit': 'ppb', 'todo': 'SET',\n 'region': 'World', 'value': ch4_conc}"], {}), "({'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable':\n 'Atmospheric Concentrations|CH4', 'unit': 'ppb', 'todo': 'SET',\n 'region': 'World', 'value': ch4_conc})\n", (25428, 25651), True, 'import pandas as pd\n'), ((26261, 26468), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable': varname, 'unit': 'ppt',\n 'todo': 'SET', 'region': 'World', 'value': fgas_conc}"], {}), "({'time': time, 'scenario': 'idealised', 'model': 'unspecified',\n 'climate_model': 'unspecified', 'variable': varname, 'unit': 'ppt',\n 'todo': 'SET', 'region': 'World', 'value': fgas_conc})\n", (26273, 26468), True, 'import pandas as pd\n'), ((26972, 27028), 'f90nml.Namelist', 'f90nml.Namelist', (["{'nml_allcfgs': {'fgas_files_conc': 1}}"], {}), "({'nml_allcfgs': {'fgas_files_conc': 1}})\n", (26987, 27028), False, 'import f90nml\n'), ((29737, 29756), 'collections.Counter', 'Counter', (['keys_lower'], {}), '(keys_lower)\n', (29744, 29756), False, 'from collections import Counter\n'), ((38037, 38100), 'scmdata.run_append', 'run_append', (["[ecs_res['timeseries'], tcr_tcre_res['timeseries']]"], {}), "([ecs_res['timeseries'], tcr_tcre_res['timeseries']])\n", (38047, 38100), False, 'from scmdata import run_append\n'), ((53343, 53357), 'copy.deepcopy', 'deepcopy', (['base'], {}), '(base)\n', (53351, 53357), False, 'from copy import deepcopy\n'), ((1429, 1463), 'shutil.copy', 'shutil.copy', (['full_filename', 'target'], {}), '(full_filename, target)\n', (1440, 1463), False, 'import shutil\n'), ((4560, 4587), 'tempfile.mkdtemp', 'mkdtemp', ([], {'prefix': '"""pymagicc-"""'}), "(prefix='pymagicc-')\n", (4567, 4587), False, 'from tempfile import mkdtemp\n'), ((4712, 4733), 'os.path.exists', 'exists', (['self.root_dir'], {}), '(self.root_dir)\n', (4718, 4733), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((4747, 4770), 'os.makedirs', 'makedirs', (['self.root_dir'], {}), '(self.root_dir)\n', (4755, 4770), False, 'from os import listdir, makedirs\n'), ((5541, 5559), 'os.path.exists', 'exists', (['source_dir'], {}), '(source_dir)\n', (5547, 5559), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((5860, 5886), 'os.path.join', 'join', (['self.root_dir', '"""out"""'], {}), "(self.root_dir, 'out')\n", (5864, 5886), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((7476, 7519), 'os.path.join', 'join', 
(['self.run_dir', '"""MAGCFG_DEFAULTALL.CFG"""'], {}), "(self.run_dir, 'MAGCFG_DEFAULTALL.CFG')\n", (7480, 7519), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((7548, 7585), 'os.path.join', 'join', (['self.run_dir', '"""MAGCFG_USER.CFG"""'], {}), "(self.run_dir, 'MAGCFG_USER.CFG')\n", (7552, 7585), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((9931, 9952), 'os.path.exists', 'exists', (['self.root_dir'], {}), '(self.root_dir)\n', (9937, 9952), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((10943, 10990), 'os.path.join', 'join', (['self.root_dir', 'exec_dir', 'self.binary_name'], {}), '(self.root_dir, exec_dir, self.binary_name)\n', (10947, 10990), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((11589, 11691), 'subprocess.run', 'subprocess.run', (['command'], {'check': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE', 'cwd': 'self.run_dir', 'shell': 'IS_WINDOWS'}), '(command, check=True, stdout=PIPE, stderr=PIPE, cwd=self.\n run_dir, shell=IS_WINDOWS)\n', (11603, 11691), False, 'import subprocess\n'), ((14078, 14095), 'scmdata.run_append', 'run_append', (['mdata'], {}), '(mdata)\n', (14088, 14095), False, 'from scmdata import run_append\n'), ((15363, 15381), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (15376, 15381), False, 'import warnings\n'), ((16590, 16627), 'os.path.join', 'join', (['self.run_dir', '"""MAGCFG_USER.CFG"""'], {}), "(self.run_dir, 'MAGCFG_USER.CFG')\n", (16594, 16627), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((17695, 17719), 'os.path.join', 'join', (['self.run_dir', 'name'], {}), '(self.run_dir, name)\n', (17699, 17719), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((18020, 18039), 'os.path.exists', 'exists', (['param_fname'], {}), '(param_fname)\n', (18026, 18039), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((18792, 18820), 'shutil.rmtree', 'shutil.rmtree', (['self.root_dir'], {}), '(self.root_dir)\n', (18805, 18820), False, 'import shutil\n'), ((21814, 21832), 'f90nml.read', 'f90nml.read', (['fname'], {}), '(fname)\n', (21825, 21832), False, 'import f90nml\n'), ((24167, 24207), 'os.path.join', 'join', (['self.run_dir', 'self._scen_file_name'], {}), '(self.run_dir, self._scen_file_name)\n', (24171, 24207), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((24560, 24581), 'numpy.ones', 'np.ones', (['no_timesteps'], {}), '(no_timesteps)\n', (24567, 24581), True, 'import numpy as np\n'), ((25259, 25296), 'os.path.join', 'join', (['self.run_dir', 'co2_conc_filename'], {}), '(self.run_dir, co2_conc_filename)\n', (25263, 25296), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((25372, 25393), 'numpy.ones', 'np.ones', (['no_timesteps'], {}), '(no_timesteps)\n', (25379, 25393), True, 'import numpy as np\n'), ((26071, 26108), 'os.path.join', 'join', (['self.run_dir', 'ch4_conc_filename'], {}), '(self.run_dir, ch4_conc_filename)\n', (26075, 26108), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((26185, 26206), 'numpy.ones', 'np.ones', (['no_timesteps'], {}), '(no_timesteps)\n', (26192, 26206), True, 'import numpy as np\n'), ((26858, 26896), 'os.path.join', 'join', (['self.run_dir', 'fgas_conc_filename'], {}), '(self.run_dir, fgas_conc_filename)\n', (26862, 26896), False, 'from os.path import abspath, 
basename, dirname, exists, isfile, join\n'), ((43770, 43789), 'openscm_units.unit_registry', 'unit_registry', (['unit'], {}), '(unit)\n', (43783, 43789), False, 'from openscm_units import unit_registry\n'), ((45568, 45591), 'openscm_units.unit_registry', 'unit_registry', (['tcr_unit'], {}), '(tcr_unit)\n', (45581, 45591), False, 'from openscm_units import unit_registry\n'), ((45943, 45978), 'numpy.all', 'np.all', (['(years[1:] - years[:-1] == 1)'], {}), '(years[1:] - years[:-1] == 1)\n', (45949, 45978), True, 'import numpy as np\n'), ((46385, 46409), 'openscm_units.unit_registry', 'unit_registry', (['emms_unit'], {}), '(emms_unit)\n', (46398, 46409), False, 'from openscm_units import unit_registry\n'), ((46412, 46431), 'openscm_units.unit_registry', 'unit_registry', (['"""yr"""'], {}), "('yr')\n", (46425, 46431), False, 'from openscm_units import unit_registry\n'), ((48173, 48195), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(1)'}), '(years=1)\n', (48186, 48195), False, 'from dateutil.relativedelta import relativedelta\n'), ((48232, 48255), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(70)'}), '(years=70)\n', (48245, 48255), False, 'from dateutil.relativedelta import relativedelta\n'), ((50476, 50499), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(70)'}), '(years=70)\n', (50489, 50499), False, 'from dateutil.relativedelta import relativedelta\n'), ((50997, 51030), 'numpy.all', 'np.all', (['(rf_minus_previous_yr >= 0)'], {}), '(rf_minus_previous_yr >= 0)\n', (51003, 51030), True, 'import numpy as np\n'), ((51728, 51762), 'numpy.all', 'np.all', (['(tmp_minus_previous_yr >= 0)'], {}), '(tmp_minus_previous_yr >= 0)\n', (51734, 51762), True, 'import numpy as np\n'), ((53198, 53244), 'os.path.join', 'join', (['self.run_dir', '"""MAGCFG_DEFAULTALL_69.CFG"""'], {}), "(self.run_dir, 'MAGCFG_DEFAULTALL_69.CFG')\n", (53202, 53244), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((53273, 53310), 'os.path.join', 'join', (['self.run_dir', '"""MAGCFG_USER.CFG"""'], {}), "(self.run_dir, 'MAGCFG_USER.CFG')\n", (53277, 53310), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((53851, 53874), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(70)'}), '(years=70)\n', (53864, 53874), False, 'from dateutil.relativedelta import relativedelta\n'), ((54190, 54203), 'numpy.arange', 'np.arange', (['(71)'], {}), '(71)\n', (54199, 54203), True, 'import numpy as np\n'), ((4122, 4145), 'os.path.isfile', 'isfile', (['self.executable'], {}), '(self.executable)\n', (4128, 4145), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((5492, 5524), 'os.path.join', 'join', (['self.original_dir', '""".."""', 'd'], {}), "(self.original_dir, '..', d)\n", (5496, 5524), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((14805, 14826), 'os.listdir', 'listdir', (['self.out_dir'], {}), '(self.out_dir)\n', (14812, 14826), False, 'from os import listdir, makedirs\n'), ((18179, 18200), 'f90nml.read', 'f90nml.read', (['nml_file'], {}), '(nml_file)\n', (18190, 18200), False, 'import f90nml\n'), ((49057, 49115), 'numpy.isclose', 'np.isclose', (['actual_rise_co2_concs', 'expected_rise_co2_concs'], {}), '(actual_rise_co2_concs, expected_rise_co2_concs)\n', (49067, 49115), True, 'import numpy as np\n'), ((54230, 54274), 'numpy.isclose', 'np.isclose', (['actual_rise_rf', 'expected_rise_rf'], {}), 
'(actual_rise_rf, expected_rise_rf)\n', (54240, 54274), True, 'import numpy as np\n'), ((5642, 5664), 'os.path.join', 'join', (['self.root_dir', 'd'], {}), '(self.root_dir, d)\n', (5646, 5664), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((47547, 47588), 'numpy.isclose', 'np.isclose', (['eqm_co2_concs', 'co2_conc_final'], {}), '(eqm_co2_concs, co2_conc_final)\n', (47557, 47588), True, 'import numpy as np\n'), ((13080, 13108), 'os.path.join', 'join', (['self.out_dir', 'filepath'], {}), '(self.out_dir, filepath)\n', (13084, 13108), False, 'from os.path import abspath, basename, dirname, exists, isfile, join\n'), ((13118, 13137), 'copy.deepcopy', 'deepcopy', (['read_cols'], {}), '(read_cols)\n', (13126, 13137), False, 'from copy import deepcopy\n'), ((52403, 52462), 'f90nml.Namelist', 'f90nml.Namelist', (["{'nml_allcfgs': {'file_emisscen': 'junk'}}"], {}), "({'nml_allcfgs': {'file_emisscen': 'junk'}})\n", (52418, 52462), False, 'import f90nml\n')]
|
import json
import re
import collections
import os
import csv
from csv import writer
import boto3
import pandas as pd
import numpy as np
# boto3 S3 initialization
s3_client = boto3.client("s3")
def lambda_handler(event, context):
# TODO implement
bucketname = 'sourcedatab00870639'
# event contains all information about uploaded object
print("Event :", event)
# Bucket Name where file was uploaded
sourcebucket = event['Records'][0]['s3']['bucket']['name']
# Filename of object (with path)
file_key_name = event['Records'][0]['s3']['object']['key']
input_file = os.path.join(sourcebucket, file_key_name)
# Start the function that processes the incoming data.
bucket = bucketname
key = file_key_name
response = s3_client.get_object(Bucket=sourcebucket, Key=file_key_name)
content = response['Body'].read().decode('utf-8')
x = content.split()
stopwords = ['ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during', 'out',
'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours', 'such',
'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from', 'him',
'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through', 'don',
'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while',
'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them',
'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because',
'what', 'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has',
'just', 'where', 'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being',
'if', 'theirs', 'my', 'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than']
stop_words = set(stopwords)
tokens_without_sw = [w for w in x if w not in stop_words]
current_word = []
next_word = []
data_list = [['Current_Word', 'Next_Word', 'Levenshtein_distance']]
def levenshteindistance(var1, var2):
size_x = len(var1) + 1
size_y = len(var2) + 1
matrix = np.zeros((size_x, size_y))
for x in range(size_x):
matrix[x, 0] = x
for y in range(size_y):
matrix[0, y] = y
for x in range(1, size_x):
for y in range(1, size_y):
                if var1[x - 1] == var2[y - 1]:
matrix[x, y] = min(matrix[x - 1, y] + 1, matrix[x - 1, y - 1], matrix[x, y - 1] + 1)
else:
matrix[x, y] = min(matrix[x - 1, y] + 1, matrix[x - 1, y - 1] + 1, matrix[x, y - 1] + 1)
return (matrix[size_x - 1, size_y - 1])
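    # Sanity check (illustrative, not part of the original handler): with the
    # Wagner-Fischer recurrence above, levenshteindistance("kitten", "sitting") == 3.0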
for i in range(len(tokens_without_sw) - 1):
data_list.append([tokens_without_sw[i], tokens_without_sw[i + 1],
levenshteindistance(tokens_without_sw[i], tokens_without_sw[i + 1])])
print(tokens_without_sw)
df = pd.DataFrame(data_list)
bytes_to_write = df.to_csv(None, header=None, index=False).encode()
file_name = "testVector.csv"
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketname)
key = file_name
ans = []
    current_data = s3_client.get_object(Bucket=bucketname, Key=file_name)
    # csv.reader needs an iterable of text lines, not the raw boto3 response dict
    lines = csv.reader(current_data['Body'].read().decode('utf-8').splitlines())
for row in lines:
ans.append(row)
for d in data_list:
ans.append(d)
file_name = "trainVector.csv"
    # get_object/put_object are client methods, not resource methods; words_list was
    # undefined in the original, data_list (the newly computed rows) is assumed here
    resfile = s3_client.get_object(Bucket="sourcedatab00870639", Key=file_name)
    restext = resfile["Body"].read().decode('utf-8')
    updated_data = restext + "\n" + "\n".join(str(item).strip('[]') for item in data_list)
    s3_client.put_object(Body=updated_data, Bucket="sourcedatab00870639", Key=file_name)
    print(updated_data)
|
[
"boto3.client",
"os.path.join",
"numpy.zeros",
"boto3.resource",
"pandas.DataFrame",
"csv.reader"
] |
[((181, 199), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (193, 199), False, 'import boto3\n'), ((625, 666), 'os.path.join', 'os.path.join', (['sourcebucket', 'file_key_name'], {}), '(sourcebucket, file_key_name)\n', (637, 666), False, 'import os\n'), ((2414, 2440), 'numpy.zeros', 'np.zeros', (['(size_x, size_y)'], {}), '((size_x, size_y))\n', (2422, 2440), True, 'import numpy as np\n'), ((3336, 3356), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (3350, 3356), False, 'import boto3\n'), ((3512, 3536), 'csv.reader', 'csv.reader', (['current_data'], {}), '(current_data)\n', (3522, 3536), False, 'import csv\n'), ((3189, 3212), 'pandas.DataFrame', 'pd.DataFrame', (['data_list'], {}), '(data_list)\n', (3201, 3212), True, 'import pandas as pd\n')]
|
"""
Friends-of-Friends (FOF) for N-body simulations
<NAME> - Oct 2016
"""
from __future__ import absolute_import, print_function
from lizard.periodic import pad_unitcube
from scipy.spatial import Delaunay
from scipy.sparse import csr_matrix, csgraph
from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, \
arange, searchsorted, bincount, sort, diff, int8, argsort, array
from lizard.log import MarkUp, null_log
def fof_groups(pos, b, log=null_log):
"""
    Friends-of-Friends on the periodic unit cube
pos - (n,ndim) positions in [0,1]^ndim
b - linking length
returns labels - (n,) array of integers for each connected component.
    This FoF algorithm computes the fixed-radius connectivity by computing the
    Delaunay tessellation (DT) of the points and then breaking those links that
    are too long.
    The reason this works is that the Relative Neighbourhood Graph (RNG) is a
    subgraph of the DT, and so any pair of points separated by a distance R will
    be connected by links of length < R, so it is enough to use the DT to
    establish connectivity.
"""
print('Padding the unit cube', file=log)
pad_idx, pad_pos = pad_unitcube(pos, b)
all_pos = concatenate((pos, pad_pos), axis=0) + b
all_pos *= 1.0/(1+2*b)
b_scaled = b/(1+2*b)
print('Added {:,} points, performing'.format(len(pad_idx)),
          MarkUp.OKBLUE+'Delaunay tessellation'+MarkUp.ENDC,
'of {:,} points'.format(len(all_pos)), file=log)
dlny = Delaunay(all_pos)
# construct list of links
indptr, indices = dlny.vertex_neighbor_vertices
idx1 = zeros_like(indices)
idx1[indptr[1:-1]] = 1
idx1 = cumsum(idx1)
idx2 = indices
print('{:,} links, disconnecting those with r>%.5f'.format(len(indices))%b, file=log)
# find all links < b using square distance
dist2 = square(all_pos[idx1] - all_pos[idx2]).sum(1)
del dlny
keep = flatnonzero(dist2<float(b_scaled*b_scaled))
idx1, idx2 = idx1[keep], idx2[keep]
print('{:,} links left, removing periodic images'.format(len(idx1)), file=log)
# Make the map back to the original IDs
old_id = arange(len(all_pos))
old_id[len(pos):] = pad_idx
idx1, idx2 = old_id[idx1], old_id[idx2]
# remove repeats
idx_sort = argsort(idx1*len(pos)+idx2)
idx1,idx2 = idx1[idx_sort], idx2[idx_sort]
if len(idx1)>0:
keep = array([0] + list(flatnonzero(diff(idx1) | diff(idx2))+1), dtype=idx2.dtype)
idx1, idx2 = idx1[keep], idx2[keep]
# make a sparse matrix of connectivity
print('{:,} links, building sparse matrix'.format(len(idx1)), file=log)
indices = idx2
indptr = searchsorted(idx1, arange(len(pos)+1))
mat = csr_matrix((ones(len(indices), dtype=int8), indices, indptr),
shape=(len(pos), len(pos)))
print('Finding connected components',file=log)
n_comps, labels = csgraph.connected_components(mat, directed=False)
print('From {:,} links between {:,} points found {:,} connected components'.format(len(idx1), len(pos), n_comps), file=log)
show_largest = min(n_comps, 3)
npts = sort(bincount(labels))[-show_largest:]
print('{:,} largest'.format(show_largest), MarkUp.OKBLUE+'FoF groups'+MarkUp.ENDC,
'have', MarkUp.OKBLUE+' '.join('{:,}'.format(i) for i in npts),
'points'+MarkUp.ENDC, file=log)
return labels
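# Minimal usage sketch (inputs assumed, not from the original tests): positions must
# already be scaled into the unit cube and the linking length b is in the same
# scaled units, e.g. for n^3 roughly uniformly placed points,
#   labels = fof_groups(np.random.rand(n**3, 3), b=0.2 / n)
# where np is numpy; labels[i] is the connected component index of point i.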
def test_labels():
""" Test with some 64^3 data """
from lizard.log import VerboseTimingLog
log = VerboseTimingLog()
import numpy as np
parts = np.load('/mainvol/peter.creasey/bigdata/runs/test_const_pmkick/out/lizard_snap_134.npz')
pos = parts['pos']
boxsize = 5600
nbox = len(pos)**(1.0/3.0)
print(pos.max(axis=0), boxsize, nbox, file=log)
labels = fof_groups(pos*(1.0/boxsize), b=0.2/nbox, log=log)
print('labels in', labels.min(), labels.max(), file=log)
bins = np.bincount(labels)
part_lim = 20 # ignore anything with < part_lim particles
NO_FOF = labels.max()+1
newlab = np.where(bins[labels]<part_lim, NO_FOF, np.arange(len(bins))[labels])
bins = bincount(newlab)
halo_counts = sort(bins[:NO_FOF-1])
print('halo counts', halo_counts[-10:][::-1], file=log)
# Top 10
idx = []
lab_sort = np.argsort(bins[:NO_FOF-1])
import pylab as pl
for i in range(50):
lab = lab_sort[-i-1]
idx_i = np.flatnonzero(labels==lab)
pl.plot(pos[idx_i][:,2], pos[idx_i][:,1], marker=',', ls='none')
pl.xlim(0,5600)
pl.ylim(0,5600)
pl.show()
def test_random_dist(n=64):
""" Random n^3 point placement """
from lizard.log import VerboseTimingLog
log = VerboseTimingLog()
from numpy.random import RandomState
rs = RandomState(seed=123)
pos = rs.rand(3*(n**3)).reshape((n**3,3))
fof_labels = fof_groups(pos, b=0.2/n, log=log)
if __name__=='__main__':
# test_labels()
test_random_dist(n=100)
|
[
"numpy.argsort",
"numpy.random.RandomState",
"pylab.ylim",
"lizard.periodic.pad_unitcube",
"pylab.plot",
"numpy.sort",
"numpy.flatnonzero",
"numpy.diff",
"lizard.log.VerboseTimingLog",
"pylab.xlim",
"numpy.concatenate",
"numpy.square",
"numpy.bincount",
"pylab.show",
"scipy.sparse.csgraph.connected_components",
"scipy.spatial.Delaunay",
"numpy.cumsum",
"numpy.load",
"numpy.zeros_like"
] |
[((1200, 1220), 'lizard.periodic.pad_unitcube', 'pad_unitcube', (['pos', 'b'], {}), '(pos, b)\n', (1212, 1220), False, 'from lizard.periodic import pad_unitcube\n'), ((1523, 1540), 'scipy.spatial.Delaunay', 'Delaunay', (['all_pos'], {}), '(all_pos)\n', (1531, 1540), False, 'from scipy.spatial import Delaunay\n'), ((1635, 1654), 'numpy.zeros_like', 'zeros_like', (['indices'], {}), '(indices)\n', (1645, 1654), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((1693, 1705), 'numpy.cumsum', 'cumsum', (['idx1'], {}), '(idx1)\n', (1699, 1705), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((2927, 2976), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['mat'], {'directed': '(False)'}), '(mat, directed=False)\n', (2955, 2976), False, 'from scipy.sparse import csr_matrix, csgraph\n'), ((3534, 3552), 'lizard.log.VerboseTimingLog', 'VerboseTimingLog', ([], {}), '()\n', (3550, 3552), False, 'from lizard.log import VerboseTimingLog\n'), ((3588, 3686), 'numpy.load', 'np.load', (['"""/mainvol/peter.creasey/bigdata/runs/test_const_pmkick/out/lizard_snap_134.npz"""'], {}), "(\n '/mainvol/peter.creasey/bigdata/runs/test_const_pmkick/out/lizard_snap_134.npz'\n )\n", (3595, 3686), True, 'import numpy as np\n'), ((3941, 3960), 'numpy.bincount', 'np.bincount', (['labels'], {}), '(labels)\n', (3952, 3960), True, 'import numpy as np\n'), ((4150, 4166), 'numpy.bincount', 'bincount', (['newlab'], {}), '(newlab)\n', (4158, 4166), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((4185, 4208), 'numpy.sort', 'sort', (['bins[:NO_FOF - 1]'], {}), '(bins[:NO_FOF - 1])\n', (4189, 4208), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((4309, 4338), 'numpy.argsort', 'np.argsort', (['bins[:NO_FOF - 1]'], {}), '(bins[:NO_FOF - 1])\n', (4319, 4338), True, 'import numpy as np\n'), ((4543, 4559), 'pylab.xlim', 'pl.xlim', (['(0)', '(5600)'], {}), '(0, 5600)\n', (4550, 4559), True, 'import pylab as pl\n'), ((4563, 4579), 'pylab.ylim', 'pl.ylim', (['(0)', '(5600)'], {}), '(0, 5600)\n', (4570, 4579), True, 'import pylab as pl\n'), ((4583, 4592), 'pylab.show', 'pl.show', ([], {}), '()\n', (4590, 4592), True, 'import pylab as pl\n'), ((4715, 4733), 'lizard.log.VerboseTimingLog', 'VerboseTimingLog', ([], {}), '()\n', (4731, 4733), False, 'from lizard.log import VerboseTimingLog\n'), ((4785, 4806), 'numpy.random.RandomState', 'RandomState', ([], {'seed': '(123)'}), '(seed=123)\n', (4796, 4806), False, 'from numpy.random import RandomState\n'), ((1235, 1270), 'numpy.concatenate', 'concatenate', (['(pos, pad_pos)'], {'axis': '(0)'}), '((pos, pad_pos), axis=0)\n', (1246, 1270), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((4429, 4458), 'numpy.flatnonzero', 'np.flatnonzero', (['(labels == lab)'], {}), '(labels == lab)\n', (4443, 4458), True, 'import numpy as np\n'), ((4474, 4540), 'pylab.plot', 'pl.plot', (['pos[idx_i][:, 2]', 'pos[idx_i][:, 1]'], {'marker': '""","""', 'ls': '"""none"""'}), "(pos[idx_i][:, 2], pos[idx_i][:, 1], marker=',', ls='none')\n", (4481, 4540), True, 
'import pylab as pl\n'), ((1876, 1913), 'numpy.square', 'square', (['(all_pos[idx1] - all_pos[idx2])'], {}), '(all_pos[idx1] - all_pos[idx2])\n', (1882, 1913), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((3158, 3174), 'numpy.bincount', 'bincount', (['labels'], {}), '(labels)\n', (3166, 3174), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((2446, 2456), 'numpy.diff', 'diff', (['idx1'], {}), '(idx1)\n', (2450, 2456), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n'), ((2459, 2469), 'numpy.diff', 'diff', (['idx2'], {}), '(idx2)\n', (2463, 2469), False, 'from numpy import square, flatnonzero, ones, zeros_like, cumsum, concatenate, arange, searchsorted, bincount, sort, diff, int8, argsort, array\n')]
|
import numpy as np
from numpy.core.fromnumeric import size
class BrownionPathGen:
def __init__(self, NumPaths, Maturity):
self.NumPaths = NumPaths
self.Maturity = Maturity # this is in days
    # This is not optimal: we could generate one matrix of standard normals up front and
    # rescale it to the desired mean and std, but for now leave it as it is.
def GenerateCrossSection(self, Last_Mean, DiffTime):
Normals = np.random.standard_normal(size=[self.NumPaths, 1])
        # TODO: would have to adjust for leap years
        # The time elapsed between two cross-sections is DiffTime days, so the variance is proportional to DiffTime
Var = DiffTime/365
Std = Var**0.5
        # The next cross-section is the last cross-section plus Std * standard normals,
        # which gives a normal distribution with mean equal to the last cross-section and std equal to Std.
Adjusted_Normals = Std*Normals+Last_Mean
return Adjusted_Normals
def GeneratePaths(self):
Path = np.zeros([self.NumPaths, 1])
Paths = [Path]
        # TODO: find a matrix operation to do this; it would be much faster.
        # Maturity is a number of days for now, but should be a date compared against the global date.
for i in range(0, self.Maturity - 1):
# this difftime is for now 1 but we may change it in future to make it more advance
Paths.append(self.GenerateCrossSection(
Last_Mean=Paths[i], DiffTime=1))
return Paths
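# Minimal usage sketch (hypothetical, not part of the original module): simulate
# 1000 paths over a 30-day horizon; each entry of the returned list is an
# (NumPaths, 1) array and the variance grows by DiffTime/365 per step, as implemented above.
if __name__ == "__main__":
    gen = BrownionPathGen(NumPaths=1000, Maturity=30)
    paths = gen.GeneratePaths()
    print(len(paths), float(paths[-1].mean()), float(paths[-1].std()))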
|
[
"numpy.random.standard_normal",
"numpy.zeros"
] |
[((453, 503), 'numpy.random.standard_normal', 'np.random.standard_normal', ([], {'size': '[self.NumPaths, 1]'}), '(size=[self.NumPaths, 1])\n', (478, 503), True, 'import numpy as np\n'), ((1037, 1065), 'numpy.zeros', 'np.zeros', (['[self.NumPaths, 1]'], {}), '([self.NumPaths, 1])\n', (1045, 1065), True, 'import numpy as np\n')]
|
"""
Module defining transfer functions
"""
from typing import List, Optional, Dict, Any, Union
from pydantic import validator, constr
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from resistics.common import Metadata
class Component(Metadata):
"""
Data class for a single component in a Transfer function
Example
-------
>>> from resistics.transfunc import Component
>>> component = Component(real=[1, 2, 3, 4, 5], imag=[-5, -4, -3, -2 , -1])
>>> component.get_value(0)
(1-5j)
>>> component.to_numpy()
array([1.-5.j, 2.-4.j, 3.-3.j, 4.-2.j, 5.-1.j])
"""
real: List[float]
"""The real part of the component"""
imag: List[float]
"""The complex part of the component"""
def get_value(self, eval_idx: int) -> complex:
"""Get the value for an evaluation frequency"""
return self.real[eval_idx] + 1j * self.imag[eval_idx]
def to_numpy(self) -> np.ndarray:
"""Get the component as a numpy complex array"""
return np.array(self.real) + 1j * np.array(self.imag)
def get_component_key(out_chan: str, in_chan: str) -> str:
"""
Get key for out channel and in channel combination in the solution
Parameters
----------
out_chan : str
The output channel
in_chan : str
The input channel
Returns
-------
str
The component key
Examples
--------
>>> from resistics.regression import get_component_key
>>> get_component_key("Ex", "Hy")
'ExHy'
"""
return f"{out_chan}{in_chan}"
class TransferFunction(Metadata):
"""
Define a generic transfer function
This class is a describes generic transfer function, including:
- The output channels for the transfer function
- The input channels for the transfer function
- The cross channels for the transfer function
The cross channels are the channels that will be used to calculate out the
cross powers for the regression.
This generic parent class has no implemented plotting function. However,
child classes may have a plotting function as different transfer functions
may need different types of plots.
.. note::
Users interested in writing a custom transfer function should inherit
from this generic Transfer function
See Also
--------
    ImpedanceTensor : Transfer function for the MT impedance tensor
Tipper : Transfer function for the MT tipper
Examples
--------
A generic example
>>> tf = TransferFunction(variation="example", out_chans=["bye", "see you", "ciao"], in_chans=["hello", "hi_there"])
>>> print(tf.to_string())
| bye | | bye_hello bye_hi_there | | hello |
| see you | = | see you_hello see you_hi_there | | hi_there |
| ciao | | ciao_hello ciao_hi_there |
Combining the impedance tensor and the tipper into one TransferFunction
>>> tf = TransferFunction(variation="combined", out_chans=["Ex", "Ey"], in_chans=["Hx", "Hy", "Hz"])
>>> print(tf.to_string())
| Ex | | Ex_Hx Ex_Hy Ex_Hz | | Hx |
| Ey | = | Ey_Hx Ey_Hy Ey_Hz | | Hy |
| Hz |
"""
_types: Dict[str, type] = {}
"""Store types which will help automatic instantiation"""
name: Optional[str] = None
"""The name of the transfer function, this will be set automatically"""
variation: constr(max_length=16) = "generic"
"""A short additional bit of information about this variation"""
out_chans: List[str]
"""The output channels"""
in_chans: List[str]
"""The input channels"""
cross_chans: Optional[List[str]] = None
"""The channels to use for calculating the cross spectra"""
n_out: Optional[int] = None
"""The number of output channels"""
n_in: Optional[int] = None
"""The number of input channels"""
n_cross: Optional[int] = None
"""The number of cross power channels"""
def __init_subclass__(cls) -> None:
"""
Used to automatically register child transfer functions in `_types`
When a TransferFunction child class is imported, it is added to the base
TransferFunction _types variable. Later, this dictionary of class types
can be used to initialise a specific child transfer function from a
dictonary as long as that specific child transfer fuction has already
been imported and it is called from a pydantic class that will validate
the inputs.
The intention of this method is to support initialising transfer
functions from JSON files. This is a similar approach to
ResisticsProcess.
"""
cls._types[cls.__name__] = cls
@classmethod
def __get_validators__(cls):
"""Get the validators that will be used by pydantic"""
yield cls.validate
@classmethod
def validate(
cls, value: Union["TransferFunction", Dict[str, Any]]
) -> "TransferFunction":
"""
Validate a TransferFunction
Parameters
----------
value : Union[TransferFunction, Dict[str, Any]]
A TransferFunction child class or a dictionary
Returns
-------
TransferFunction
A TransferFunction or TransferFunction child class
Raises
------
ValueError
If the value is neither a TransferFunction or a dictionary
KeyError
If name is not in the dictionary
ValueError
If initialising from dictionary fails
Examples
--------
The following example will show how a child TransferFunction class
can be instantiated using a dictionary and the parent TransferFunction
(but only as long as that child class has been imported).
>>> from resistics.transfunc import TransferFunction
Show known TransferFunction types in built into resistics
>>> for entry in TransferFunction._types.items():
... print(entry)
('ImpedanceTensor', <class 'resistics.transfunc.ImpedanceTensor'>)
('Tipper', <class 'resistics.transfunc.Tipper'>)
Now let's initialise an ImpedanceTensor from the base TransferFunction
and a dictionary.
>>> mytf = {"name": "ImpedanceTensor", "variation": "ecross", "cross_chans": ["Ex", "Ey"]}
>>> test = TransferFunction(**mytf)
Traceback (most recent call last):
...
KeyError: 'out_chans'
This is not quite what we were expecting. The generic TransferFunction
requires out_chans to be defined, but they are not in the dictionary as
the ImpedanceTensor child class defaults these. To get this to work,
instead use the validate class method. This is the class method used by
pydantic when instantiating.
>>> mytf = {"name": "ImpedanceTensor", "variation": "ecross", "cross_chans": ["Ex", "Ey"]}
>>> test = TransferFunction.validate(mytf)
>>> test.summary()
{
'name': 'ImpedanceTensor',
'variation': 'ecross',
'out_chans': ['Ex', 'Ey'],
'in_chans': ['Hx', 'Hy'],
'cross_chans': ['Ex', 'Ey'],
'n_out': 2,
'n_in': 2,
'n_cross': 2
}
That's more like it. This will raise errors if an unknown type of
TransferFunction is received.
>>> mytf = {"name": "NewTF", "cross_chans": ["Ex", "Ey"]}
>>> test = TransferFunction.validate(mytf)
Traceback (most recent call last):
...
ValueError: Unable to initialise NewTF from dictionary
Or if the dictionary does not have a name key
>>> mytf = {"cross_chans": ["Ex", "Ey"]}
>>> test = TransferFunction.validate(mytf)
Traceback (most recent call last):
...
KeyError: 'No name provided for initialisation of TransferFunction'
Unexpected inputs will also raise an error
>>> test = TransferFunction.validate(5)
Traceback (most recent call last):
...
ValueError: TransferFunction unable to initialise from <class 'int'>
"""
if isinstance(value, TransferFunction):
return value
if not isinstance(value, dict):
raise ValueError(
f"TransferFunction unable to initialise from {type(value)}"
)
if "name" not in value:
raise KeyError("No name provided for initialisation of TransferFunction")
# check if it is a TransferFunction
name = value.pop("name")
if name == "TransferFunction":
return cls(**value)
# check other known Transfer Functions
try:
return cls._types[name](**value)
except Exception:
raise ValueError(f"Unable to initialise {name} from dictionary")
@validator("name", always=True)
def validate_name(cls, value: Union[str, None]) -> str:
"""Inialise the name attribute of the transfer function"""
if value is None:
return cls.__name__
return value
@validator("cross_chans", always=True)
def validate_cross_chans(
cls, value: Union[None, List[str]], values: Dict[str, Any]
) -> List[str]:
"""Validate cross spectra channels"""
if value is None:
return values["in_chans"]
return value
@validator("n_out", always=True)
def validate_n_out(cls, value: Union[None, int], values: Dict[str, Any]) -> int:
"""Validate number of output channels"""
if value is None:
return len(values["out_chans"])
return value
@validator("n_in", always=True)
def validate_n_in(cls, value: Union[None, int], values: Dict[str, Any]) -> int:
"""Validate number of input channels"""
if value is None:
return len(values["in_chans"])
return value
@validator("n_cross", always=True)
def validate_n_cross(cls, value: Union[None, int], values: Dict[str, Any]) -> int:
"""Validate number of cross channels"""
if value is None:
return len(values["cross_chans"])
return value
def n_eqns_per_output(self) -> int:
"""Get the number of equations per output"""
return len(self.cross_chans)
def n_regressors(self) -> int:
"""Get the number of regressors"""
return self.n_in
def to_string(self):
"""Get the transfer function as as string"""
n_lines = max(len(self.in_chans), len(self.out_chans))
lens = [len(x) for x in self.in_chans] + [len(x) for x in self.out_chans]
max_len = max(lens)
line_equals = (n_lines - 1) // 2
outstr = ""
for il in range(n_lines):
out_chan = self._out_chan_string(il, max_len)
in_chan = self._in_chan_string(il, max_len)
tensor = self._tensor_string(il, max_len)
eq = "=" if il == line_equals else " "
outstr += f"{out_chan} {eq} {tensor} {in_chan}\n"
return outstr.rstrip("\n")
def _out_chan_string(self, il: int, max_len: int) -> str:
"""Get the out channels string"""
if il >= self.n_out:
empty_len = max_len + 4
return f"{'':{empty_len}s}"
return f"| { self.out_chans[il]:{max_len}s} |"
def _in_chan_string(self, il: int, max_len: int) -> str:
"""Get the in channel string"""
if il >= self.n_in:
return ""
return f"| { self.in_chans[il]:{max_len}s} |"
def _tensor_string(self, il: int, max_len: int) -> str:
"""Get the tensor string"""
if il >= self.n_out:
element_len = ((max_len * 2 + 1) + 1) * self.n_in + 3
return f"{'':{element_len}s}"
elements = "| "
for chan in self.in_chans:
component = f"{self.out_chans[il]}_{chan}"
elements += f"{component:{2*max_len + 1}s} "
elements += "|"
return elements
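# --- Illustrative sketch (added commentary, not part of the original module) ---
# A generic TransferFunction only needs out_chans and in_chans (the other fields are
# assumed to keep their defaults here); to_string() then renders the
# "out_chans = tensor * in_chans" layout assembled by the three helper methods above.
# Hypothetical channel names, doctest-style:
# >>> tf = TransferFunction(out_chans=["Ex", "Ey"], in_chans=["Hx", "Hy"])
# >>> print(tf.to_string())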
class ImpedanceTensor(TransferFunction):
"""
Standard magnetotelluric impedance tensor
Notes
-----
Information about data units
- Magnetic permeability in nT . m / A
- Electric (E) data is in mV/m
- Magnetic (H) data is in nT
- Z = E/H is in mV / m . nT
- Units of resistance = Ohm = V / A
Examples
--------
>>> from resistics.transfunc import ImpedanceTensor
>>> tf = ImpedanceTensor()
>>> print(tf.to_string())
| Ex | = | Ex_Hx Ex_Hy | | Hx |
| Ey |   | Ey_Hx Ey_Hy | | Hy |
"""
variation: constr(max_length=16) = "default"
out_chans: List[str] = ["Ex", "Ey"]
in_chans: List[str] = ["Hx", "Hy"]
@staticmethod
def get_resistivity(periods: np.ndarray, component: Component) -> np.ndarray:
"""
Get apparent resistivity for a component
Parameters
----------
periods : np.ndarray
The periods of the component
component : Component
The component values
Returns
-------
np.ndarray
Apparent resistivity
"""
squared = np.power(np.absolute(component.to_numpy()), 2)
return 0.2 * periods * squared
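# Added note (not in the original source): with the formula above, a component of
# magnitude 10 at a period of 1 s gives 0.2 * 1 * 10**2 = 20 Ohm m, i.e. apparent
# resistivity scales with the period and the squared magnitude of the component.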
@staticmethod
def get_phase(key: str, component: Component) -> np.ndarray:
"""
Get the phase for the component
.. note::
Components ExHx and ExHy are wrapped around in [0,90]
Parameters
----------
key : str
The component name
component : Component
The component values
Returns
-------
np.ndarray
The phase values
"""
phase = np.angle(component.to_numpy())
# unwrap into specific quadrant and convert to degrees
phase = np.unwrap(phase) * 180 / np.pi
if key == "ExHx" or key == "ExHy":
phase = np.mod(phase, 360) - 180
return phase
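# Added note (not in the original source): np.mod(phase, 360) maps the unwrapped angle
# into [0, 360) and subtracting 180 shifts it into [-180, 180); for example an input of
# 270 degrees becomes np.mod(270, 360) - 180 = 90 degrees for the ExHx/ExHy components.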
@staticmethod
def get_fig(
x_lim: Optional[List[float]] = None,
res_lim: Optional[List[float]] = None,
phs_lim: Optional[List[float]] = None,
) -> go.Figure:
"""
Get a figure for plotting the ImpedanceTensor
Parameters
----------
x_lim : Optional[List[float]], optional
The x limits, to be provided as powers of 10, by default None. For
example, for 0.001, use -3
res_lim : Optional[List[float]], optional
The y limits for resistivity, to be provided as powers of 10, by
default None. For example, for 1000, use 3
phs_lim : Optional[List[float]], optional
The phase limits, by default None
Returns
-------
go.Figure
Plotly figure
"""
from resistics.plot import PLOTLY_MARGIN, PLOTLY_TEMPLATE
fig = make_subplots(
rows=2,
cols=1,
shared_xaxes=True,
vertical_spacing=0.08,
subplot_titles=["Apparent resistivity", "Phase"],
)
# apparent resistivity axes
fig.update_xaxes(type="log", showticklabels=True, row=1, col=1)
fig.update_yaxes(title_text="App. resistivity (Ohm m)", row=1, col=1)
fig.update_yaxes(type="log", row=1, col=1)
if x_lim is not None:
fig.update_xaxes(range=x_lim, row=1, col=1)
if res_lim is not None:
fig.update_yaxes(range=res_lim, row=1, col=1)
# phase axes
fig.update_xaxes(title_text="Period (s)", type="log", row=2, col=1)
fig.update_xaxes(showticklabels=True, row=2, col=1)
# fig.update_yaxes(scaleanchor="x", scaleratio=1, row=1, col=1)
fig.update_yaxes(title_text="Phase (degrees)", row=2, col=1)
if phs_lim is not None:
fig.update_yaxes(range=phs_lim, row=2, col=1)
# update the layout
fig.update_layout(template=PLOTLY_TEMPLATE, margin=dict(PLOTLY_MARGIN))
return fig
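# Illustrative usage sketch (added, not part of the original module); the limits below
# are hypothetical powers of 10 as described in the docstring above:
# >>> fig = ImpedanceTensor.get_fig(x_lim=[-3, 3], res_lim=[0, 4], phs_lim=[0, 90])
# >>> fig.show()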
@staticmethod
def plot(
freqs: List[float],
components: Dict[str, Component],
fig: Optional[go.Figure] = None,
to_plot: Optional[List[str]] = None,
legend: str = "Impedance tensor",
x_lim: Optional[List[float]] = None,
res_lim: Optional[List[float]] = None,
phs_lim: Optional[List[float]] = None,
symbol: Optional[str] = "circle",
) -> go.Figure:
"""
Plot the Impedance tensor
Parameters
----------
freqs : List[float]
The frequencies where the impedance tensor components have been
calculated
components : Dict[str, Component]
The component data
fig : Optional[go.Figure], optional
Figure to add to, by default None
to_plot : Optional[List[str]], optional
The components to plot, by default all of the components of the
impedance tensor
legend : str, optional
Legend prefix for the components, by default "Impedance tensor"
x_lim : Optional[List[float]], optional
The x limits, to be provided as powers of 10, by default None. For
example, for 0.001, use -3. Only used when a figure is not provided.
res_lim : Optional[List[float]], optional
The y limits for resistivity, to be provided as powers of 10, by
default None. For example, for 1000, use 3. Only used when a figure
is not provided.
phs_lim : Optional[List[float]], optional
The phase limits, by default None. Only used when a figure is not
provided.
symbol : Optional[str], optional
The marker symbol to use, by default "circle"
Returns
-------
go.Figure
Plotly figure with the apparent resistivity and phase traces added
"""
if fig is None:
fig = ImpedanceTensor.get_fig(x_lim=x_lim, res_lim=res_lim, phs_lim=phs_lim)
if to_plot is None:
to_plot = ["ExHy", "EyHx", "ExHx", "EyHy"]
periods = np.reciprocal(freqs)
colors = {"ExHx": "orange", "EyHy": "green", "ExHy": "red", "EyHx": "blue"}
for comp in to_plot:
res = ImpedanceTensor.get_resistivity(periods, components[comp])
phs = ImpedanceTensor.get_phase(comp, components[comp])
comp_legend = f"{legend} - {comp}"
scatter = go.Scatter(
x=periods,
y=res,
mode="lines+markers",
marker=dict(color=colors[comp], symbol=symbol),
line=dict(color=colors[comp]),
name=comp_legend,
legendgroup=comp_legend,
)
fig.add_trace(scatter, row=1, col=1)
scatter = go.Scatter(
x=periods,
y=phs,
mode="lines+markers",
marker=dict(color=colors[comp], symbol=symbol),
line=dict(color=colors[comp]),
name=comp_legend,
legendgroup=comp_legend,
showlegend=False,
)
fig.add_trace(scatter, row=2, col=1)
return fig
class Tipper(TransferFunction):
"""
Magnetotelluric tipper
The tipper components are Tx = HzHx and Ty = HzHy
The tipper length is sqrt(Re(Tx)^2 + Re(Ty)^2)
The tipper angle is arctan (Re(Ty)/Re(Tx))
Notes
-----
Information about units
- Tipper T = H/H is dimensionless
Examples
--------
>>> from resistics.transfunc import Tipper
>>> tf = Tipper()
>>> print(tf.to_string())
| Hz | = | Hz_Hx Hz_Hy | | Hx |
                         | Hy |
"""
variation: constr(max_length=16) = "default"
out_chans: List[str] = ["Hz"]
in_chans: List[str] = ["Hx", "Hy"]
def get_length(self, components: Dict[str, Component]) -> np.ndarray:
"""Get the tipper length"""
txRe = components["HzHx"].real
tyRe = components["HzHy"].real
return np.sqrt(np.power(txRe, 2) + np.power(tyRe, 2))
def get_real_angle(self, components: Dict[str, Component]) -> np.ndarray:
"""Get the real angle"""
txRe = np.array(components["HzHx"].real)
tyRe = np.array(components["HzHy"].real)
return np.arctan(tyRe / txRe) * 180 / np.pi
def get_imag_angle(self, components: Dict[str, Component]) -> np.ndarray:
"""Get the imaginary angle"""
txIm = np.array(components["HzHx"].imag)
tyIm = np.array(components["HzHy"].imag)
return np.arctan(tyIm / txIm) * 180 / np.pi
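# Added note (not in the original source): np.arctan returns radians, so the
# 180 / np.pi factor converts to degrees; equal components (e.g. txRe == tyRe) give
# arctan(1) * 180 / pi = 45 degrees.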
def plot(
self,
freqs: List[float],
components: Dict[str, Component],
x_lim: Optional[List[float]] = None,
len_lim: Optional[List[float]] = None,
ang_lim: Optional[List[float]] = None,
) -> go.Figure:
"""
Plot the tipper
.. warning::
This probably needs further checking and verification
Parameters
----------
freqs : List[float]
The x axis frequencies
components : Dict[str, Component]
The component data
x_lim : Optional[List[float]], optional
The x limits, to be provided as powers of 10, by default None. For
example, for 0.001, use -3
len_lim : Optional[List[float]], optional
The y limits for tipper length, to be provided as powers of 10, by
default None. For example, for 1000, use 3
ang_lim : Optional[List[float]], optional
The angle limits, by default None
Returns
-------
go.Figure
Plotly figure
"""
import warnings
from plotly.subplots import make_subplots
warnings.warn("Plotting of tippers needs further verification")
periods = np.reciprocal(freqs)
if x_lim is None:
x_lim = [-3, 5]
if len_lim is None:
len_lim = [-2, 6]
if ang_lim is None:
ang_lim = [-10, 100]
fig = make_subplots(
rows=2,
cols=1,
shared_xaxes=True,
vertical_spacing=0.08,
subplot_titles=["Length", "Angles"],
)
fig.update_layout(width=1000, autosize=True)
# x axes
fig.update_xaxes(title_text="Period (s)", type="log", range=x_lim, row=1, col=1)
fig.update_xaxes(showticklabels=True, row=1, col=1)
fig.update_xaxes(title_text="Period (s)", type="log", range=x_lim, row=2, col=1)
fig.update_xaxes(showticklabels=True, row=2, col=1)
# y axes
fig.update_yaxes(title_text="Tipper length", row=1, col=1)
# fig.update_yaxes(type="log", row=1, col=1)
# fig.update_yaxes(scaleanchor="x", scaleratio=1, row=1, col=1)
fig.update_yaxes(title_text="Angle (degrees)", row=2, col=1)
# plot the tipper length
scatter = go.Scatter(
x=periods,
y=self.get_length(components),
mode="lines+markers",
marker=dict(color="red"),
line=dict(color="red"),
name="Tipper length",
)
fig.add_trace(scatter, row=1, col=1)
# plot the real angle
scatter = go.Scatter(
x=periods,
y=self.get_real_angle(components),
mode="lines+markers",
marker=dict(color="green"),
line=dict(color="green"),
name="Real angle",
)
fig.add_trace(scatter, row=2, col=1)
# plot the imag angle
scatter = go.Scatter(
x=periods,
y=self.get_imag_angle(components),
mode="lines+markers",
marker=dict(color="blue"),
line=dict(color="blue"),
name="Imag angle",
)
fig.add_trace(scatter, row=2, col=1)
return fig
|
[
"plotly.subplots.make_subplots",
"pydantic.validator",
"numpy.reciprocal",
"numpy.power",
"pydantic.constr",
"numpy.unwrap",
"numpy.array",
"warnings.warn",
"numpy.mod",
"numpy.arctan"
] |
[((3479, 3500), 'pydantic.constr', 'constr', ([], {'max_length': '(16)'}), '(max_length=16)\n', (3485, 3500), False, 'from pydantic import validator, constr\n'), ((8972, 9002), 'pydantic.validator', 'validator', (['"""name"""'], {'always': '(True)'}), "('name', always=True)\n", (8981, 9002), False, 'from pydantic import validator, constr\n'), ((9215, 9252), 'pydantic.validator', 'validator', (['"""cross_chans"""'], {'always': '(True)'}), "('cross_chans', always=True)\n", (9224, 9252), False, 'from pydantic import validator, constr\n'), ((9507, 9538), 'pydantic.validator', 'validator', (['"""n_out"""'], {'always': '(True)'}), "('n_out', always=True)\n", (9516, 9538), False, 'from pydantic import validator, constr\n'), ((9770, 9800), 'pydantic.validator', 'validator', (['"""n_in"""'], {'always': '(True)'}), "('n_in', always=True)\n", (9779, 9800), False, 'from pydantic import validator, constr\n'), ((10029, 10062), 'pydantic.validator', 'validator', (['"""n_cross"""'], {'always': '(True)'}), "('n_cross', always=True)\n", (10038, 10062), False, 'from pydantic import validator, constr\n'), ((12687, 12708), 'pydantic.constr', 'constr', ([], {'max_length': '(16)'}), '(max_length=16)\n', (12693, 12708), False, 'from pydantic import validator, constr\n'), ((19821, 19842), 'pydantic.constr', 'constr', ([], {'max_length': '(16)'}), '(max_length=16)\n', (19827, 19842), False, 'from pydantic import validator, constr\n'), ((14981, 15106), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.08)', 'subplot_titles': "['Apparent resistivity', 'Phase']"}), "(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.08,\n subplot_titles=['Apparent resistivity', 'Phase'])\n", (14994, 15106), False, 'from plotly.subplots import make_subplots\n'), ((18166, 18186), 'numpy.reciprocal', 'np.reciprocal', (['freqs'], {}), '(freqs)\n', (18179, 18186), True, 'import numpy as np\n'), ((20306, 20339), 'numpy.array', 'np.array', (["components['HzHx'].real"], {}), "(components['HzHx'].real)\n", (20314, 20339), True, 'import numpy as np\n'), ((20355, 20388), 'numpy.array', 'np.array', (["components['HzHy'].real"], {}), "(components['HzHy'].real)\n", (20363, 20388), True, 'import numpy as np\n'), ((20573, 20606), 'numpy.array', 'np.array', (["components['HzHx'].imag"], {}), "(components['HzHx'].imag)\n", (20581, 20606), True, 'import numpy as np\n'), ((20622, 20655), 'numpy.array', 'np.array', (["components['HzHy'].imag"], {}), "(components['HzHy'].imag)\n", (20630, 20655), True, 'import numpy as np\n'), ((21894, 21957), 'warnings.warn', 'warnings.warn', (['"""Plotting of tippers needs further verification"""'], {}), "('Plotting of tippers needs further verification')\n", (21907, 21957), False, 'import warnings\n'), ((21977, 21997), 'numpy.reciprocal', 'np.reciprocal', (['freqs'], {}), '(freqs)\n', (21990, 21997), True, 'import numpy as np\n'), ((22186, 22298), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.08)', 'subplot_titles': "['Length', 'Angles']"}), "(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.08,\n subplot_titles=['Length', 'Angles'])\n", (22199, 22298), False, 'from plotly.subplots import make_subplots\n'), ((1063, 1082), 'numpy.array', 'np.array', (['self.real'], {}), '(self.real)\n', (1071, 1082), True, 'import numpy as np\n'), ((1090, 1109), 'numpy.array', 'np.array', (['self.imag'], {}), '(self.imag)\n', (1098, 1109), True, 'import 
numpy as np\n'), ((13927, 13943), 'numpy.unwrap', 'np.unwrap', (['phase'], {}), '(phase)\n', (13936, 13943), True, 'import numpy as np\n'), ((14021, 14039), 'numpy.mod', 'np.mod', (['phase', '(360)'], {}), '(phase, 360)\n', (14027, 14039), True, 'import numpy as np\n'), ((20140, 20157), 'numpy.power', 'np.power', (['txRe', '(2)'], {}), '(txRe, 2)\n', (20148, 20157), True, 'import numpy as np\n'), ((20160, 20177), 'numpy.power', 'np.power', (['tyRe', '(2)'], {}), '(tyRe, 2)\n', (20168, 20177), True, 'import numpy as np\n'), ((20404, 20426), 'numpy.arctan', 'np.arctan', (['(tyRe / txRe)'], {}), '(tyRe / txRe)\n', (20413, 20426), True, 'import numpy as np\n'), ((20671, 20693), 'numpy.arctan', 'np.arctan', (['(tyIm / txIm)'], {}), '(tyIm / txIm)\n', (20680, 20693), True, 'import numpy as np\n')]
|
import numpy
import pytest
import orthopy
import quadpy
from helpers import check_degree_ortho
schemes = [
quadpy.e2r2.haegemans_piessens_a(),
quadpy.e2r2.haegemans_piessens_b(),
quadpy.e2r2.rabinowitz_richter_1(),
quadpy.e2r2.rabinowitz_richter_2(),
quadpy.e2r2.rabinowitz_richter_3(),
quadpy.e2r2.rabinowitz_richter_4(),
quadpy.e2r2.rabinowitz_richter_5(),
quadpy.e2r2.stroud_4_1(),
quadpy.e2r2.stroud_5_1(),
quadpy.e2r2.stroud_5_2(),
quadpy.e2r2.stroud_7_1(),
quadpy.e2r2.stroud_7_2(),
quadpy.e2r2.stroud_9_1(),
quadpy.e2r2.stroud_11_1(),
quadpy.e2r2.stroud_11_2(),
quadpy.e2r2.stroud_13_1(),
quadpy.e2r2.stroud_15_1(),
quadpy.e2r2.stroud_secrest_5(),
quadpy.e2r2.stroud_secrest_6(),
]
@pytest.mark.parametrize("scheme", schemes)
def test_scheme(scheme, tol=1.0e-14):
assert scheme.points.dtype == numpy.float64, scheme.name
assert scheme.weights.dtype == numpy.float64, scheme.name
# degree = check_degree(
# lambda poly: scheme.integrate(poly),
# integrate_monomial_over_enr2,
# 2,
# scheme.degree + 1,
# tol=tol,
# )
# assert degree == scheme.degree, "{} Observed: {} expected: {}".format(
# scheme.name, degree, scheme.degree
# )
def eval_orthopolys(x):
return numpy.concatenate(
orthopy.e2r2.tree(x, scheme.degree + 1, symbolic=False)
)
vals = scheme.integrate(eval_orthopolys)
# Put vals back into the tree structure:
# len(approximate[k]) == k+1
approximate = [
vals[k * (k + 1) // 2 : (k + 1) * (k + 2) // 2]
for k in range(scheme.degree + 2)
]
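# Added note (not in the original test): the slice bounds are the triangular numbers
# k*(k+1)//2, so e.g. k=2 takes vals[3:6], i.e. the k+1 = 3 coefficients of degree 2.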
exact = [numpy.zeros(k + 1) for k in range(scheme.degree + 2)]
exact[0][0] = numpy.sqrt(numpy.pi)
degree = check_degree_ortho(approximate, exact, abs_tol=tol)
assert degree >= scheme.degree, "{} -- Observed: {}, expected: {}".format(
scheme.name, degree, scheme.degree
)
return
@pytest.mark.parametrize("scheme", [quadpy.e2r2.rabinowitz_richter_1()])
def test_show(scheme):
scheme.show()
return
if __name__ == "__main__":
# scheme_ = quadpy.e2r2.Stroud["7-2"]()
# test_scheme(scheme_, 1.0e-14)
# test_show(scheme_)
from helpers import find_equal
find_equal(schemes)
|
[
"quadpy.e2r2.rabinowitz_richter_3",
"quadpy.e2r2.rabinowitz_richter_4",
"quadpy.e2r2.stroud_15_1",
"numpy.sqrt",
"quadpy.e2r2.haegemans_piessens_b",
"quadpy.e2r2.rabinowitz_richter_1",
"quadpy.e2r2.stroud_4_1",
"quadpy.e2r2.rabinowitz_richter_2",
"quadpy.e2r2.rabinowitz_richter_5",
"quadpy.e2r2.haegemans_piessens_a",
"quadpy.e2r2.stroud_5_1",
"quadpy.e2r2.stroud_secrest_6",
"quadpy.e2r2.stroud_13_1",
"quadpy.e2r2.stroud_5_2",
"quadpy.e2r2.stroud_7_1",
"quadpy.e2r2.stroud_7_2",
"helpers.check_degree_ortho",
"quadpy.e2r2.stroud_11_2",
"quadpy.e2r2.stroud_9_1",
"quadpy.e2r2.stroud_11_1",
"quadpy.e2r2.stroud_secrest_5",
"helpers.find_equal",
"pytest.mark.parametrize",
"numpy.zeros",
"orthopy.e2r2.tree"
] |
[((770, 812), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', 'schemes'], {}), "('scheme', schemes)\n", (793, 812), False, 'import pytest\n'), ((113, 147), 'quadpy.e2r2.haegemans_piessens_a', 'quadpy.e2r2.haegemans_piessens_a', ([], {}), '()\n', (145, 147), False, 'import quadpy\n'), ((153, 187), 'quadpy.e2r2.haegemans_piessens_b', 'quadpy.e2r2.haegemans_piessens_b', ([], {}), '()\n', (185, 187), False, 'import quadpy\n'), ((193, 227), 'quadpy.e2r2.rabinowitz_richter_1', 'quadpy.e2r2.rabinowitz_richter_1', ([], {}), '()\n', (225, 227), False, 'import quadpy\n'), ((233, 267), 'quadpy.e2r2.rabinowitz_richter_2', 'quadpy.e2r2.rabinowitz_richter_2', ([], {}), '()\n', (265, 267), False, 'import quadpy\n'), ((273, 307), 'quadpy.e2r2.rabinowitz_richter_3', 'quadpy.e2r2.rabinowitz_richter_3', ([], {}), '()\n', (305, 307), False, 'import quadpy\n'), ((313, 347), 'quadpy.e2r2.rabinowitz_richter_4', 'quadpy.e2r2.rabinowitz_richter_4', ([], {}), '()\n', (345, 347), False, 'import quadpy\n'), ((353, 387), 'quadpy.e2r2.rabinowitz_richter_5', 'quadpy.e2r2.rabinowitz_richter_5', ([], {}), '()\n', (385, 387), False, 'import quadpy\n'), ((393, 417), 'quadpy.e2r2.stroud_4_1', 'quadpy.e2r2.stroud_4_1', ([], {}), '()\n', (415, 417), False, 'import quadpy\n'), ((423, 447), 'quadpy.e2r2.stroud_5_1', 'quadpy.e2r2.stroud_5_1', ([], {}), '()\n', (445, 447), False, 'import quadpy\n'), ((453, 477), 'quadpy.e2r2.stroud_5_2', 'quadpy.e2r2.stroud_5_2', ([], {}), '()\n', (475, 477), False, 'import quadpy\n'), ((483, 507), 'quadpy.e2r2.stroud_7_1', 'quadpy.e2r2.stroud_7_1', ([], {}), '()\n', (505, 507), False, 'import quadpy\n'), ((513, 537), 'quadpy.e2r2.stroud_7_2', 'quadpy.e2r2.stroud_7_2', ([], {}), '()\n', (535, 537), False, 'import quadpy\n'), ((543, 567), 'quadpy.e2r2.stroud_9_1', 'quadpy.e2r2.stroud_9_1', ([], {}), '()\n', (565, 567), False, 'import quadpy\n'), ((573, 598), 'quadpy.e2r2.stroud_11_1', 'quadpy.e2r2.stroud_11_1', ([], {}), '()\n', (596, 598), False, 'import quadpy\n'), ((604, 629), 'quadpy.e2r2.stroud_11_2', 'quadpy.e2r2.stroud_11_2', ([], {}), '()\n', (627, 629), False, 'import quadpy\n'), ((635, 660), 'quadpy.e2r2.stroud_13_1', 'quadpy.e2r2.stroud_13_1', ([], {}), '()\n', (658, 660), False, 'import quadpy\n'), ((666, 691), 'quadpy.e2r2.stroud_15_1', 'quadpy.e2r2.stroud_15_1', ([], {}), '()\n', (689, 691), False, 'import quadpy\n'), ((697, 727), 'quadpy.e2r2.stroud_secrest_5', 'quadpy.e2r2.stroud_secrest_5', ([], {}), '()\n', (725, 727), False, 'import quadpy\n'), ((733, 763), 'quadpy.e2r2.stroud_secrest_6', 'quadpy.e2r2.stroud_secrest_6', ([], {}), '()\n', (761, 763), False, 'import quadpy\n'), ((1770, 1790), 'numpy.sqrt', 'numpy.sqrt', (['numpy.pi'], {}), '(numpy.pi)\n', (1780, 1790), False, 'import numpy\n'), ((1805, 1856), 'helpers.check_degree_ortho', 'check_degree_ortho', (['approximate', 'exact'], {'abs_tol': 'tol'}), '(approximate, exact, abs_tol=tol)\n', (1823, 1856), False, 'from helpers import check_degree_ortho\n'), ((2298, 2317), 'helpers.find_equal', 'find_equal', (['schemes'], {}), '(schemes)\n', (2308, 2317), False, 'from helpers import find_equal\n'), ((1698, 1716), 'numpy.zeros', 'numpy.zeros', (['(k + 1)'], {}), '(k + 1)\n', (1709, 1716), False, 'import numpy\n'), ((2035, 2069), 'quadpy.e2r2.rabinowitz_richter_1', 'quadpy.e2r2.rabinowitz_richter_1', ([], {}), '()\n', (2067, 2069), False, 'import quadpy\n'), ((1370, 1425), 'orthopy.e2r2.tree', 'orthopy.e2r2.tree', (['x', '(scheme.degree + 1)'], {'symbolic': '(False)'}), '(x, scheme.degree + 1, 
symbolic=False)\n', (1387, 1425), False, 'import orthopy\n')]
|
"""
Displays FISH data, raw and deconvolved, with spots detected using starFISH
"""
from skimage.io import imread
import numpy as np
from napari import Viewer, gui_qt
raw = imread('data-njs/smFISH/raw.tif')
deconvolved = imread('data-njs/smFISH/deconvolved.tif')
spots = np.loadtxt('data-njs/smFISH/spots.csv', delimiter=',')
print(raw.shape)
with gui_qt():
# create an empty viewer
viewer = Viewer()
# add the raw images
raw_layer = viewer.add_image(raw, name='images', colormap='gray', contrast_limits=(140.0, 1300.0))
decon_layer = viewer.add_image(deconvolved, name='deconvolved', colormap='gray', contrast_limits=(0.0, 0.2))
decon_layer.visible = False
spots_layer = viewer.add_points(spots, face_color='red',
edge_color='red', symbol='ring', size=8,
n_dimensional=True, name='spots')
spots_layer.opacity = 0.5
@viewer.bind_key('s')
def swap(viewer):
"""Swaps dims
"""
viewer.dims.order = np.roll(viewer.dims.order, 1)
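# Added note (not in the original script): np.roll cycles the displayed axis order,
# e.g. an order of (0, 1, 2) becomes (2, 0, 1) after one press of 's'.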
|
[
"napari.Viewer",
"numpy.roll",
"napari.gui_qt",
"skimage.io.imread",
"numpy.loadtxt"
] |
[((175, 208), 'skimage.io.imread', 'imread', (['"""data-njs/smFISH/raw.tif"""'], {}), "('data-njs/smFISH/raw.tif')\n", (181, 208), False, 'from skimage.io import imread\n'), ((223, 264), 'skimage.io.imread', 'imread', (['"""data-njs/smFISH/deconvolved.tif"""'], {}), "('data-njs/smFISH/deconvolved.tif')\n", (229, 264), False, 'from skimage.io import imread\n'), ((273, 327), 'numpy.loadtxt', 'np.loadtxt', (['"""data-njs/smFISH/spots.csv"""'], {'delimiter': '""","""'}), "('data-njs/smFISH/spots.csv', delimiter=',')\n", (283, 327), True, 'import numpy as np\n'), ((352, 360), 'napari.gui_qt', 'gui_qt', ([], {}), '()\n', (358, 360), False, 'from napari import Viewer, gui_qt\n'), ((404, 412), 'napari.Viewer', 'Viewer', ([], {}), '()\n', (410, 412), False, 'from napari import Viewer, gui_qt\n'), ((1041, 1070), 'numpy.roll', 'np.roll', (['viewer.dims.order', '(1)'], {}), '(viewer.dims.order, 1)\n', (1048, 1070), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
import scipy.cluster.hierarchy as hy
import matplotlib.pyplot as plt
# Creating a cluster of clusters function
def clusters(number=20, cnumber=5, csize=10):
# Note that the cluster centres are positioned using Gaussian randomness.
rnum = np.random.rand(cnumber, 2)
rn = rnum[:, 0] * number
rn = rn.astype(int)
rn[np.where(rn < 5)] = 5
rn[np.where(rn > number / 2.)] = round(number / 2., 0)
ra = rnum[:, 1] * 2.9
ra[np.where(ra < 1.5)] = 1.5
cls = np.random.randn(number, 3) * csize
# Random multipliers for central point of cluster
rxyz = np.random.randn(cnumber - 1, 3)
for i in range(cnumber - 1):
tmp = np.random.randn(rn[i + 1], 3)
x = tmp[:, 0] + (rxyz[i, 0] * csize)
y = tmp[:, 1] + (rxyz[i, 1] * csize)
z = tmp[:, 2] + (rxyz[i, 2] * csize)
tmp = np.column_stack([x, y, z])
cls = np.vstack([cls, tmp])
return cls
# Generate a cluster of clusters and distance matrix.
cls = clusters()
D = pdist(cls[:, 0:2])
D = squareform(D)
# Compute and plot first dendrogram.
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
Y1 = hy.linkage(D, method='complete')
cutoff = 0.3 * np.max(Y1[:, 2])
Z1 = hy.dendrogram(Y1, orientation='right', color_threshold=cutoff)
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
Y2 = hy.linkage(D, method='average')
cutoff = 0.3 * np.max(Y2[:, 2])
Z2 = hy.dendrogram(Y2, color_threshold=cutoff)
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
# Plot distance matrix.
ax3 = fig.add_axes([0.3, 0.1, 0.6, 0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1, :]
D = D[:, idx2]
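# Added note (not in the original script): reordering the rows by the first dendrogram's
# leaves and the columns by the second one's aligns the heatmap blocks with the cluster
# branches drawn on the left and on top.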
ax3.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
# Plot colorbar.
fig.savefig('scipy_352_ex1.pdf', bbox_inches='tight')
|
[
"scipy.spatial.distance.squareform",
"scipy.cluster.hierarchy.dendrogram",
"numpy.random.rand",
"numpy.where",
"scipy.spatial.distance.pdist",
"numpy.column_stack",
"numpy.max",
"matplotlib.pyplot.figure",
"scipy.cluster.hierarchy.linkage",
"numpy.vstack",
"numpy.random.randn"
] |
[((1070, 1088), 'scipy.spatial.distance.pdist', 'pdist', (['cls[:, 0:2]'], {}), '(cls[:, 0:2])\n', (1075, 1088), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1093, 1106), 'scipy.spatial.distance.squareform', 'squareform', (['D'], {}), '(D)\n', (1103, 1106), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((1151, 1177), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1161, 1177), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1257), 'scipy.cluster.hierarchy.linkage', 'hy.linkage', (['D'], {'method': '"""complete"""'}), "(D, method='complete')\n", (1235, 1257), True, 'import scipy.cluster.hierarchy as hy\n'), ((1295, 1357), 'scipy.cluster.hierarchy.dendrogram', 'hy.dendrogram', (['Y1'], {'orientation': '"""right"""', 'color_threshold': 'cutoff'}), "(Y1, orientation='right', color_threshold=cutoff)\n", (1308, 1357), True, 'import scipy.cluster.hierarchy as hy\n'), ((1502, 1533), 'scipy.cluster.hierarchy.linkage', 'hy.linkage', (['D'], {'method': '"""average"""'}), "(D, method='average')\n", (1512, 1533), True, 'import scipy.cluster.hierarchy as hy\n'), ((1571, 1612), 'scipy.cluster.hierarchy.dendrogram', 'hy.dendrogram', (['Y2'], {'color_threshold': 'cutoff'}), '(Y2, color_threshold=cutoff)\n', (1584, 1612), True, 'import scipy.cluster.hierarchy as hy\n'), ((318, 344), 'numpy.random.rand', 'np.random.rand', (['cnumber', '(2)'], {}), '(cnumber, 2)\n', (332, 344), True, 'import numpy as np\n'), ((657, 688), 'numpy.random.randn', 'np.random.randn', (['(cnumber - 1)', '(3)'], {}), '(cnumber - 1, 3)\n', (672, 688), True, 'import numpy as np\n'), ((1273, 1289), 'numpy.max', 'np.max', (['Y1[:, 2]'], {}), '(Y1[:, 2])\n', (1279, 1289), True, 'import numpy as np\n'), ((1549, 1565), 'numpy.max', 'np.max', (['Y2[:, 2]'], {}), '(Y2[:, 2])\n', (1555, 1565), True, 'import numpy as np\n'), ((405, 421), 'numpy.where', 'np.where', (['(rn < 5)'], {}), '(rn < 5)\n', (413, 421), True, 'import numpy as np\n'), ((434, 461), 'numpy.where', 'np.where', (['(rn > number / 2.0)'], {}), '(rn > number / 2.0)\n', (442, 461), True, 'import numpy as np\n'), ((519, 537), 'numpy.where', 'np.where', (['(ra < 1.5)'], {}), '(ra < 1.5)\n', (527, 537), True, 'import numpy as np\n'), ((556, 582), 'numpy.random.randn', 'np.random.randn', (['number', '(3)'], {}), '(number, 3)\n', (571, 582), True, 'import numpy as np\n'), ((737, 766), 'numpy.random.randn', 'np.random.randn', (['rn[i + 1]', '(3)'], {}), '(rn[i + 1], 3)\n', (752, 766), True, 'import numpy as np\n'), ((916, 942), 'numpy.column_stack', 'np.column_stack', (['[x, y, z]'], {}), '([x, y, z])\n', (931, 942), True, 'import numpy as np\n'), ((957, 978), 'numpy.vstack', 'np.vstack', (['[cls, tmp]'], {}), '([cls, tmp])\n', (966, 978), True, 'import numpy as np\n')]
|
from unittest import mock
import gym
import numpy as np
from tests.fixtures.envs.dummy import DummyEnv
class DummyDiscretePixelEnv(DummyEnv):
"""
A dummy discrete pixel environment.
It follows Atari game convention, where actions are 'NOOP', 'FIRE', ...
It also provides self.unwrapped.ale.lives and get_action_meanings for testing.
Several properties are defined for testing purposes, as follows:
-Observations are
after reset : np.ones(self._shape).
action 1 (FIRE): np.full(self._shape, 2).
otherwise : random if self.random is True,
otherwise previous state + action.
-The environment has 5 lives.
-Done will be True if
-all 5 lives are exhausted
-env.step(2), followed by env.step(1)
"""
def __init__(self, random=True):
super().__init__(random, obs_dim=(10, 10, 3), action_dim=5)
self.unwrapped.get_action_meanings = self._get_action_meanings
self.unwrapped.ale = mock.Mock()
self.unwrapped.ale.lives = self.get_lives
self._observation_space = gym.spaces.Box(
low=0, high=255, shape=self._obs_dim, dtype=np.uint8)
self.step_called = 0
self._prev_action = None
@property
def observation_space(self):
"""Return an observation space."""
return self._observation_space
@observation_space.setter
def observation_space(self, observation_space):
self._observation_space = observation_space
@property
def action_space(self):
"""Return an action space."""
return gym.spaces.Discrete(self._action_dim)
def _get_action_meanings(self):
return ['NOOP', 'FIRE', 'SLEEP', 'EAT', 'PLAY']
def get_lives(self):
"""Get number of lives."""
return self._lives
def reset(self):
"""Reset the environment."""
self.state = np.ones(self._obs_dim, dtype=np.uint8)
self._lives = 5
self.step_called = 0
return self.state
def step(self, action):
"""
Step the environment.
Before gym fixed the overflow issue for sample() in
np.uint8 environments, we handle the sampling here.
We need high=256 since np.random.uniform samples from [low, high)
(includes low, but excludes high).
"""
done = False
if self.state is not None:
# Simulating FIRE action
if action == 1:
if self._prev_action == 2:
done = True
obs = np.full(self._obs_dim, 2, dtype=np.uint8)
else:
if self.random:
obs = np.random.uniform(
low=0, high=256, size=self._obs_dim).astype(np.uint8)
else:
obs = self.state + action
if self._lives == 0:
raise RuntimeError("DummyEnv: Cannot step when lives = 0!")
self._lives -= 1
if self._lives == 0:
done = True
else:
raise RuntimeError(
"DummyEnv: reset() must be called before step()!")
self.step_called += 1
self._prev_action = action
return obs, 0, done, {'ale.lives': self._lives}
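# Illustrative sketch (added commentary, not part of the original fixture), following
# the docstring above with random=False:
# >>> env = DummyDiscretePixelEnv(random=False)
# >>> obs = env.reset()                 # np.ones((10, 10, 3)), 5 lives
# >>> _, _, done, info = env.step(2)    # 'SLEEP': lives drop to 4, done is False
# >>> _, _, done, info = env.step(1)    # 'FIRE' right after 'SLEEP': done is True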
|
[
"unittest.mock.Mock",
"numpy.ones",
"gym.spaces.Discrete",
"gym.spaces.Box",
"numpy.random.uniform",
"numpy.full"
] |
[((1029, 1040), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1038, 1040), False, 'from unittest import mock\n'), ((1127, 1195), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'self._obs_dim', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=self._obs_dim, dtype=np.uint8)\n', (1141, 1195), False, 'import gym\n'), ((1649, 1686), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['self._action_dim'], {}), '(self._action_dim)\n', (1668, 1686), False, 'import gym\n'), ((1959, 1997), 'numpy.ones', 'np.ones', (['self._obs_dim'], {'dtype': 'np.uint8'}), '(self._obs_dim, dtype=np.uint8)\n', (1966, 1997), True, 'import numpy as np\n'), ((2635, 2676), 'numpy.full', 'np.full', (['self._obs_dim', '(2)'], {'dtype': 'np.uint8'}), '(self._obs_dim, 2, dtype=np.uint8)\n', (2642, 2676), True, 'import numpy as np\n'), ((2756, 2810), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(256)', 'size': 'self._obs_dim'}), '(low=0, high=256, size=self._obs_dim)\n', (2773, 2810), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from ctc_decoders import Scorer, ctc_beam_search_decoder_batch
"""
# Install the language model
sudo apt-get install build-essential libboost-all-dev cmake zlib1g-dev libbz2-dev liblzma-dev
git clone https://github.com/NVIDIA/OpenSeq2Seq -b ctc-decoders
mv OpenSeq2Seq/decoders .
rm -rf OpenSeq2Seq
cd decoders
./setup.sh
cd ..
"""
class BeamSearchDecoderWithLM(torch.nn.Module):
def __init__(
self, vocab, beam_width, alpha, beta, lm_path, num_cpus, cutoff_prob=1.0, cutoff_top_n=40):
if lm_path is not None:
self.scorer = Scorer(alpha, beta, model_path=lm_path, vocabulary=vocab)
else:
self.scorer = None
self.vocab = vocab
self.beam_width = beam_width
self.num_cpus = num_cpus
self.cutoff_prob = cutoff_prob
self.cutoff_top_n = cutoff_top_n
@torch.no_grad()
def forward(self, log_probs, log_probs_length):
probs = self.revert_softmax(log_probs)
probs_list = []
for i, prob in enumerate(probs):
probs_list.append(prob[: log_probs_length[i], :])
results = ctc_beam_search_decoder_batch(
probs_list,
self.vocab,
beam_size=self.beam_width,
num_processes=self.num_cpus,
ext_scoring_func=self.scorer,
cutoff_prob=self.cutoff_prob,
cutoff_top_n=self.cutoff_top_n,
)
result = [item[0][1] for item in results]
return result
def revert_softmax(self, logits):
"""
Recover the softmax probabilities from the log probabilities; used to compute the language model score.
"""
result = np.zeros_like(logits)
for i in range(logits.shape[0]):
item = logits[i]
e = np.exp(item - np.max(item))
result[i] = e / e.sum(axis=-1).reshape([item.shape[0], 1])
return result
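# Added note (not in the original source): because the inputs are log-probabilities,
# exp(logits - max) followed by the normalisation over the last axis simply recovers the
# original softmax probabilities; subtracting the maximum only guards against overflow.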
if __name__ == '__main__':
vocab = [c.strip() for c in open("data/aishell1-vocab.txt", 'r').readlines()]
lm_path = "/data/chenc/asr/minhang/atc-service/asr/checkpoints/kenlm/cn.arpa"
decoder = BeamSearchDecoderWithLM(vocab=vocab,
beam_width=40,
alpha=1.,
beta=1.,
lm_path=lm_path,
num_cpus=6,
cutoff_prob=1, cutoff_top_n=40)
log_prob = torch.randn((2,1000,4334), dtype=torch.float32)
log_prob = torch.log_softmax(log_prob, dim=-1).numpy()
lengths = torch.IntTensor([100,200]).numpy()
out = decoder.forward(log_probs=log_prob, log_probs_length=lengths)
print(out)
|
[
"torch.log_softmax",
"ctc_decoders.ctc_beam_search_decoder_batch",
"numpy.max",
"ctc_decoders.Scorer",
"torch.no_grad",
"numpy.zeros_like",
"torch.randn",
"torch.IntTensor"
] |
[((859, 874), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (872, 874), False, 'import torch\n'), ((2354, 2403), 'torch.randn', 'torch.randn', (['(2, 1000, 4334)'], {'dtype': 'torch.float32'}), '((2, 1000, 4334), dtype=torch.float32)\n', (2365, 2403), False, 'import torch\n'), ((1119, 1329), 'ctc_decoders.ctc_beam_search_decoder_batch', 'ctc_beam_search_decoder_batch', (['probs_list', 'self.vocab'], {'beam_size': 'self.beam_width', 'num_processes': 'self.num_cpus', 'ext_scoring_func': 'self.scorer', 'cutoff_prob': 'self.cutoff_prob', 'cutoff_top_n': 'self.cutoff_top_n'}), '(probs_list, self.vocab, beam_size=self.\n beam_width, num_processes=self.num_cpus, ext_scoring_func=self.scorer,\n cutoff_prob=self.cutoff_prob, cutoff_top_n=self.cutoff_top_n)\n', (1148, 1329), False, 'from ctc_decoders import Scorer, ctc_beam_search_decoder_batch\n'), ((1604, 1625), 'numpy.zeros_like', 'np.zeros_like', (['logits'], {}), '(logits)\n', (1617, 1625), True, 'import numpy as np\n'), ((573, 630), 'ctc_decoders.Scorer', 'Scorer', (['alpha', 'beta'], {'model_path': 'lm_path', 'vocabulary': 'vocab'}), '(alpha, beta, model_path=lm_path, vocabulary=vocab)\n', (579, 630), False, 'from ctc_decoders import Scorer, ctc_beam_search_decoder_batch\n'), ((2417, 2452), 'torch.log_softmax', 'torch.log_softmax', (['log_prob'], {'dim': '(-1)'}), '(log_prob, dim=-1)\n', (2434, 2452), False, 'import torch\n'), ((2475, 2502), 'torch.IntTensor', 'torch.IntTensor', (['[100, 200]'], {}), '([100, 200])\n', (2490, 2502), False, 'import torch\n'), ((1726, 1738), 'numpy.max', 'np.max', (['item'], {}), '(item)\n', (1732, 1738), True, 'import numpy as np\n')]
|
from datetime import datetime, date, timedelta
import pandas as pd
import networkx as nx
from itertools import combinations
import numpy as np
class TeamworkStudyRunner:
def __init__(self, notes, window_in_days, step_in_days):
notes.sort_values('date', inplace=True)
self.notes = notes
self.DELTA = np.timedelta64(window_in_days, 'D')
self.STEP = np.timedelta64(step_in_days, 'D')
first_date = notes['date'].iloc[0]
last_date = notes['date'].iloc[-1]
self.date_range = np.arange(first_date, last_date - self.DELTA, self.STEP)
def __iter__(self):
for start_date in self.date_range:
end_date = start_date + self.DELTA
date_of_care = end_date + self.STEP
notes_in_window = self.notes.query('date >= @start_date & date <= @end_date')
notes_for_care_date = self.notes.query('date > @end_date & date <= @date_of_care')
num_rows = len(notes_for_care_date.index)
if num_rows == 0: continue
yield CareDate(notes_in_window, notes_for_care_date)
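# Added worked example (not in the original source): with window_in_days=30 and
# step_in_days=7, each yielded CareDate pairs the notes written in [start, start + 30 days]
# with the notes from the following 7 days, i.e. (start + 30, start + 37].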
class CareDate:
def __init__(self, notes_in_window, notes_for_care_date):
self.notes_in_window = notes_in_window
self.notes_for_care_date = notes_for_care_date
self.care_team_dict = {}
self.__populate_care_team_dict()
def __populate_care_team_dict(self):
discharge_ids_for_date = self.notes_for_care_date.discharge_id.unique()
for discharge_id in discharge_ids_for_date:
drs_for_discharge_id = self.notes_for_care_date.query('discharge_id == @discharge_id').dr.unique()
self.care_team_dict[discharge_id] = drs_for_discharge_id
def __iter__(self):
for discharge_id, care_team in self.care_team_dict.items():
yield CareTeam(self.notes_in_window, discharge_id, care_team)
class CareTeam:
def __init__(self, notes_in_window, discharge_id, care_team):
self.notes_in_window = notes_in_window
self.discharge_id = discharge_id
self.care_team = care_team
self.care_team_edges = [sorted(edge) for edge in list(combinations(care_team, 2))]
self.G = nx.Graph()
self.unique_dates = notes_in_window.date.unique()
self.__create_graph()
def __create_graph(self):
for note_date in self.unique_dates:
notes_for_date = self.notes_in_window.query('date == @note_date')
discharge_ids_for_date = notes_for_date.discharge_id.unique()
for discharge_id in discharge_ids_for_date:
drs_for_discharge_id = notes_for_date.query('discharge_id == @discharge_id').dr.unique()
care_team_edges_for_discharge_id = [edge for edge in list(combinations(drs_for_discharge_id, 2))
if sorted(edge) in self.care_team_edges]
for edge in care_team_edges_for_discharge_id:
self.__add_edge_to_G(edge)
def __add_edge_to_G(self, edge):
data = self.G.get_edge_data(*edge, default=None)
weight = 1 if data is None else data['weight'] + 1
self.G.add_edge(*edge, weight=weight)
|
[
"numpy.timedelta64",
"itertools.combinations",
"networkx.Graph",
"numpy.arange"
] |
[((330, 365), 'numpy.timedelta64', 'np.timedelta64', (['window_in_days', '"""D"""'], {}), "(window_in_days, 'D')\n", (344, 365), True, 'import numpy as np\n'), ((386, 419), 'numpy.timedelta64', 'np.timedelta64', (['step_in_days', '"""D"""'], {}), "(step_in_days, 'D')\n", (400, 419), True, 'import numpy as np\n'), ((533, 589), 'numpy.arange', 'np.arange', (['first_date', '(last_date - self.DELTA)', 'self.STEP'], {}), '(first_date, last_date - self.DELTA, self.STEP)\n', (542, 589), True, 'import numpy as np\n'), ((2234, 2244), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2242, 2244), True, 'import networkx as nx\n'), ((2188, 2214), 'itertools.combinations', 'combinations', (['care_team', '(2)'], {}), '(care_team, 2)\n', (2200, 2214), False, 'from itertools import combinations\n'), ((2799, 2836), 'itertools.combinations', 'combinations', (['drs_for_discharge_id', '(2)'], {}), '(drs_for_discharge_id, 2)\n', (2811, 2836), False, 'from itertools import combinations\n')]
|
# nodenet/utilities/commons.py
# Description:
# "commons.py" provide commons utilities that can be use widely.
# Copyright 2018 NOOXY. All Rights Reserved.
from nodenet.imports.commons import *
import numpy as np2
# np2 for CuPy compatibility
def cut_dataset_by_ratio_ramdom(datasets, cut_ratio = 0.1):
dimension = len(datasets[0].shape)
valid_data_size = int(len(datasets[0])*cut_ratio)
input_data = np2.array(datasets[0].tolist())
output_data = np2.array(datasets[1].tolist())
input_data_valid = np2.empty([0]+list(input_data.shape[1:len(input_data.shape)]))
output_data_valid = np2.empty([0]+list(output_data.shape[1:len(output_data.shape)]))
for x in range(valid_data_size):
index = np2.random.randint(len(input_data))
input_data_valid = np2.concatenate((input_data_valid, np2.array(([input_data[index].tolist()]))), axis=0)
output_data_valid = np2.concatenate((output_data_valid, np2.array(([output_data[index].tolist()]))), axis=0)
input_data = np2.delete(input_data, index, axis=0)
output_data = np2.delete(output_data, index, axis=0)
input_data = np.array(input_data.tolist())
output_data = np.array(output_data.tolist())
input_data_valid = np.array(input_data_valid.tolist())
output_data_valid = np.array(output_data_valid.tolist())
return [input_data, output_data, input_data_valid, output_data_valid]
def shuffle_datasets(datasets):
a = np2.array(datasets[0].tolist())
b = np2.array(datasets[1].tolist())
assert len(a) == len(b)
order = np2.random.permutation(len(a))
return [np.array(a[order].tolist()), np.array(b[order].tolist())]
def get_mini_batch_ramdom(datasets, mini_batch_size):
input_data = datasets[0]
output_data = datasets[1]
rand_range = len(input_data)-mini_batch_size
start_index = 0
if rand_range != 0:
start_index = int(np.random.randint(len(input_data)-mini_batch_size))
return [input_data[start_index:start_index+mini_batch_size], output_data[start_index:start_index+mini_batch_size]]
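# Added note (not in the original source): unlike the "2" variant below, this helper
# returns a contiguous slice of length mini_batch_size starting at a random offset,
# so neighbouring samples stay together.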
def get_mini_batch_ramdom2(datasets, mini_batch_size):
dimension = len(datasets[0].shape)
data_size = mini_batch_size
input_data = datasets[0]
output_data = datasets[1]
index_list = []
input_data_result = np.empty([0]+list(input_data.shape[1:len(input_data.shape)]))
output_data_result = np.empty([0]+list(input_data.shape[1:len(input_data.shape)]))
index = np.random.randint(len(input_data))
for x in range(data_size):
while index in index_list:
index = np.random.randint(len(input_data))
index_list.append(index)
input_data_result = np.concatenate((input_data_result, [input_data[index]]))
output_data_result = np.concatenate((output_data_result, [output_data[index]]))
return [input_data_result, output_data_result]
|
[
"numpy.delete"
] |
[((1010, 1047), 'numpy.delete', 'np2.delete', (['input_data', 'index'], {'axis': '(0)'}), '(input_data, index, axis=0)\n', (1020, 1047), True, 'import numpy as np2\n'), ((1070, 1108), 'numpy.delete', 'np2.delete', (['output_data', 'index'], {'axis': '(0)'}), '(output_data, index, axis=0)\n', (1080, 1108), True, 'import numpy as np2\n')]
|
import tensorflow as tf
import csv
import time
from datetime import timedelta
import sys
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib import slim
from tensorflow.python.ops import variables as tf_variables
from ..configuration import *
from .. import trainer, evaluator, metrics
from ..task_spec import get_task_spec
from .text_classification_dataset import TextClassificationDataset
def _load_embeddings(vocabulary_size, embeddings_size,
filename_prefix='embeddings', from_dir=DIR_DATA_WORD2VEC):
embeddings = []
embeddings_file = '{}_{}_{}'.format(filename_prefix, vocabulary_size, embeddings_size)
with open(os.path.join(from_dir, embeddings_file), 'r') as file:
reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in reader:
embeddings.append([float(r) for r in row])
return embeddings
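# Added note (not in the original source): the embeddings file is expected to be a CSV
# named '{prefix}_{vocabulary_size}_{embeddings_size}' with one comma-separated vector
# per row; the call returns a plain Python list of float lists in row order.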
class TextClassificationTrainer(trainer.Trainer):
"""
Helper class to run the training and create the model for the training. See trainer.Trainer for
more details.
"""
def __init__(self, dataset, text_classification_model, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False, task_spec=None, max_steps=None):
self.text_classification_model = text_classification_model
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTrainer, self).__init__(log_dir=log_dir, dataset=dataset,
task_spec=task_spec, max_steps=max_steps,
monitored_training_session_config=config)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
self.loss = self.text_classification_model.loss(targets, outputs)
tf.summary.scalar('loss', self.loss)
# learning rate
self.optimizer, self.learning_rate = \
self.text_classification_model.optimize(self.loss, self.global_step)
if self.learning_rate is not None:
tf.summary.scalar('learning_rate', self.learning_rate)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets)
# saver to save the model
self.saver = tf.train.Saver()
# check a nan value in the loss
self.loss = tf.check_numerics(self.loss, 'loss is nan')
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, expected_labels, batch_size)
def step(self, session, graph_data):
lr, _, loss, step, metrics = \
session.run([self.learning_rate, self.optimizer, self.loss, self.global_step,
self.metrics])
if self.is_chief and time.time() > self.print_timestamp + 5 * 60:
self.print_timestamp = time.time()
elapsed_time = str(timedelta(seconds=time.time() - self.init_time))
m = 'step: {} loss: {:0.4f} learning_rate = {:0.6f} elapsed seconds: {} ' \
'precision: {} recall: {} accuracy: {}'
logging.info(m.format(step, loss, lr, elapsed_time,
metrics['precision'], metrics['recall'], metrics['accuracy']))
def after_create_session(self, session, coord):
self.init_time = time.time()
self.print_timestamp = time.time()
class TextClassificationTest(evaluator.Evaluator):
"""Evaluator for distributed training"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False,max_steps=None):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationTest, self).__init__(checkpoints_dir=log_dir, dataset=dataset,
output_path=output_path, max_steps=max_steps,
singular_monitored_session_config=config)
self.text_classification_model = text_classification_model
self.eval_writer = tf.summary.FileWriter(log_dir)
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# model
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# loss
targets = self.text_classification_model.targets(expected_labels, output_classes)
loss = self.text_classification_model.loss(targets, outputs)
self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
trainable=False)
self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
step_increase = tf.assign_add(step, 1)
self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
tf.summary.scalar('loss', self.loss)
# metrics
self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
return None
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation, expected_labels = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
graph_data = self.model(input_text_begin, input_text_end, gene, variation,
expected_labels, batch_size)
return graph_data
def step(self, session, graph_data, summary_op):
summary, self.loss_result, self.metrics_results = \
session.run([summary_op, self.loss, self.metrics])
return summary
def end(self, session):
super(TextClassificationTest, self).end(session)
chk_step = int(self.lastest_checkpoint.split('-')[-1])
m = 'step: {} loss: {:0.4f} precision: {} recall: {} accuracy: {}'
logging.info(m.format(chk_step, self.loss_result, self.metrics_results['precision'],
self.metrics_results['recall'], self.metrics_results['accuracy']))
def after_create_session(self, session, coord):
# checkpoints_file = os.path.join(self.checkpoints_dir, 'checkpoint')
# alt_checkpoints_dir = '{}_tp'.format(self.checkpoints_dir)
# import glob
# files = glob.glob('{}/model.ckpt-*.data-*'.format(alt_checkpoints_dir))
# chk_step = 0
# for f in files:
# num = f.split('model.ckpt-')[1].split('.')[0]
# num = int(num)
# if chk_step == 0 or num < chk_step:
# chk_step = num
# if chk_step != 0:
# ckpt_files = glob.glob('{}/model.ckpt-{}.data-*'.format(alt_checkpoints_dir, chk_step))
# ckpt_files = [x.split('/')[-1] for x in ckpt_files]
# for f in ckpt_files + ['model.ckpt-{}.index', 'model.ckpt-{}.meta']:
# f = f.format(chk_step)
# os.rename(os.path.join(alt_checkpoints_dir, f), os.path.join(self.checkpoints_dir, f))
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-{}"\n'.format(chk_step))
# f.write('all_model_checkpoint_paths: "./model.ckpt-{}"\n'.format(chk_step))
super(TextClassificationTest, self).after_create_session(session, coord)
# with open(checkpoints_file, 'wb') as f:
# f.write('model_checkpoint_path: "./model.ckpt-"\n')
# f.write('all_model_checkpoint_paths: "./model.ckpt-"\n')
class TextClassificationEval(evaluator.Evaluator):
"""Evaluator for text classification"""
def __init__(self, dataset, text_classification_model, output_path, log_dir=DIR_TC_LOGDIR,
use_end_sequence=False):
self.use_end_sequence = use_end_sequence
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
super(TextClassificationEval, self).__init__(checkpoints_dir=log_dir,
output_path=output_path,
infinite_loop=False,
singular_monitored_session_config=config)
self.dataset = dataset
self.text_classification_model = text_classification_model
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
# embeddings
embeddings = _load_embeddings(vocabulary_size, embeddings_size)
# global step
self.global_step = training_util.get_or_create_global_step()
self.global_step = tf.assign_add(self.global_step, 1)
# model
with tf.control_dependencies([self.global_step]):
with slim.arg_scope(self.text_classification_model.model_arg_scope()):
self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
gene, variation, output_classes,
embeddings=embeddings,
batch_size=batch_size,
training=False)
# restore only the trainable variables
self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
return self.outputs
def create_graph(self, dataset_tensor, batch_size):
input_text_begin, input_text_end, gene, variation = dataset_tensor
if not self.use_end_sequence:
input_text_end = None
return self.model(input_text_begin, input_text_end, gene, variation, batch_size)
def after_create_session(self, session, coord):
super(TextClassificationEval, self).after_create_session(session, coord)
print('ID,class1,class2,class3,class4,class5,class6,class7,class8,class9')
def step(self, session, graph_data, summary_op):
step, predictions = session.run([self.global_step, self.outputs['prediction']])
predictions = predictions[0]
predictions = [p + 0.01 for p in predictions] # penalize less the mistakes
sum = np.sum(predictions)
predictions = [p / sum for p in predictions]
print('{},{}'.format(step, ','.join(['{:.3f}'.format(x) for x in predictions])))
return None
import logging
def main(model, name, sentence_split=False, end_sequence=USE_END_SEQUENCE, batch_size=TC_BATCH_SIZE):
"""
Main method to execute the text_classification models
:param ModelSimple model: object model based on ModelSimple
:param str name: name of the model
:param bool sentence_split: whether to split the dataset in sentences or not,
only used for hatt model
:param bool end_sequence: whether to use or not the end of the sequences in the models
:param int batch_size: batch size of the models
"""
logging.getLogger().setLevel(logging.INFO)
log_dir = '{}_{}'.format(DIR_TC_LOGDIR, name)
if len(sys.argv) > 1 and sys.argv[1] == 'test':
# execute the test with the train dataset
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_trainset'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'validate':
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'validate'),
use_end_sequence=end_sequence)
tester.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval':
# evaluate the data of the test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test'),
use_end_sequence=end_sequence)
evaluator.run()
elif len(sys.argv) > 1 and sys.argv[1] == 'eval_stage2':
# evaluate the data of the test dataset. We submit this output to kaggle
dataset = TextClassificationDataset(type='stage2_test', sentence_split=sentence_split)
evaluator = TextClassificationEval(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'test_stage2'),
use_end_sequence=end_sequence)
evaluator.run()
else:
# training
task_spec = get_task_spec(with_evaluator=USE_LAST_WORKER_FOR_VALIDATION)
if task_spec.join_if_ps():
# join if it is a parameters server and do nothing else
return
with(tf.gfile.Open(os.path.join(DIR_DATA_TEXT_CLASSIFICATION, 'train_set'))) as f:
max_steps = int(TC_EPOCHS * len(f.readlines()) / batch_size)
if task_spec.is_evaluator():
dataset = TextClassificationDataset(type='val', sentence_split=sentence_split)
# evaluator running in the last worker
tester = TextClassificationTest(dataset=dataset, text_classification_model=model,
log_dir=log_dir,
output_path=os.path.join(log_dir, 'val'),
use_end_sequence=end_sequence,
max_steps=max_steps)
tester.run()
else:
dataset = TextClassificationDataset(type='train', sentence_split=sentence_split)
trainer = TextClassificationTrainer(dataset=dataset, text_classification_model=model,
log_dir=log_dir, use_end_sequence=end_sequence,
task_spec=task_spec, max_steps=max_steps)
trainer.run(epochs=TC_EPOCHS, batch_size=batch_size)
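# Illustrative sketch (added commentary; the model class below is hypothetical, not taken
# from this repository): a concrete model script would build a model object and dispatch
# on sys.argv[1] through main(), e.g.
# >>> main(model=MyTextClassificationModel(), name='my_model', batch_size=TC_BATCH_SIZE)
# and be launched as `python my_model.py test|validate|eval|eval_stage2`, or with no
# argument for training.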
|
[
"tensorflow.cast",
"logging.getLogger",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.check_numerics",
"tensorflow.Variable",
"tensorflow.train.Saver",
"numpy.sum",
"tensorflow.control_dependencies",
"tensorflow.assign_add",
"time.time",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.ConfigProto",
"tensorflow.summary.scalar",
"tensorflow.summary.FileWriter",
"csv.reader"
] |
[((769, 842), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (779, 842), False, 'import csv\n'), ((1423, 1439), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1437, 1439), True, 'import tensorflow as tf\n'), ((2116, 2157), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (2155, 2157), False, 'from tensorflow.python.training import training_util\n'), ((2791, 2827), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (2808, 2827), True, 'import tensorflow as tf\n'), ((3242, 3258), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3256, 3258), True, 'import tensorflow as tf\n'), ((3319, 3362), 'tensorflow.check_numerics', 'tf.check_numerics', (['self.loss', '"""loss is nan"""'], {}), "(self.loss, 'loss is nan')\n", (3336, 3362), True, 'import tensorflow as tf\n'), ((4512, 4523), 'time.time', 'time.time', ([], {}), '()\n', (4521, 4523), False, 'import time\n'), ((4555, 4566), 'time.time', 'time.time', ([], {}), '()\n', (4564, 4566), False, 'import time\n'), ((4884, 4900), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4898, 4900), True, 'import tensorflow as tf\n'), ((5331, 5361), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir'], {}), '(log_dir)\n', (5352, 5361), True, 'import tensorflow as tf\n'), ((6380, 6456), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'dtype': 'tf.float32', 'name': '"""accumulated_loss"""', 'trainable': '(False)'}), "(0.0, dtype=tf.float32, name='accumulated_loss', trainable=False)\n", (6391, 6456), True, 'import tensorflow as tf\n'), ((6533, 6575), 'tensorflow.assign_add', 'tf.assign_add', (['self.accumulated_loss', 'loss'], {}), '(self.accumulated_loss, loss)\n', (6546, 6575), True, 'import tensorflow as tf\n'), ((6591, 6656), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int32', 'name': '"""eval_step"""', 'trainable': '(False)'}), "(0, dtype=tf.int32, name='eval_step', trainable=False)\n", (6602, 6656), True, 'import tensorflow as tf\n'), ((6681, 6703), 'tensorflow.assign_add', 'tf.assign_add', (['step', '(1)'], {}), '(step, 1)\n', (6694, 6703), True, 'import tensorflow as tf\n'), ((6797, 6833), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (6814, 6833), True, 'import tensorflow as tf\n'), ((9735, 9751), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (9749, 9751), True, 'import tensorflow as tf\n'), ((10547, 10588), 'tensorflow.python.training.training_util.get_or_create_global_step', 'training_util.get_or_create_global_step', ([], {}), '()\n', (10586, 10588), False, 'from tensorflow.python.training import training_util\n'), ((10616, 10650), 'tensorflow.assign_add', 'tf.assign_add', (['self.global_step', '(1)'], {}), '(self.global_step, 1)\n', (10629, 10650), True, 'import tensorflow as tf\n'), ((12220, 12239), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (12226, 12239), True, 'import numpy as np\n'), ((3036, 3090), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.learning_rate'], {}), "('learning_rate', self.learning_rate)\n", (3053, 3090), True, 'import tensorflow as tf\n'), ((4031, 4042), 'time.time', 'time.time', ([], {}), '()\n', (4040, 4042), False, 'import 
time\n'), ((6748, 6788), 'tensorflow.cast', 'tf.cast', (['step_increase'], {'dtype': 'tf.float32'}), '(step_increase, dtype=tf.float32)\n', (6755, 6788), True, 'import tensorflow as tf\n'), ((10680, 10723), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[self.global_step]'], {}), '([self.global_step])\n', (10703, 10723), True, 'import tensorflow as tf\n'), ((12958, 12977), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (12975, 12977), False, 'import logging\n'), ((3951, 3962), 'time.time', 'time.time', ([], {}), '()\n', (3960, 3962), False, 'import time\n'), ((11369, 11403), 'tensorflow.python.ops.variables.trainable_variables', 'tf_variables.trainable_variables', ([], {}), '()\n', (11401, 11403), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((4092, 4103), 'time.time', 'time.time', ([], {}), '()\n', (4101, 4103), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 15:16:34 2021
@author: ag
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
import re
def confidence_ellipse(x, y, n_std=1.0, weights=None, ax=None, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
    n_std : float
        The number of standard deviations to determine the ellipse's radii.
    weights : array-like, shape (n, ), optional
        Per-point weights used for the weighted covariance and means.
    ax : matplotlib.axes.Axes, optional
        The axes object to draw the ellipse into; defaults to the current axes.
    **kwargs
        Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
"""
if x.size != y.size:
raise ValueError("x and y must be the same size")
if not ax:
ax = plt.gca()
if 'label' in kwargs:
kwargs['label'] = re.sub(r'SSP(\d)(\d)(\d)',r'SSP\1-\2.\3', kwargs['label'])
if weights is None:
cov = np.cov(x, y)
mean_x = np.mean(x)
mean_y = np.mean(y)
else:
cov = np.cov(x, y, aweights = weights)
sumw = np.sum(weights)
mean_x = np.sum(x*weights)/sumw
mean_y = np.sum(y*weights)/sumw
pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=f'{facecolor}22', edgecolor=f'{facecolor}', **kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # by the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
#FROM https://stackoverflow.com/questions/21844024/weighted-percentile-using-numpy
def weighted_quantile(values, quantiles, sample_weight=None,
values_sorted=False, old_style=False):
""" Very close to numpy.percentile, but supports weights.
NOTE: quantiles should be in [0, 1]!
:param values: numpy.array with data
:param quantiles: array-like with many quantiles needed
:param sample_weight: array-like of the same length as `array`
:param values_sorted: bool, if True, then will avoid sorting of
initial array
:param old_style: if True, will correct output to be consistent
with numpy.percentile.
:return: numpy.array with computed quantiles.
"""
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
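# Illustrative usage sketch (not part of the original module): exercises both
# helpers on synthetic correlated data. Sample sizes, weights and the hex
# facecolor are assumptions; the facecolor must be a hex string because an
# '22' alpha suffix is appended to it above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xs = rng.normal(size=500)
    ys = 0.8 * xs + rng.normal(scale=0.5, size=500)
    w = rng.uniform(0.5, 1.5, size=500)  # per-point weights
    print(weighted_quantile(ys, [0.25, 0.5, 0.75], sample_weight=w))
    fig, ax = plt.subplots()
    ax.scatter(xs, ys, s=4, alpha=0.3)
    confidence_ellipse(xs, ys, n_std=2.0, weights=w, ax=ax,
                       facecolor='#1f77b4', label='2 std ellipse')
    ax.legend()
    plt.show()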
|
[
"numpy.mean",
"numpy.all",
"numpy.sqrt",
"matplotlib.pyplot.gca",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.cov",
"matplotlib.transforms.Affine2D",
"numpy.cumsum",
"numpy.interp",
"re.sub",
"matplotlib.patches.Ellipse"
] |
[((1466, 1486), 'numpy.sqrt', 'np.sqrt', (['(1 + pearson)'], {}), '(1 + pearson)\n', (1473, 1486), True, 'import numpy as np\n'), ((1506, 1526), 'numpy.sqrt', 'np.sqrt', (['(1 - pearson)'], {}), '(1 - pearson)\n', (1513, 1526), True, 'import numpy as np\n'), ((1541, 1674), 'matplotlib.patches.Ellipse', 'Ellipse', (['(0, 0)'], {'width': '(ell_radius_x * 2)', 'height': '(ell_radius_y * 2)', 'facecolor': 'f"""{facecolor}22"""', 'edgecolor': 'f"""{facecolor}"""'}), "((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2, facecolor=\n f'{facecolor}22', edgecolor=f'{facecolor}', **kwargs)\n", (1548, 1674), False, 'from matplotlib.patches import Ellipse\n'), ((2940, 2956), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2948, 2956), True, 'import numpy as np\n'), ((2973, 2992), 'numpy.array', 'np.array', (['quantiles'], {}), '(quantiles)\n', (2981, 2992), True, 'import numpy as np\n'), ((3088, 3111), 'numpy.array', 'np.array', (['sample_weight'], {}), '(sample_weight)\n', (3096, 3111), True, 'import numpy as np\n'), ((3675, 3723), 'numpy.interp', 'np.interp', (['quantiles', 'weighted_quantiles', 'values'], {}), '(quantiles, weighted_quantiles, values)\n', (3684, 3723), True, 'import numpy as np\n'), ((902, 911), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (909, 911), True, 'import matplotlib.pyplot as plt\n'), ((965, 1028), 're.sub', 're.sub', (['"""SSP(\\\\d)(\\\\d)(\\\\d)"""', '"""SSP\\\\1-\\\\2.\\\\3"""', "kwargs['label']"], {}), "('SSP(\\\\d)(\\\\d)(\\\\d)', 'SSP\\\\1-\\\\2.\\\\3', kwargs['label'])\n", (971, 1028), False, 'import re\n'), ((1063, 1075), 'numpy.cov', 'np.cov', (['x', 'y'], {}), '(x, y)\n', (1069, 1075), True, 'import numpy as np\n'), ((1093, 1103), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1100, 1103), True, 'import numpy as np\n'), ((1121, 1131), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1128, 1131), True, 'import numpy as np\n'), ((1156, 1186), 'numpy.cov', 'np.cov', (['x', 'y'], {'aweights': 'weights'}), '(x, y, aweights=weights)\n', (1162, 1186), True, 'import numpy as np\n'), ((1204, 1219), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1210, 1219), True, 'import numpy as np\n'), ((1325, 1355), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] * cov[1, 1])'], {}), '(cov[0, 0] * cov[1, 1])\n', (1332, 1355), True, 'import numpy as np\n'), ((1864, 1882), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (1871, 1882), True, 'import numpy as np\n'), ((1957, 1975), 'numpy.sqrt', 'np.sqrt', (['cov[1, 1]'], {}), '(cov[1, 1])\n', (1964, 1975), True, 'import numpy as np\n'), ((3123, 3145), 'numpy.all', 'np.all', (['(quantiles >= 0)'], {}), '(quantiles >= 0)\n', (3129, 3145), True, 'import numpy as np\n'), ((3150, 3172), 'numpy.all', 'np.all', (['(quantiles <= 1)'], {}), '(quantiles <= 1)\n', (3156, 3172), True, 'import numpy as np\n'), ((3260, 3278), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (3270, 3278), True, 'import numpy as np\n'), ((3383, 3407), 'numpy.cumsum', 'np.cumsum', (['sample_weight'], {}), '(sample_weight)\n', (3392, 3407), True, 'import numpy as np\n'), ((3642, 3663), 'numpy.sum', 'np.sum', (['sample_weight'], {}), '(sample_weight)\n', (3648, 3663), True, 'import numpy as np\n'), ((1237, 1256), 'numpy.sum', 'np.sum', (['(x * weights)'], {}), '(x * weights)\n', (1243, 1256), True, 'import numpy as np\n'), ((1277, 1296), 'numpy.sum', 'np.sum', (['(y * weights)'], {}), '(y * weights)\n', (1283, 1296), True, 'import numpy as np\n'), ((1998, 2019), 'matplotlib.transforms.Affine2D', 'transforms.Affine2D', 
([], {}), '()\n', (2017, 2019), True, 'import matplotlib.transforms as transforms\n')]
|
import sys
import numpy as np
rng = np.random.default_rng()
# dt = np.dtype('i,i,i,i,i,i,i,i,i,i,U16,U16,U16,U16,f,f,f,f,f,f,f,f')
nrows = 2000000
filename = 'data/bigmixed.csv'
print("Generating {}".format(filename))
with open(filename, 'w') as f:
for k in range(nrows):
values1 = rng.integers(1, 1000, size=10).tolist()
values2 = rng.choice(['abc', 'def', 'αβγ', 'apple', 'orange'], size=4).tolist()
values3 = (rng.integers(0, 100, size=8)/8).tolist()
values = values1 + values2 + values3
s = ','.join(f'{v}' for v in values) + '\n'
f.write(s)
q, r = divmod(100*(k+1), nrows)
if r == 0:
print("\r{:3d}%".format(q), end='')
sys.stdout.flush()
print()
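# Illustrative read-back sketch (not part of the original script): each row is
# 10 ints, 4 unicode strings and 8 floats, so a dtype mirroring the commented-out
# `dt` above can parse it; max_rows keeps the check cheap.
sample = np.genfromtxt(filename, delimiter=',', max_rows=5, encoding='utf-8',
                      dtype='i,i,i,i,i,i,i,i,i,i,U16,U16,U16,U16,f,f,f,f,f,f,f,f')
print(sample)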
|
[
"sys.stdout.flush",
"numpy.random.default_rng"
] |
[((39, 62), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (60, 62), True, 'import numpy as np\n'), ((723, 741), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (739, 741), False, 'import sys\n')]
|
#!/usr/bin/env python3
import argparse
import re
import sys
import zipfile
import numpy as np
import bert_wrapper
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_conllu", type=str, help="Input CoNLL-U file")
parser.add_argument("output_npz", type=str, help="Output NPZ file")
parser.add_argument("--batch_size", default=16, type=int, help="Batch size")
parser.add_argument("--casing", default=bert_wrapper.BertWrapper.CASING_UNCASED, help="Bert model casing")
parser.add_argument("--language", default=bert_wrapper.BertWrapper.LANGUAGE_MULTILINGUAL, help="Bert model language")
parser.add_argument("--layer_indices", default="-1,-2,-3,-4", type=str, help="Bert model layers to average")
parser.add_argument("--size", default=bert_wrapper.BertWrapper.SIZE_BASE, help="Bert model size")
parser.add_argument("--threads", default=4, type=int, help="Threads to use")
parser.add_argument("--with_cls", default=False, action="store_true", help="Return also CLS embedding")
args = parser.parse_args()
args.layer_indices = list(map(int, args.layer_indices.split(",")))
# Load CoNLL-U file
sentences = []
with open(args.input_conllu, mode="r", encoding="utf-8") as conllu_file:
in_sentence = False
for line in conllu_file:
line = line.rstrip("\n")
if line:
if not in_sentence:
sentences.append([])
in_sentence = True
if re.match(r"^[0-9]*\t", line):
columns = line.split("\t")
assert len(columns) == 10
sentences[-1].append(columns[1])
else:
in_sentence = False
if line.startswith("#"): continue
print("Loaded CoNLL-U file with {} sentences and {} words.".format(len(sentences), sum(map(len, sentences))), file=sys.stderr)
bert = bert_wrapper.BertWrapper(language=args.language, size=args.size, casing=args.casing, layer_indices=args.layer_indices,
with_cls=args.with_cls, threads=args.threads, batch_size=args.batch_size)
with zipfile.ZipFile(args.output_npz, mode="w", compression=zipfile.ZIP_STORED) as output_npz:
for i, embeddings in enumerate(bert.bert_embeddings(sentences)):
if (i + 1) % 100 == 0: print("Processed {}/{} sentences.".format(i + 1, len(sentences)), file=sys.stderr)
with output_npz.open("arr_{}".format(i), mode="w") as embeddings_file:
np.save(embeddings_file, embeddings)
print("Done, all embeddings saved.", file=sys.stderr)
|
[
"zipfile.ZipFile",
"argparse.ArgumentParser",
"bert_wrapper.BertWrapper",
"re.match",
"numpy.save"
] |
[((157, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (180, 182), False, 'import argparse\n'), ((1946, 2147), 'bert_wrapper.BertWrapper', 'bert_wrapper.BertWrapper', ([], {'language': 'args.language', 'size': 'args.size', 'casing': 'args.casing', 'layer_indices': 'args.layer_indices', 'with_cls': 'args.with_cls', 'threads': 'args.threads', 'batch_size': 'args.batch_size'}), '(language=args.language, size=args.size, casing=\n args.casing, layer_indices=args.layer_indices, with_cls=args.with_cls,\n threads=args.threads, batch_size=args.batch_size)\n', (1970, 2147), False, 'import bert_wrapper\n'), ((2184, 2258), 'zipfile.ZipFile', 'zipfile.ZipFile', (['args.output_npz'], {'mode': '"""w"""', 'compression': 'zipfile.ZIP_STORED'}), "(args.output_npz, mode='w', compression=zipfile.ZIP_STORED)\n", (2199, 2258), False, 'import zipfile\n'), ((1527, 1555), 're.match', 're.match', (['"""^[0-9]*\\\\t"""', 'line'], {}), "('^[0-9]*\\\\t', line)\n", (1535, 1555), False, 'import re\n'), ((2564, 2600), 'numpy.save', 'np.save', (['embeddings_file', 'embeddings'], {}), '(embeddings_file, embeddings)\n', (2571, 2600), True, 'import numpy as np\n')]
|
import sys, math, numpy, struct
import matplotlib.pyplot as plt
class readBinaryModels(object):
'''Class for reading binary models'''
def __init__(self, fil):
'''Initialize'''
super(readBinaryModels, self).__init__()
self.fread = open(fil, "rb")
self.head = None
self.model = None
def close(self):
'''Close file'''
self.fread.close()
def __readHeader(self):
'''Return header'''
head = []
byte = self.fread.read(4)
if len(byte) == 0:
return None
head.append(*struct.unpack('i', byte))
head.append(*struct.unpack('d', self.fread.read(8)))
head.append(*struct.unpack('d', self.fread.read(8)))
head.append(*struct.unpack('i', self.fread.read(4)))
head.append(*struct.unpack('i', self.fread.read(4)))
return head
def nextModel(self):
'''Calculate next model, unpacked'''
# Read header
self.head = self.__readHeader()
if self.head is None:
return False
self.model = []
for ii in range(self.head[3]):
s = []
for jj in range(self.head[4]):
s.append(*struct.unpack('d', self.fread.read(8)))
self.model.append(s)
return True
def readOnlyHeader(self):
'''Look only for the header and skip the rest'''
# Read header
self.head = self.__readHeader()
if self.head is None:
return False
# Skip file
        for ii in range(self.head[3]):
            for jj in range(self.head[4]):
self.fread.read(8)
return True
def main():
'''Get evolution of one element in epsilon or [X/Fe]'''
# Check arguments
if len(sys.argv) < 4:
print("Usage python {} <(eps|xfe|massf)> <model>".format(sys.argv[0]), end = " ")
print("<elem1> [elem2, elem3, ...]")
return 1
data = "../../data/species.dat"
mode = sys.argv[1]
archivo = sys.argv[2]
elms = sys.argv[3:]
solarH = 0.7381
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Now go model by model, calculating everything for every element
modelObj = readBinaryModels(archivo)
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
ages = []; evolEps = []; evolXFe = []; evolMassF = []
while True:
isNewModel = modelObj.nextModel()
if not isNewModel:
break
header = modelObj.head
model = modelObj.model
# Get the age
age = 10**(header[2] - 3)
if len(ages) == 0:
ages.append(age)
else:
ages.append(age - ages[0])
# Report some progress
print(len(ages))
# Find the surface for this model
for ii in range(1, len(model)):
mass = (model[ii - 1][0] + model[ii][0])*0.5
# If found surface, extract information
if mass >= 0.85:
prevLine = model[ii - 1]
newLine = model[ii]
# Take all abundances
dens = [(x + y)*0.5 for (x, y) in zip(prevLine[4:], newLine[4:])]
epsVals = {}; xFeVals = {}; mFVals = {}
# Add the values for each element
for ii in range(len(atomicNum)):
key = atomicNum[ii]
epsVals[key] = epsVals.get(key, 0) + dens[ii]
mFVals[key] = mFVals.get(key, 0) + dens[ii]*atomicMass[ii]
xFeVals[key] = mFVals[key]
# Now calculate values of interest
feVal = xFeVals[namesZ["fe"]]
sunFeVal = solarValues[namesZ["fe"]]
selectedEps = []; selectedFe = []; selectedMassF = []
for elem in elms:
                    try:
                        val = epsVals[namesZ[elem]]/solarH + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                        continue  # Skip elements missing from the species list
                    except:
                        raise
                    val = math.log10(val) + 12
                    selectedEps.append(val)
                    try:
                        val = xFeVals[namesZ[elem]]/feVal + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                        continue
                    except:
                        raise
                    sunVal = solarValues.get(namesZ[elem], 1e-100)/sunFeVal
                    val = math.log10(val) - math.log10(sunVal)
                    selectedFe.append(val)
                    try:
                        val = mFVals[namesZ[elem]] + 1e-100
                    except KeyError:
                        print("{} is not on the list".format(elem))
                        continue
                    except:
                        raise
                    selectedMassF.append(val)
                break
evolEps.append(selectedEps)
evolXFe.append(selectedFe)
evolMassF.append(selectedMassF)
# Transform age and evol values to something plottable
ages[0] = 0
evolEps = numpy.transpose(numpy.array(evolEps))
evolXFe = numpy.transpose(numpy.array(evolXFe))
evolMassF = numpy.transpose(numpy.array(evolMassF))
# Now plot values
if mode == "eps":
for ii in range(len(elms)):
evEps = evolEps[ii]
minLen = min(len(ages), len(evEps))
plt.plot(ages[0:minLen], evEps[0:minLen], label = elms[ii], lw = 2)
plt.xlabel("TP-AGB time in ky")
plt.ylabel("Log epsilon")
#plt.ylim([-2, 5])
plt.ylim([0, 5])
elif mode == "xfe":
for ii in range(len(elms)):
evXFe = evolXFe[ii]
minLen = min(len(ages), len(evXFe))
plt.plot(ages[0:minLen], evXFe[0:minLen], label = elms[ii], lw = 2)
plt.xlabel("TP-AGB time in ky")
plt.ylabel("[X/Fe]")
plt.ylim([-0.2, 2])
elif mode == "massf":
for ii in range(len(elms)):
evMassF = evolMassF[ii]
minLen = min(len(ages), len(evMassF))
plt.plot(ages[0:minLen], evMassF[0:minLen], label = elms[ii], lw = 2)
plt.xlabel("TP-AGB time in ky")
plt.ylabel("Mass fraction")
plt.yscale("log")
#plt.legend(loc = 0)
plt.show()
return 0
if __name__ == "__main__":
main()
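# Illustrative helper sketch (not part of the original script, defined after the
# entry point so the original flow is untouched): writes one dummy model in the
# binary layout readBinaryModels expects -- int32, float64, float64, int32 nrows,
# int32 ncols, then nrows*ncols float64 values. Field meanings beyond the row and
# column counts are assumptions inferred from main(); the helper is never called.
def write_dummy_model(path):
    with open(path, "wb") as fwrite:
        fwrite.write(struct.pack('i', 1))     # model counter (illustrative)
        fwrite.write(struct.pack('d', 1.0))   # unused double header field
        fwrite.write(struct.pack('d', 3.0))   # read back as header[2], a log-age
        fwrite.write(struct.pack('i', 2))     # number of rows
        fwrite.write(struct.pack('i', 5))     # number of columns
        for val in (0.1, 1.0, 1.0, 1.0, 0.5,
                    0.9, 1.0, 1.0, 1.0, 0.5):  # mass, T, rho, radiat, abundance
            fwrite.write(struct.pack('d', val))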
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"struct.unpack",
"matplotlib.pyplot.ylim",
"math.log10",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show"
] |
[((7333, 7343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7341, 7343), True, 'import matplotlib.pyplot as plt\n'), ((6152, 6172), 'numpy.array', 'numpy.array', (['evolEps'], {}), '(evolEps)\n', (6163, 6172), False, 'import sys, math, numpy, struct\n'), ((6204, 6224), 'numpy.array', 'numpy.array', (['evolXFe'], {}), '(evolXFe)\n', (6215, 6224), False, 'import sys, math, numpy, struct\n'), ((6258, 6280), 'numpy.array', 'numpy.array', (['evolMassF'], {}), '(evolMassF)\n', (6269, 6280), False, 'import sys, math, numpy, struct\n'), ((6532, 6563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (6542, 6563), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log epsilon"""'], {}), "('Log epsilon')\n", (6582, 6597), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6649), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 5]'], {}), '([0, 5])\n', (6641, 6649), True, 'import matplotlib.pyplot as plt\n'), ((6455, 6518), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 'evEps[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evEps[0:minLen], label=elms[ii], lw=2)\n', (6463, 6518), True, 'import matplotlib.pyplot as plt\n'), ((6880, 6911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (6890, 6911), True, 'import matplotlib.pyplot as plt\n'), ((6920, 6940), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[X/Fe]"""'], {}), "('[X/Fe]')\n", (6930, 6940), True, 'import matplotlib.pyplot as plt\n'), ((6949, 6968), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.2, 2]'], {}), '([-0.2, 2])\n', (6957, 6968), True, 'import matplotlib.pyplot as plt\n'), ((590, 614), 'struct.unpack', 'struct.unpack', (['"""i"""', 'byte'], {}), "('i', byte)\n", (603, 614), False, 'import sys, math, numpy, struct\n'), ((6803, 6866), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 'evXFe[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evXFe[0:minLen], label=elms[ii], lw=2)\n', (6811, 6866), True, 'import matplotlib.pyplot as plt\n'), ((7209, 7240), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TP-AGB time in ky"""'], {}), "('TP-AGB time in ky')\n", (7219, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7249, 7276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mass fraction"""'], {}), "('Mass fraction')\n", (7259, 7276), True, 'import matplotlib.pyplot as plt\n'), ((7285, 7302), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (7295, 7302), True, 'import matplotlib.pyplot as plt\n'), ((7130, 7195), 'matplotlib.pyplot.plot', 'plt.plot', (['ages[0:minLen]', 'evMassF[0:minLen]'], {'label': 'elms[ii]', 'lw': '(2)'}), '(ages[0:minLen], evMassF[0:minLen], label=elms[ii], lw=2)\n', (7138, 7195), True, 'import matplotlib.pyplot as plt\n'), ((5113, 5128), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (5123, 5128), False, 'import sys, math, numpy, struct\n'), ((5536, 5551), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (5546, 5551), False, 'import sys, math, numpy, struct\n'), ((5554, 5572), 'math.log10', 'math.log10', (['sunVal'], {}), '(sunVal)\n', (5564, 5572), False, 'import sys, math, numpy, struct\n')]
|
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.pipeline import make_pipeline
import matplotlib.pyplot as plt
import numpy as np
import random
#===============================================================================================#
# Number of cases per day of covid 19 in the US for 218 days
cases = [
1,0,1,0,3,0,0,0,0,2,1,0,3,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,8,6,23,25,
20,66,47,64,147,225,290,278,414,267,338,1237,755,2797,3419,4777,3528,5836,8821,10934,
10115,13987,16916,17965,19332,18251,22635,22562,27043,26135,34864,30683,26065,43438,
21597,31534,31705,33251,33288,29145,24156,26385,27158,29164,29002,29916,25995,29468,
26490,25858,37144,29873,33161,29256,23371,23901,25512,31787,30369,29794,29763,19138,
22303,23366,30861,25996,26660,23792,18106,21467,20869,27191,22977,31967,13284,24481,
23405,22860,20522,24268,26229,15342,24958,16429,19680,21304,18123,23553,26177,14790,
24955,14676,20555,29034,29214,17919,17598,17376,20486,21744,22317,25468,21957,18577,
28392,22834,27828,32218,32411,27616,26657,34313,37667,40588,44602,44703,41390,35664,
43644,54357,52730,57718,52228,44361,46329,50304,64771,59260,66281,62918,60469,58858,
60971,67404,72045,74710,67574,63201,57777,63028,70106,72219,74818,64582,61795,54448,
59862,65935,68042,68605,58947,47576,49716,49988,53685,55836,62042,54590,48690,40522,
55540,56307,52799,56729,54686,41893,38986,39318,46500,44864,46754,45265,38679,33076,
37086,46393
]
days = list(range(len(cases)))
print(len(days))
days = np.asarray(days)
cases = np.asarray(cases)
days = days[:, np.newaxis]
cases = cases[:, np.newaxis]
plt.scatter(days, cases)
plt.show()
xseq = np.linspace(days.min(), days.max(), 300).reshape(-1,1)
regr = make_pipeline(PolynomialFeatures(12), LinearRegression())
regr.fit(days, cases)
plt.scatter(days, cases)
plt.plot(xseq, regr.predict(xseq), color = "red")
plt.show()
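# Illustrative evaluation sketch (not part of the original script): the metrics
# imported above are otherwise unused; this reports the in-sample fit quality of
# the degree-12 pipeline.
pred = regr.predict(days)
print("RMSE:", np.sqrt(mean_squared_error(cases, pred)))
print("R^2 :", r2_score(cases, pred))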
#===============================================================================================#
# Ref
# https://espanol.cdc.gov/coronavirus/2019-ncov/cases-updates/previouscases.html
|
[
"sklearn.preprocessing.PolynomialFeatures",
"numpy.asarray",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] |
[((1680, 1696), 'numpy.asarray', 'np.asarray', (['days'], {}), '(days)\n', (1690, 1696), True, 'import numpy as np\n'), ((1705, 1722), 'numpy.asarray', 'np.asarray', (['cases'], {}), '(cases)\n', (1715, 1722), True, 'import numpy as np\n'), ((1782, 1806), 'matplotlib.pyplot.scatter', 'plt.scatter', (['days', 'cases'], {}), '(days, cases)\n', (1793, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1815, 1817), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1994), 'matplotlib.pyplot.scatter', 'plt.scatter', (['days', 'cases'], {}), '(days, cases)\n', (1981, 1994), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2053, 2055), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1925), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(12)'], {}), '(12)\n', (1921, 1925), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1927, 1945), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1943, 1945), False, 'from sklearn.linear_model import LinearRegression\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test cases for Poisson distribution"""
import numpy as np
from scipy import stats
import mindspore.context as context
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Prob(nn.Cell):
"""
Test class: probability of Poisson distribution.
"""
def __init__(self):
super(Prob, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.prob(x_)
def test_pdf():
"""
Test pdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_pdf = poisson_benchmark.pmf([-1.0, 0.0, 1.0]).astype(np.float32)
pdf = Prob()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = pdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_pdf) < tol).all()
class LogProb(nn.Cell):
"""
Test class: log probability of Poisson distribution.
"""
def __init__(self):
super(LogProb, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_prob(x_)
def test_log_likelihood():
"""
Test log_pdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logpdf = poisson_benchmark.logpmf([1.0, 2.0]).astype(np.float32)
logprob = LogProb()
x_ = Tensor(np.array([1.0, 2.0]).astype(np.float32), dtype=dtype.float32)
output = logprob(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all()
class Basics(nn.Cell):
"""
Test class: mean/sd/mode of Poisson distribution.
"""
def __init__(self):
super(Basics, self).__init__()
self.p = msd.Poisson([1.44], dtype=dtype.float32)
def construct(self):
return self.p.mean(), self.p.sd(), self.p.mode()
def test_basics():
"""
    Test mean, standard deviation and mode.
"""
basics = Basics()
mean, sd, mode = basics()
expect_mean = 1.44
expect_sd = 1.2
expect_mode = 1
tol = 1e-6
assert (np.abs(mean.asnumpy() - expect_mean) < tol).all()
assert (np.abs(sd.asnumpy() - expect_sd) < tol).all()
assert (np.abs(mode.asnumpy() - expect_mode) < tol).all()
class Sampling(nn.Cell):
"""
Test class: sample of Poisson distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.p = msd.Poisson([[1.0], [0.5]], seed=seed, dtype=dtype.float32)
self.shape = shape
def construct(self, rate=None):
return self.p.sample(self.shape, rate)
def test_sample():
"""
Test sample.
"""
shape = (2, 3)
seed = 10
rate = Tensor([1.0, 2.0, 3.0], dtype=dtype.float32)
sample = Sampling(shape, seed=seed)
output = sample(rate)
assert output.shape == (2, 3, 3)
class CDF(nn.Cell):
"""
Test class: cdf of Poisson distribution.
"""
def __init__(self):
super(CDF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.cdf(x_)
def test_cdf():
"""
Test cdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_cdf = poisson_benchmark.cdf([-1.0, 0.0, 1.0]).astype(np.float32)
cdf = CDF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = cdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_cdf) < tol).all()
class LogCDF(nn.Cell):
"""
Test class: log_cdf of Poisson distribution.
"""
def __init__(self):
super(LogCDF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_cdf(x_)
def test_log_cdf():
"""
Test log_cdf.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logcdf = poisson_benchmark.logcdf([0.5, 1.0, 2.5]).astype(np.float32)
logcdf = LogCDF()
x_ = Tensor(np.array([0.5, 1.0, 2.5]).astype(np.float32), dtype=dtype.float32)
output = logcdf(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logcdf) < tol).all()
class SF(nn.Cell):
"""
Test class: survival function of Poisson distribution.
"""
def __init__(self):
super(SF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.survival_function(x_)
def test_survival():
"""
Test survival function.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_survival = poisson_benchmark.sf([-1.0, 0.0, 1.0]).astype(np.float32)
survival = SF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = survival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_survival) < tol).all()
class LogSF(nn.Cell):
"""
Test class: log survival function of Poisson distribution.
"""
def __init__(self):
super(LogSF, self).__init__()
self.p = msd.Poisson([0.5], dtype=dtype.float32)
def construct(self, x_):
return self.p.log_survival(x_)
def test_log_survival():
"""
Test log survival function.
"""
poisson_benchmark = stats.poisson(mu=0.5)
expect_logsurvival = poisson_benchmark.logsf([-1.0, 0.0, 1.0]).astype(np.float32)
logsurvival = LogSF()
x_ = Tensor(np.array([-1.0, 0.0, 1.0]).astype(np.float32), dtype=dtype.float32)
output = logsurvival(x_)
tol = 1e-6
assert (np.abs(output.asnumpy() - expect_logsurvival) < tol).all()
|
[
"mindspore.context.set_context",
"numpy.array",
"scipy.stats.poisson",
"mindspore.nn.probability.distribution.Poisson",
"mindspore.Tensor"
] |
[((924, 992), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""'}), "(mode=context.GRAPH_MODE, device_target='Ascend')\n", (943, 992), True, 'import mindspore.context as context\n'), ((1334, 1355), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (1347, 1355), False, 'from scipy import stats\n'), ((2002, 2023), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (2015, 2023), False, 'from scipy import stats\n'), ((3448, 3492), 'mindspore.Tensor', 'Tensor', (['[1.0, 2.0, 3.0]'], {'dtype': 'dtype.float32'}), '([1.0, 2.0, 3.0], dtype=dtype.float32)\n', (3454, 3492), False, 'from mindspore import Tensor\n'), ((3926, 3947), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (3939, 3947), False, 'from scipy import stats\n'), ((4575, 4596), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (4588, 4596), False, 'from scipy import stats\n'), ((5263, 5284), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (5276, 5284), False, 'from scipy import stats\n'), ((5966, 5987), 'scipy.stats.poisson', 'stats.poisson', ([], {'mu': '(0.5)'}), '(mu=0.5)\n', (5979, 5987), False, 'from scipy import stats\n'), ((1162, 1201), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (1173, 1201), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1811, 1850), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (1822, 1850), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2482, 2522), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[1.44]'], {'dtype': 'dtype.float32'}), '([1.44], dtype=dtype.float32)\n', (2493, 2522), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3180, 3239), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[[1.0], [0.5]]'], {'seed': 'seed', 'dtype': 'dtype.float32'}), '([[1.0], [0.5]], seed=seed, dtype=dtype.float32)\n', (3191, 3239), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3755, 3794), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (3766, 3794), True, 'import mindspore.nn.probability.distribution as msd\n'), ((4392, 4431), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (4403, 4431), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5059, 5098), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (5070, 5098), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5759, 5798), 'mindspore.nn.probability.distribution.Poisson', 'msd.Poisson', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (5770, 5798), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1465, 1491), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (1473, 1491), True, 'import numpy as np\n'), ((2140, 2160), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (2148, 2160), True, 'import numpy as np\n'), ((4056, 4082), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), 
'([-1.0, 0.0, 1.0])\n', (4064, 4082), True, 'import numpy as np\n'), ((4716, 4741), 'numpy.array', 'np.array', (['[0.5, 1.0, 2.5]'], {}), '([0.5, 1.0, 2.5])\n', (4724, 4741), True, 'import numpy as np\n'), ((5401, 5427), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (5409, 5427), True, 'import numpy as np\n'), ((6116, 6142), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0]'], {}), '([-1.0, 0.0, 1.0])\n', (6124, 6142), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nltk.tokenize import TweetTokenizer
from nltk.stem.snowball import SnowballStemmer
import numpy as np
from collections import defaultdict,Counter
import logging as logger
from nortok.stopwords import get_norwegian_stopwords
import pickle
import gzip
def dd_def():
return 0
def _fit_tokenizer(texts,tokfunc,max_length=None):
"""
Makes word dictionaries based on text. Zero (0) is reserved.
"""
doc_count=0
wordcount=Counter()
ind2word={}
word2ind=defaultdict(dd_def)
maxInd=0
for text in texts:
out=tokfunc(text,max_length=max_length)
for c in out:
wordcount[c]+=1
os=set(out)
for c in os:
if c not in word2ind:
maxInd+=1
word2ind[c]=maxInd
ind2word[maxInd]=c
return word2ind,ind2word,wordcount
def _max_word_vocab(word2ind,ind2word,wcs,max_words=None):
if max_words is None:
return word2ind,ind2word
if len(word2ind)<max_words:
logger.info('len(word2ind)<=max_words:{0}'.format(max_words))
return word2ind,ind2word
w2i=defaultdict(dd_def)
i2w={}
wordInd=1
for w in wcs:
word=w[0]
if wordInd>=max_words:
break
w2i[word]=wordInd
i2w[wordInd]=word
wordInd+=1
return w2i,i2w
def _texts_to_seqs(texts,tokfunc,word2ind,max_len,n_texts=None):
if n_texts is None:
n_texts=len(texts)
seqs=np.zeros((n_texts,max_len),dtype='int32')
for iii,txt in enumerate(texts):
toks=tokfunc(txt,max_len)
ml=min(max_len,len(toks))
seqs[iii,:ml]=[word2ind[tok] for tok in toks]
return seqs
def texts_to_seqs_var(texts,tokfunc,word2ind,max_len=None):
for txt in texts:
toks=tokfunc(txt,max_length=max_len)
yield [word2ind[tok] for tok in toks]
class BaseTokenizer(object):
def __init__(self,word2ind=None,max_words=None,min_frequency=2,**kwargs):
if word2ind is not None:
self.document_count=1
self.word2ind=defaultdict(dd_def,word2ind)
self.min_frequency=min_frequency
def tokenize(self,text,max_length=None):
toks=text.split()
if max_length:
toks=toks[:max_length]
return toks
def texts_to_sequences(self,texts,max_len,n_texts=None):
seqs=_texts_to_seqs(texts,self.tokenize,self.word2ind,max_len,n_texts)
return seqs
def var_length_texts_to_sequences(self,texts):
return texts_to_seqs_var(texts,self.tokenize,self.word2ind)
def fit_tokenizer(self,texts,max_length,max_words=None):
word2ind,ind2word,wordcount=_fit_tokenizer(texts,self.tokenize,max_length=max_length)
wordcount=dict((q,r) for q,r in wordcount.items() if r>=self.min_frequency)
def skey(x):
return x[1]
wcs=list(wordcount.items())
wcs.sort(key=skey,reverse=True)
self.wordcount=wcs
self.word2ind,self.ind2word=_max_word_vocab(word2ind,ind2word,self.wordcount,max_words)
self.max_words=max_words
def prune_vocab(self,max_words=None):
if max_words>=self.max_words:
raise ValueError("Can't prune with larger vocabulary.")
self.word2ind,self.ind2word=_max_word_vocab(self.word2ind,self.ind2word,self.wordcount,max_words)
self.max_words=max_words
def save_tokenizer(self,savepath,extraobjs=None):
w2i=list(self.word2ind.items())
i2w=list(self.ind2word.items())
outdict={'word2ind':w2i,'ind2word':i2w,'max_words':self.max_words}
if extraobjs:
outdict.update(extraobjs)
with gzip.open(savepath,'wb') as ff:
pickle.dump(outdict,ff)
@staticmethod
def load_tokenizer(savepath,initClass=None):
with gzip.open(savepath,'rb') as ff:
indict=pickle.load(ff)
indict['word2ind']=defaultdict(dd_def,indict['word2ind'])
indict['ind2word']=dict(indict['ind2word'])
tok=initClass(indict)
for k,v in indict.items():
setattr(tok,k,v)
return tok
class WordTokenizer(BaseTokenizer):
def __init__(self,word2ind=None,use_stopwords=False,use_stemmer=False,max_words=None,**kwargs):
self.strip_handles=False
super(WordTokenizer, self).__init__(**kwargs)
self.tweetok=TweetTokenizer(**kwargs)
if use_stopwords:
if isinstance(use_stopwords,set):
self.stopwords=use_stopwords
else:
self.stopwords=get_norwegian_stopwords()
else:
self.stopwords=False
self.use_stemmer=use_stemmer
if use_stemmer==True:
self.stemmer=SnowballStemmer('norwegian')
def def_eobjs(self):
return {'use_stemmer':self.use_stemmer,'stopwords':self.stopwords}
def tokenize(self,text,max_length=None):
toks=self.tweetok.tokenize(text)
if max_length:
toks=toks[:max_length]
if self.stopwords and (not self.use_stemmer):
toks=[t for t in toks if t not in self.stopwords]
elif self.stopwords and self.use_stemmer:
toks=[self.stemmer.stem(t) for t in toks if t not in self.stopwords]
elif (not self.stopwords) and self.use_stemmer:
toks=[self.stemmer.stem(t) for t in toks]
return toks
def save_tokenizer(self,savepath):
eobjs=self.def_eobjs()
super(WordTokenizer, self).save_tokenizer(savepath=savepath,extraobjs=eobjs)
class RawCharTokenizer(BaseTokenizer):
    def __init__(self, word2ind=None, max_words=None, **kwargs):
        super().__init__(word2ind=word2ind, **kwargs)  # sets min_frequency, needed by fit_tokenizer
        self.max_words = max_words
def tokenize(self,text,max_length=None):
toks=list(text.lower())
if max_length:
toks=toks[:max_length]
return toks
class HierarchicalTokenizer(BaseTokenizer):
def __init__(self,word2ind=None,max_words=None):
self.max_words=max_words
self.wordtok=WordTokenizer()
self.chartok=RawCharTokenizer(max_words=max_words)
def tokenize(self,text,max_len_words=512,max_len_chars=20):
wds=self.wordtok.tokenize(text,max_length=max_len_words)
        toks=[self.chartok.tokenize(wd, max_length=max_len_chars) for wd in wds]
return toks
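# Illustrative usage sketch (not part of the original module): fit a WordTokenizer
# on a tiny corpus and map the texts to fixed-length index sequences. The corpus
# and the max_length/max_len values are assumptions chosen only for the demo.
if __name__ == "__main__":
    corpus = ["dette er en liten test", "en test til", "helt annen tekst her"]
    tok = WordTokenizer()
    tok.fit_tokenizer(corpus, max_length=10, max_words=None)
    print(tok.texts_to_sequences(corpus, max_len=6))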
|
[
"nltk.tokenize.TweetTokenizer",
"pickle.dump",
"gzip.open",
"pickle.load",
"collections.Counter",
"nltk.stem.snowball.SnowballStemmer",
"numpy.zeros",
"collections.defaultdict",
"nortok.stopwords.get_norwegian_stopwords"
] |
[((494, 503), 'collections.Counter', 'Counter', ([], {}), '()\n', (501, 503), False, 'from collections import defaultdict, Counter\n'), ((533, 552), 'collections.defaultdict', 'defaultdict', (['dd_def'], {}), '(dd_def)\n', (544, 552), False, 'from collections import defaultdict, Counter\n'), ((1159, 1178), 'collections.defaultdict', 'defaultdict', (['dd_def'], {}), '(dd_def)\n', (1170, 1178), False, 'from collections import defaultdict, Counter\n'), ((1506, 1549), 'numpy.zeros', 'np.zeros', (['(n_texts, max_len)'], {'dtype': '"""int32"""'}), "((n_texts, max_len), dtype='int32')\n", (1514, 1549), True, 'import numpy as np\n'), ((3933, 3972), 'collections.defaultdict', 'defaultdict', (['dd_def', "indict['word2ind']"], {}), "(dd_def, indict['word2ind'])\n", (3944, 3972), False, 'from collections import defaultdict, Counter\n'), ((4382, 4406), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '(**kwargs)\n', (4396, 4406), False, 'from nltk.tokenize import TweetTokenizer\n'), ((2098, 2127), 'collections.defaultdict', 'defaultdict', (['dd_def', 'word2ind'], {}), '(dd_def, word2ind)\n', (2109, 2127), False, 'from collections import defaultdict, Counter\n'), ((3690, 3715), 'gzip.open', 'gzip.open', (['savepath', '"""wb"""'], {}), "(savepath, 'wb')\n", (3699, 3715), False, 'import gzip\n'), ((3734, 3758), 'pickle.dump', 'pickle.dump', (['outdict', 'ff'], {}), '(outdict, ff)\n', (3745, 3758), False, 'import pickle\n'), ((3839, 3864), 'gzip.open', 'gzip.open', (['savepath', '"""rb"""'], {}), "(savepath, 'rb')\n", (3848, 3864), False, 'import gzip\n'), ((3890, 3905), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (3901, 3905), False, 'import pickle\n'), ((4739, 4767), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""norwegian"""'], {}), "('norwegian')\n", (4754, 4767), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((4573, 4598), 'nortok.stopwords.get_norwegian_stopwords', 'get_norwegian_stopwords', ([], {}), '()\n', (4596, 4598), False, 'from nortok.stopwords import get_norwegian_stopwords\n')]
|
import numpy.testing as test
import numpy as np
from unittest import TestCase
from PyFVCOM.ocean import *
class OceanToolsTest(TestCase):
def setUp(self):
""" Make a set of data for the various ocean tools functions """
self.lat = 30
self.z = np.array(9712.02)
self.t = np.array(40)
self.s = np.array(40)
self.p = np.array(10000)
self.pr = np.array(0)
self.c = np.array(1.888091)
self.td = np.array(20) # for dens_jackett
self.sd = np.array(20) # for dens_jackett
self.pd = np.array(1000) # for dens_jackett
self.cond = np.array(53000) # for cond2salt
self.h = np.array((10, 20, 30, 100)) # depths for stokes
self.U = 0.25 # U for stokes and dissipation
self.omega = 1 / 44714.1647021416 # omega for stokes
self.z0 = np.array((0.0025)) # z0 for stokes
self.rho = 1025
self.temp = np.arange(-20, 50, 10)
self.dew = np.linspace(0, 20, len(self.temp))
# Use some of the Fofonoff and Millard (1983) checks.
def test_sw_svan(self):
""" Specific volume anomaly """
test_svan = 9.8130210e-6
res_svan = sw_svan(self.t, self.s, self.p)
test.assert_almost_equal(res_svan, test_svan, decimal=1)
def test_res_z(self):
""" Pressure to depth """
test_z = 9712.02
res_z = pressure2depth(self.p, self.lat)
# Hmmm, not very accurate!
test.assert_almost_equal(res_z, test_z, decimal=-1)
# The return to depth is a bit inaccurate, not sure why.
def test_depth2pressure(self):
""" Depth to pressure """
test_p = 9712.653
res_pres = depth2pressure(self.z, self.lat)
# Hmmm, horribly inaccurate!
test.assert_almost_equal(res_pres, test_p, decimal=-4)
def test_cp_sw(self):
""" Specific heat of seawater """
test_cp = 3849.5
res_cp = cp_sw(self.t, self.s, self.p)
test.assert_almost_equal(res_cp, test_cp, decimal=1)
def test_dT_adiab_sw(self):
""" Adiabatic temperature gradient """
test_atg = 0.0003255976
res_atg = dT_adiab_sw(self.t, self.s, self.p)
test.assert_almost_equal(res_atg, test_atg, decimal=6)
def test_theta_sw(self):
""" Potential temperature for sea water """
test_theta = 36.89073
res_theta = theta_sw(self.t, self.s, self.p, self.pr)
test.assert_almost_equal(res_theta, test_theta, decimal=2)
def test_sw_sal78(self):
""" Salinity from conductivity, temperature and pressure (sw_sal78) """
test_salinity = 40
res_sal78 = sw_sal78(self.c, self.t, self.p)
test.assert_almost_equal(res_sal78, test_salinity, decimal=5)
def test_dens_jackett(self):
""" Density from temperature, salinity and pressure """
test_dens = 1017.728868019642
res_dens = dens_jackett(self.td, self.sd, self.pd)
test.assert_equal(res_dens, test_dens)
def test_cond2salt(self):
""" Conductivity to salinity """
test_salt = 34.935173507811783
res_salt = cond2salt(self.cond)
test.assert_equal(res_salt, test_salt)
# def test_stokes(self):
# """ Stokes number """
# test_stokes, test_u_star, test_delta = np.nan, np.nan, np.nan
# res_stokes, res_u_star, res_delta = stokes(self.h, self.U, self.omega, self.z0, U_star=True, delta=True)
# test.assert_equal(res_stokes, test_stokes)
# test.assert_equal(res_u_star, test_u_star)
# test.assert_equal(res_delta, test_delta)
def test_dissipation(self):
""" Tidal dissipation for a given tidal harmonic """
test_dissipation = 0.0400390625
res_dissipation = dissipation(self.rho, self.U)
test.assert_equal(res_dissipation, test_dissipation)
def test_rhum(self):
""" Relative humidity from dew temperature and air temperature """
test_rhum = np.array((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971, 31.67003471))
res_rhum = rhum(self.dew, self.temp)
test.assert_almost_equal(res_rhum, test_rhum)
|
[
"numpy.array",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.testing.assert_almost_equal"
] |
[((275, 292), 'numpy.array', 'np.array', (['(9712.02)'], {}), '(9712.02)\n', (283, 292), True, 'import numpy as np\n'), ((310, 322), 'numpy.array', 'np.array', (['(40)'], {}), '(40)\n', (318, 322), True, 'import numpy as np\n'), ((340, 352), 'numpy.array', 'np.array', (['(40)'], {}), '(40)\n', (348, 352), True, 'import numpy as np\n'), ((370, 385), 'numpy.array', 'np.array', (['(10000)'], {}), '(10000)\n', (378, 385), True, 'import numpy as np\n'), ((404, 415), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (412, 415), True, 'import numpy as np\n'), ((433, 451), 'numpy.array', 'np.array', (['(1.888091)'], {}), '(1.888091)\n', (441, 451), True, 'import numpy as np\n'), ((470, 482), 'numpy.array', 'np.array', (['(20)'], {}), '(20)\n', (478, 482), True, 'import numpy as np\n'), ((521, 533), 'numpy.array', 'np.array', (['(20)'], {}), '(20)\n', (529, 533), True, 'import numpy as np\n'), ((572, 586), 'numpy.array', 'np.array', (['(1000)'], {}), '(1000)\n', (580, 586), True, 'import numpy as np\n'), ((627, 642), 'numpy.array', 'np.array', (['(53000)'], {}), '(53000)\n', (635, 642), True, 'import numpy as np\n'), ((677, 704), 'numpy.array', 'np.array', (['(10, 20, 30, 100)'], {}), '((10, 20, 30, 100))\n', (685, 704), True, 'import numpy as np\n'), ((860, 876), 'numpy.array', 'np.array', (['(0.0025)'], {}), '(0.0025)\n', (868, 876), True, 'import numpy as np\n'), ((940, 962), 'numpy.arange', 'np.arange', (['(-20)', '(50)', '(10)'], {}), '(-20, 50, 10)\n', (949, 962), True, 'import numpy as np\n'), ((1236, 1292), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_svan', 'test_svan'], {'decimal': '(1)'}), '(res_svan, test_svan, decimal=1)\n', (1260, 1292), True, 'import numpy.testing as test\n'), ((1471, 1522), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_z', 'test_z'], {'decimal': '(-1)'}), '(res_z, test_z, decimal=-1)\n', (1495, 1522), True, 'import numpy.testing as test\n'), ((1777, 1831), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_pres', 'test_p'], {'decimal': '(-4)'}), '(res_pres, test_p, decimal=-4)\n', (1801, 1831), True, 'import numpy.testing as test\n'), ((1981, 2033), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_cp', 'test_cp'], {'decimal': '(1)'}), '(res_cp, test_cp, decimal=1)\n', (2005, 2033), True, 'import numpy.testing as test\n'), ((2208, 2262), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_atg', 'test_atg'], {'decimal': '(6)'}), '(res_atg, test_atg, decimal=6)\n', (2232, 2262), True, 'import numpy.testing as test\n'), ((2445, 2503), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_theta', 'test_theta'], {'decimal': '(2)'}), '(res_theta, test_theta, decimal=2)\n', (2469, 2503), True, 'import numpy.testing as test\n'), ((2702, 2763), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_sal78', 'test_salinity'], {'decimal': '(5)'}), '(res_sal78, test_salinity, decimal=5)\n', (2726, 2763), True, 'import numpy.testing as test\n'), ((2967, 3005), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_dens', 'test_dens'], {}), '(res_dens, test_dens)\n', (2984, 3005), True, 'import numpy.testing as test\n'), ((3165, 3203), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_salt', 'test_salt'], {}), '(res_salt, test_salt)\n', (3182, 3203), True, 'import numpy.testing as test\n'), ((3808, 3860), 'numpy.testing.assert_equal', 'test.assert_equal', (['res_dissipation', 'test_dissipation'], {}), '(res_dissipation, 
test_dissipation)\n', (3825, 3860), True, 'import numpy.testing as test\n'), ((3982, 4085), 'numpy.array', 'np.array', (['(487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, 44.70251971,\n 31.67003471)'], {}), '((487.36529085, 270.83391406, 160.16590946, 100.0, 65.47545095, \n 44.70251971, 31.67003471))\n', (3990, 4085), True, 'import numpy as np\n'), ((4134, 4179), 'numpy.testing.assert_almost_equal', 'test.assert_almost_equal', (['res_rhum', 'test_rhum'], {}), '(res_rhum, test_rhum)\n', (4158, 4179), True, 'import numpy.testing as test\n')]
|
from torch.utils.data import Sampler
import numpy as np
def get_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def flatten(l):
return [item for sublist in l for item in sublist]
class LengthSortSampler(Sampler):
def __init__(self, data_source, bs):
super().__init__(data_source)
self.data_source = data_source
self.bs = bs
try:
int(self.data_source[0])
lengths = self.data_source
except TypeError:
lengths = [len(x) for x in self.data_source]
inds = np.argsort(lengths)[::-1]
chunks = list(get_chunks(inds, bs))
chunk_inds = list(range(len(chunks) - 1))
np.random.shuffle(chunk_inds)
chunk_inds = list(chunk_inds) + [len(chunk_inds)]
self.inds = flatten([chunks[i] for i in chunk_inds])
def __len__(self):
return len(self.data_source)
def __iter__(self):
return iter(self.inds)
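# Illustrative usage sketch (not part of the original module): the sampler yields
# indices so that items of similar length land in the same batch; in practice it
# is passed to torch.utils.data.DataLoader via its `sampler=` argument. The toy
# dataset below is an assumption for the demo.
if __name__ == "__main__":
    data = [[0] * n for n in (5, 2, 9, 3, 7, 1)]
    sampler = LengthSortSampler(data, bs=2)
    print(list(iter(sampler)))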
|
[
"numpy.argsort",
"numpy.random.shuffle"
] |
[((704, 733), 'numpy.random.shuffle', 'np.random.shuffle', (['chunk_inds'], {}), '(chunk_inds)\n', (721, 733), True, 'import numpy as np\n'), ((576, 595), 'numpy.argsort', 'np.argsort', (['lengths'], {}), '(lengths)\n', (586, 595), True, 'import numpy as np\n')]
|
import math
import numpy as np
class Strategy:
"""Options strategy class.
Takes in a number of `StrategyLeg`'s (option contracts), and filters that determine
entry and exit conditions.
"""
def __init__(self, schema):
self.schema = schema
self.legs = []
self.conditions = []
self.exit_thresholds = (math.inf, math.inf)
def add_leg(self, leg):
"""Adds leg to the strategy"""
assert self.schema == leg.schema
leg.name = "leg_{}".format(len(self.legs) + 1)
self.legs.append(leg)
return self
def add_legs(self, legs):
"""Adds legs to the strategy"""
for leg in legs:
self.add_leg(leg)
return self
def remove_leg(self, leg_number):
"""Removes leg from the strategy"""
self.legs.pop(leg_number)
return self
def clear_legs(self):
"""Removes *all* legs from the strategy"""
self.legs = []
return self
def add_exit_thresholds(self, profit_pct=math.inf, loss_pct=math.inf):
"""Adds maximum profit/loss thresholds. Both **must** be >= 0.0
Args:
profit_pct (float, optional): Max profit level. Defaults to math.inf
loss_pct (float, optional): Max loss level. Defaults to math.inf
"""
assert profit_pct >= 0
assert loss_pct >= 0
self.exit_thresholds = (profit_pct, loss_pct)
def filter_thresholds(self, entry_cost, current_cost):
"""Returns a `pd.Series` of booleans indicating where profit (loss) levels
exceed the given thresholds.
Args:
entry_cost (pd.Series): Total _entry_ cost of inventory row.
current_cost (pd.Series): Present cost of inventory row.
Returns:
pd.Series: Indicator series with `True` for every row that
exceeds the specified profit/loss thresholds.
"""
profit_pct, loss_pct = self.exit_thresholds
excess_return = (current_cost / entry_cost + 1) * -np.sign(entry_cost)
return (excess_return >= profit_pct) | (excess_return <= -loss_pct)
def __repr__(self):
return "Strategy(legs={}, exit_thresholds={})".format(self.legs, self.exit_thresholds)
|
[
"numpy.sign"
] |
[((2077, 2096), 'numpy.sign', 'np.sign', (['entry_cost'], {}), '(entry_cost)\n', (2084, 2096), True, 'import numpy as np\n')]
|
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from matplotlib.collections import PatchCollection
from sklearn import linear_model
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from importlib import reload
# Constants
#files = ['time_series_19-covid-Confirmed', 'time_series_19-covid-Deaths', 'time_series_19-covid-Recovered']
#labels = ['Confirmed', 'Deaths', 'Recovered']# until 23 March 2020
# Since 24 March 2020
#files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global']
#labels = ['confirmed', 'deaths']
# Since 28 March 2020
files = ['time_series_covid19_confirmed_global', 'time_series_covid19_deaths_global', 'time_series_covid19_recovered_global']
labels = ['confirmed', 'deaths', 'recovered']
def open_csvs():
'''
Finding and opening your most recent data download if timestamp == None.
Alternatively, specify a substring of requested timestamp to select which files to open.
'''
timestamp = None
#timestamp = '20200330_15-26'
df=dict()
lists = list([list(), list(), list()])
with os.scandir() as it:
for entry in it:
for i in range(3):
if (timestamp==None or timestamp in entry.name) and files[i] in entry.name\
and entry.is_file():
lists[i].append(entry.name)
for i in range(3):
lists[i].sort()
df[labels[i]] = pd.read_csv(lists[i][-1])
return df
def data_preparation(df, country, output):
'''
This is used for the JHU CSSE dataset.
output can be 'confirmed', 'deaths', 'recovered', 'active' or 'all'
'active' returns dft['confirmed']-dft['deaths']-dft['recovered']
'all' returns all three as columns in a DataFrame as used in death_over_cases.py
'''
sets = dict({'EU': ['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden']})#,
#'China': [['Anhui', 'China'], ['Beijing', 'China'], ['Chongqing', 'China'], ['Fujian', 'China'], ['Gansu', 'China'], ['Guangdong', 'China'], ['Guangxi', 'China'], ['Guizhou', 'China'], ['Hainan', 'China'], ['Hebei', 'China'], ['Heilongjiang', 'China'], ['Henan', 'China'], ['Hong Kong', 'China'], ['Hubei', 'China'], ['Hunan', 'China'], ['Inner Mongolia', 'China'], ['Jiangsu', 'China'], ['Jiangxi', 'China'], ['Jilin', 'China'], ['Liaoning', 'China'], ['Macau', 'China'], ['Ningxia', 'China'], ['Qinghai', 'China'], ['Shaanxi', 'China'], ['Shandong', 'China'], ['Shanghai', 'China'], ['Shanxi', 'China'], ['Sichuan', 'China'], ['Tianjin', 'China'], ['Tibet', 'China'], ['Xinjiang', 'China'], ['Yunnan', 'China'], ['Zhejiang', 'China']]})
#sets = dict({'EU': ['Croatia', 'Hungary']}) # test only
l = list()
if country == 'EU' or country == 'China' or country == 'Australia':
''' First, recursive implementation
l_members = list()
for member in sets[country]:
l_members.append(data_preparation(df, member, only_cases))
dft_members = pd.concat(l_members, axis=1)
return dft_members.sum(axis=1)
'''
M = dict() # these matrices are the booleans of selections for each Province/State, we take their multiple
for i in range(3):
k = labels[i]
M[k] = list()
if country == 'China' or country == 'Australia':
M[k].append((df[k]['Province/State'].notna()) & (df[k]['Country/Region']==country))
l.append(df[k][M[k][0]].iloc[:,4:].sum(axis=0))
else: # country == 'EU'
for member in sets[country]:
#print(member)
if isinstance(member, str):
M[k].append((df[k]['Province/State'].isna()) & (df[k]['Country/Region']==member))
elif len(member)==2: # if it's a pair of [Province/State, Country/Region]
M[k].append((df[k]['Province/State']==member[0])
& (df[k]['Country/Region']==member[1]))
l.append(df[k][np.sum(np.array(M[k]), axis=0)>=1].iloc[:,4:].sum(axis=0))
dft = pd.concat(l, ignore_index=True, axis=1)
#dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
else:
for i in range(3):
k = labels[i]
if isinstance(country, str):
l.append(df[k][np.logical_and(df[k]['Province/State'].isna(),
df[k]['Country/Region']==country)].iloc[:,4:])
elif len(country)==2: # if it's a pair of [Province/State, Country/Region]
l.append(df[k][np.logical_and(df[k]['Province/State']==country[0],
df[k]['Country/Region']==country[1])].iloc[:,4:])
dft = pd.concat(l, ignore_index=True, axis=0).transpose()
#print(dft)
dft.rename(columns={i: labels[i] for i in range(3)}, inplace=True)
#print(dft)
if output=='all':
df_ts = dft
elif output=='active':
print('Number of recovered in the past eight days:')
print(dft['recovered'][-8:])
df_ts = dft['confirmed']-dft['deaths']-dft['recovered'] # On 24 March 2020, recovered is not available; on 28 March 2020 it is there again.
else:
df_ts = dft[output]
#print(df_ts)
#df_ts.rename(index={df_ts.index[i]: pd.to_datetime(df_ts.index)[i] for i in range(len(df_ts.index))}, inplace=True)
df_ts.rename(index=pd.Series(df_ts.index, index=df_ts.index).apply(lambda x: pd.to_datetime(x)), inplace=True)
#print(df_ts)
return df_ts
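# Illustrative usage sketch (hypothetical country choices): a cumulative pd.Series of confirmed
# cases for a whole country, and all three series for a [Province/State, Country/Region] pair.
#   df_ts = data_preparation(open_csvs(), 'Germany', 'confirmed')
#   df_hubei = data_preparation(open_csvs(), ['Hubei', 'China'], 'all')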
def rm_early_zeros(ts):
'''
    Removes early zeros and NaNs from a pandas time series. It finds the last (most recent) zero or NaN
    in the time series and omits all elements up to and including it. Returns the remaining
    time series, which is free of zeros and NaNs.
pd.Series([0,0,0,0,1,2,0,0,3,6]) -> pd.Series([3,6])
'''
zeroindices = ts[(ts==0) | ts.isna()].index
if len(zeroindices)==0:
return ts
else:
successor = np.nonzero((ts.index==zeroindices.max()))[0][0] + 1
return ts[successor:]
def rm_consecutive_early_zeros(ts, keep=1):
'''
    Removes the first consecutive run of early zeros from a pandas time series,
    except for the last keep of them if there are that many.
rm_consecutive_early_zeros(pd.Series([0,0,0,0,1,2,3,6]), 2) -> pd.Series([0,0,1,2,3,6])
'''
zeroindices = ts[ts==0].index
if len(zeroindices)==0:
return ts
else:
first_pos_index = np.nonzero((ts.index==ts[ts>0].index[0]))[0][0]
if first_pos_index <= keep:
return ts
else:
return ts[first_pos_index-keep:]
def separated(s, lang='en', k=3):
'''
Input must be a string. Puts a comma between blocks of k=3 digits:
'1000000' -> '1,000,000'
'''
if lang == 'de':
chr = '.'
else:
chr = ','
if len(s)>=5:
l=list()
for i in range(len(s)//k):
l.insert(0, s[len(s)-(i+1)*k:len(s)-i*k])
if len(s) % k !=0:
l.insert(0, s[:len(s)-(i+1)*k])
return chr.join(l)
else:
return s
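# Examples (illustrative):
#   separated('1234567')            -> '1,234,567'
#   separated('1234567', lang='de') -> '1.234.567'
# Strings of fewer than five characters (e.g. '1000') are returned unchanged.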
def x2str(x, width):
'''
    Formats a number with two decimals if |x|<1, one decimal if |x|<10, and as a rounded integer otherwise.
    If width is greater than the resulting length, then it pads it with spaces on the left.
    If width<0, then it does no padding.
'''
#if x<0.1 and x>-0.1 and width>=6:
# s = '{:.3f}'.format(x) #str(round(x*1000)/1000)
if x<1 and x>-1 and width>=5:
s = '{:.2f}'.format(x) #str(round(x*100)/100)
elif x<10 and x>-10 and width>=4:
s = '{:.1f}'.format(x) #str(round(x*10)/10)
else:
s = '{:.0f}'.format(x) #str(int(round(x)))
if width > len(s):
return s.rjust(width)
else:
return s
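# Examples (illustrative): x2str(0.456, 6) -> '  0.46', x2str(3.14, 4) -> ' 3.1',
# x2str(123.4, 5) -> '  123'.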
def n2str(n, width):
'''
    Takes an integer. If width is greater than its length, then it pads it with spaces on the left.
    If width<0, then it does no padding.
'''
s = str(n)
if width > len(s):
return s.rjust(width)
else:
return s
def interpolate(df_ts, window_length):
'''
    Returns the entry of the cumulative time series at (last entry minus (window_length-1) days),
    linearly interpolating between the neighbouring dates if that exact date is missing.
'''
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
return c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
return nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
return prv.iloc[-1]
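# Illustrative example (assumed dates): with a cumulative series indexed by
# ['2020-03-01', '2020-03-02', '2020-03-04', '2020-03-05'] and values [1, 2, 4, 8],
# interpolate(df_ts, 3) targets 2020-03-03 and returns 3.0 (halfway between 2 and 4).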
'''
def truncate_before(df_ts, window_length):
#This returns (or interpolates, if not found) from the time series the entry at last entry minus
# (window_length-1) days.
# date of interest:
doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if doi in df_ts.index:
return df_ts.loc[doi:]
else:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(nxt)>0:
df_ts.loc[doi] = nxt.iloc[0]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts.loc[doi] = prv.iloc[-1]
df_ts = df_ts.sort_index(inplace=False)
return df_ts.loc[doi:]
'''
def truncate_before(df_ts, window_length, fill_all_missing):
'''
    Returns the entries of the cumulative time series from (last entry minus (window_length-1) days)
    until the last entry, interpolating entries that are not found.
    When some days are missing from the cumulative time series df_ts, one could assign them zero increments
    and attribute the entire increment to the first day after the gap. Instead, the growth is spread out
    uniformly across the missing days: the first option (0, 0, all increment) would bias the fit towards
    quickly growing cumulatives.
'''
df_ts_new = df_ts.copy()
r = range(window_length-1, 0, -1) if fill_all_missing else [window_length-1]
for i in r:
# date of interest:
#doi = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
doi = df_ts.index[-1]-pd.Timedelta(f'{i} days')
if doi not in df_ts.index:
prv = df_ts[df_ts.index<doi]
nxt = df_ts[df_ts.index>doi]
if len(prv)>0 and len(nxt)>0:
i_prv = prv.index[-1]
i_nxt = nxt.index[0]
c_prv = (i_nxt-doi).days/(i_nxt-i_prv).days
c_nxt = (doi-i_prv).days/(i_nxt-i_prv).days
df_ts_new.loc[doi] = c_prv*df_ts.loc[i_prv] + c_nxt*df_ts.loc[i_nxt]
elif len(nxt)>0:
df_ts_new.loc[doi] = nxt.iloc[0]
elif len(prv)>0: # It can never come this far, df_ts.iloc[-1] exists so nxt is not empty.
df_ts_new.loc[doi] = prv.iloc[-1]
df_ts_new = df_ts_new.sort_index(inplace=False)
return df_ts_new.loc[df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days'):]
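# Illustrative example (assumed dates): with a cumulative series indexed by
# ['2020-03-01', '2020-03-02', '2020-03-04', '2020-03-05'] and values [1, 2, 4, 8],
# truncate_before(df_ts, 4, fill_all_missing=True) returns the last four days
# 2020-03-02..2020-03-05 with the missing 2020-03-03 filled in as 3.0.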
def analysis(df_ts, window_length, exp_or_lin, extent='full'):
'''
df_ts: pd.Series, it is a time series, can be totals or no. per e.g. 100,000 ppl
window_length: int
exp_or_lin in ['exp', 'lin']
For 'exp', because of log2, this requires all entries in df_ts to be positive.
For 'lin', because of log2, this requires last entry in df_ts to be positive.
extent in ['full', 'minimal']
'minimal' doesn't compute predictions.
output: results = [
daily increment in natural units (units of df_ts): float,
daily growth rate in percentage: float,
doubling time in days: float or 0 for 'minimal',
current cases (df_ts.iloc[-1]),
projection_lower: type(df_ts.dtype) or 0 for 'minimal',
projection_upper: type(df_ts.dtype) or 0 for 'minimal',
model_score=R^2: float,
difference of model fit on last date and last data point in log space: float
]
model: sklearn.linear_model
#failure: 0 or 1; 1 if it failed due to nonpositive number in exponential fit or too short time series
'''
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
#if len(i_ts)<window_length:# or (exp_or_lin=='exp' and (i_ts.iloc[-window_length:]<=0).sum()>=5):
if len(i_ts)==0 or (i_ts.index[-1]-i_ts.index[0]).days<window_length-1:
results = 8 * [0]
results[-1] = 100
return results, None
intl_lo_days = 4
intl_hi_days = 6
results = [None] * 8
results[3] = df_ts.iloc[-1]
model = linear_model.LinearRegression(fit_intercept=True)
if exp_or_lin=='exp':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
i_ts[i_ts<=0] = 1
y = i_ts.values
ylog = np.log(y)
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), ylog)
results[0] = math.exp(model.intercept_)
# For doubling, the area of the increments is equal to df_ts[-1]
# cf. https://www.wolframalpha.com/input/?i=integrate+%28exp%28a+t+%2Bb%29+dt%29+from+t%3D0+to+x
if model.coef_[0]!=0:
temp2 = math.exp(model.intercept_)/model.coef_[0]
temp = model.coef_[0]*df_ts.iloc[-1]/math.exp(model.intercept_) + 1
if temp>0:
results[2] = math.log(temp)/model.coef_[0]
else:
results[2] = np.inf
else:
results[2] = df_ts.iloc[-1]/math.exp(model.intercept_)
if extent == 'full':
if model.coef_[0]!=0:
results[4] = (math.exp(model.coef_[0]*intl_lo_days)-1)*temp2 + df_ts.iloc[-1]
results[5] = (math.exp(model.coef_[0]*intl_hi_days)-1)*temp2 + df_ts.iloc[-1]
else:
results[4] = math.exp(model.intercept_)*intl_lo_days + df_ts.iloc[-1]
results[5] = math.exp(model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#if (i_ts_orig.iloc[-window_length:]>0).all():
#if (truncate_before(i_ts_orig, window_length, fill_all_missing=False)>0).all():
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
if (i_ts_0>0).all():
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), ylog)
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), ylog)
else:
results[6] = 0
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
#if df_ts.iloc[-1]==interpolate(df_ts, window_length): # If there is no growth, then exp is not good approx.
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then exp is not good approx.
results[7] = 100 # Exp overestimates growth by a factor of infinity.
else:
if model.coef_[0]!=0:
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = temp2*(1-math.exp(model.coef_[0]*(-window_length+1)))/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
else:
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1
#results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1
results[7] = math.exp(model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1
elif exp_or_lin=='lin':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
y = i_ts.values
model.fit((i_ts.index-i_ts.index[-1]).days.values.reshape(-1, 1), y)
results[0] = model.intercept_
if model.coef_[0]!=0:
if 2*model.coef_[0]*df_ts.iloc[-1] >= - model.intercept_*model.intercept_:
results[2] = (-model.intercept_ + math.sqrt(model.intercept_*model.intercept_ + 2*model.coef_[0]*df_ts.iloc[-1]))/model.coef_[0]
else:
results[2] = np.inf
else:
if model.intercept_!=0:
results[2] = df_ts.iloc[-1]/model.intercept_
else:
if df_ts.iloc[-1]!=0:
results[2] = np.inf
else:
results[2] = 0 # model.coef_[0]==model.intercept_==0
if extent == 'full':
if model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_lo_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_lo_days*model.coef_)):
# there is a zero-crossing until intl_lo_days
results[4] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
results[5] = results[4]
elif model.coef_[0]*model.intercept_<0 and\
((model.coef_[0]>0 and -model.intercept_<intl_hi_days*model.coef_)\
or (model.coef_[0]<0 and -model.intercept_>intl_hi_days*model.coef_)):
# there is a zero-crossing after intl_lo_days, before intl_hi_days
results[5] = -model.intercept_*model.intercept_/(2*model.coef_[0]) + df_ts.iloc[-1]
if results[4] is None:
results[4] = (model.coef_[0]*intl_lo_days/2+model.intercept_)*intl_lo_days + df_ts.iloc[-1]
if results[5] is None:
results[5] = (model.coef_[0]*intl_hi_days/2+model.intercept_)*intl_hi_days + df_ts.iloc[-1]
#results[6] = model.score(np.arange(-window_length+1, 1).reshape(-1, 1), y)
i_ts_0 = (df_ts_0 - df_ts_0.shift(1))[1:]
results[6] = model.score((i_ts_0.index-i_ts_0.index[-1]).days.values.reshape(-1, 1), y)
#if df_ts.iloc[-1]==df_ts.iloc[-window_length]:
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then
if model.coef_[0]==0 and model.intercept_==0:
results[7] = 0
else:
results[7] = 100 # a nonzero linear function overestimates growth by a factor of infinity.
else:
#print(model.coef_[0], model.intercept_, '\n', df_ts.iloc[-window_length:])
#print(-(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1))
#print(df_ts.iloc[-1]-df_ts.iloc[-window_length])
#results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1 # From the integral
#results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-interpolate(df_ts, window_length))-1 # From the integral
results[7] = -(model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1 # From the integral
#print(df_ts.iloc[-1], df_ts.loc[first_day], results[7])
#print(window_length*(2*model.intercept_+model.coef_[0]*(-window_length+1))/(2*(df_ts.iloc[-1]-df_ts.iloc[-window_length]))-1) # From summing
#print((-model.coef_[0]*(-window_length+1)*(-window_length+1)/2+(model.coef_[0]/2-model.intercept_)*(-window_length+1)+model.intercept_)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1) # From summing
#results[7] = np.sum(model.coef_[0]*np.arange(-window_length+1, 1)+model.intercept_)/(df_ts.iloc[-1]-df_ts.iloc[-window_length])-1 # From summing
#print(results[7])
elif exp_or_lin=='mean':
df_ts_orig = df_ts.copy()
df_ts_0 = truncate_before(df_ts_orig, window_length+1, fill_all_missing=False) # For the fit to increments.
df_ts = truncate_before(df_ts, window_length+1, fill_all_missing=True)
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
#y = i_ts.values
i_mean = i_ts.mean()
results[0] = i_mean
if results[0]!=0:
results[2] = df_ts.iloc[-1]/i_mean
else:
if df_ts.iloc[-1]!=0:
results[2] = np.inf
else:
results[2] = 0 # df_ts.iloc[-1]==i_ts.mean()==0
if extent == 'full':
results[4] = i_mean*intl_lo_days + df_ts.iloc[-1]
results[5] = i_mean*intl_hi_days + df_ts.iloc[-1]
results[6] = 0 # coefficient of determination R^2
first_day = df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days')
if df_ts.iloc[-1]==df_ts.loc[first_day]: # If there is no growth, then
if i_mean==0:
results[7] = 0
else:
results[7] = 100 # a nonzero linear function overestimates growth by a factor of infinity.
else:
results[7] = -i_mean*(-window_length+1)/(df_ts.iloc[-1]-df_ts.loc[first_day])-1 # From the integral
class SkeletonModel():
def __init__(self, intercept):
self.coef_ = [0]
self.intercept_ = intercept
model = SkeletonModel(results[0])
if results[2]!=np.inf and results[2]!=0 and results[0]>0:
#print(window_length, df_ts.iloc[-window_length-1:], y, model.coef_[0], model.intercept_, results, 1/results[2])
results[1] = (math.pow(2, 1/results[2])-1)*100
else:
#y_last = (df_ts.iloc[-1]+df_ts.iloc[-2]+df_ts.iloc[-3])/3 # smoothening to lessen the impact of penultimate data point
y_last = (df_ts.iloc[-1]+df_ts.iloc[-2])/2 # smoothening to lessen the impact of penultimate data point
if y_last!=0:
results[1] = results[0]*100 / y_last
elif model.coef_[0]==0 and model.intercept_==0:
results[1] = 0
else:
results[1] = np.inf
if extent == 'minimal':
#results[2] = 0
results[4] = 0
results[5] = 0
#print(model.coef_[0], model.intercept_, results[6], results[7])
#print(window_length, results)
return results, model
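# Illustrative usage sketch (assumed inputs): fit an exponential to the last 7 days of a
# cumulative series and read off the headline numbers documented in the docstring above.
#   results, model = analysis(df_ts, window_length=7, exp_or_lin='exp')
#   daily_increment, growth_pct, doubling_days = results[0], results[1], results[2]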
def select_window_length(R, round_output):
'''
This selects the window length that is good on two aspects: R^2 and matching last value
round_output: boolean. If True, then it returns current and two projected case numbers as int.
'''
nr_col = R.shape[1]
if nr_col==8: # If we're calling this from pick_exp_vs_lin() with window_selection, then we already have this so no need to compute it again and add it as new column.
# We take l_2 norm of 10*(1-R^2) and distance column:
R.insert(nr_col, nr_col, R[6].apply(lambda x: (10*(1-x))**2)
+ R[7].apply(lambda x: x**2))
# Sort and return the row (corresponding to a window_length) with lowest l_2 norm:
#return R.sort_values(7, axis=0, ascending=True).iloc[0:1,:]
if R.shape[0]>1:
R = R.sort_values(8, axis=0, ascending=True)
#print(R)
output = list()
if round_output==True:
for i in range(R.shape[1]):
output.append(int(round(R.iloc[0,i])) if i in [3, 4, 5] else R.iloc[0,i])
else:
output = [R.iloc[0,i] for i in range(R.shape[1])]
return output, R.index[0]
#return [R.iloc[0,i] for i in range(nr_col+1)], R.index[0] # This maintains integer elements as integers, R.iloc[0,:] would cast them as float bc it creates a pd.Series with a shared type.
def pick_exp_vs_lin(r_exp, m_exp, r_lin, m_lin):
r_exp = pd.DataFrame(r_exp).T
r_exp, _ = select_window_length(r_exp, round_output=False)
r_lin = pd.DataFrame(r_lin).T
r_lin, _ = select_window_length(r_lin, round_output=False)
if r_exp[-1] < r_lin[-1]:
return r_exp[:-1], m_exp, 'exp'
else:
return r_lin[:-1], m_lin, 'lin'
#TODO this should have a switch that it should compute in densities when population size is available
def process_geounit(df_ts, window_length, exp_or_lin='both', running_extent='full'):
'''
This processes one geographical unit.
df_ts is the time series.
'''
#df_ts = rm_early_zeros(df_ts)
if exp_or_lin=='mean' and not window_length > 0:
window_length = 7
if window_length > 0:
selected_window_length = window_length
if exp_or_lin=='both':
results_e, model_e = analysis(df_ts, window_length, 'exp', running_extent)
results_l, model_l = analysis(df_ts, window_length, 'lin', running_extent)
results, model, exp_or_lin = pick_exp_vs_lin(results_e, model_e, results_l, model_l)
#print(results_e)
#print(results_l)
elif exp_or_lin=='exp':
results, model = analysis(df_ts, window_length, 'exp', running_extent)
elif exp_or_lin=='lin':
results, model = analysis(df_ts, window_length, 'lin', running_extent)
elif exp_or_lin=='mean':
results, model = analysis(df_ts, window_length, 'mean', running_extent)
else: # do a search over window_lengths for best possible fit
# minimum and maximum allowed window lengths; we test all in this closed interval
wl_lo = 7
wl_hi = 15 # this end point is not included
# Rule out zeros because we take logarithm; rule out windows longer than the time series df_ts.
#wl_hi = min(wl_hi, 1+len(df_ts[df_ts[df_ts>0].idxmin():]), 1+len(df_ts))
wl_hi = min(wl_hi, 1+len(df_ts))
if wl_hi <= wl_lo: # then abort
results, model = analysis(pd.Series([]), 1, 'exp', running_extent)
#return results, model, window_length, exp_or_lin
return pd.DataFrame([results+[window_length, exp_or_lin]]), model
'''
R = pd.DataFrame(np.zeros((wl_hi-wl_lo, 7)), index=range(wl_lo, wl_hi))
models = dict()
for wl in range(wl_lo, wl_hi): # last wl_hi-1 points must be available and positive <==
result_wl, model = analysis_exp(df_ts, wl) # last wl points must be available and positive
R.iloc[wl-wl_lo, :] = result_wl
models[wl] = model
R = R.astype({2: int, 3: int, 4: int})
results, selected_window_length = select_window_length(R)
model = models[selected_window_length]
'''
if exp_or_lin in ['exp', 'both']:
R_e = pd.DataFrame(np.zeros((wl_hi-wl_lo, 8)), index=range(wl_lo, wl_hi))
models_e = dict()
if exp_or_lin in ['lin', 'both']:
R_l = pd.DataFrame(np.zeros((wl_hi-wl_lo, 8)), index=range(wl_lo, wl_hi))
models_l = dict()
for wl in range(wl_lo, wl_hi): # last wl_hi-1 points must be available and positive <==
if exp_or_lin in ['exp', 'both']:
result_wl, model = analysis(df_ts, wl, 'exp', running_extent) # last wl points must be available and positive
R_e.iloc[wl-wl_lo, :] = result_wl
models_e[wl] = model
if exp_or_lin in ['lin', 'both']:
result_wl, model = analysis(df_ts, wl, 'lin', running_extent)
R_l.iloc[wl-wl_lo, :] = result_wl
models_l[wl] = model
if exp_or_lin in ['exp', 'both']:
results_e, selected_window_length_e = select_window_length(R_e, round_output=False)
model_e = models_e[selected_window_length_e]
if exp_or_lin in ['lin', 'both']:
results_l, selected_window_length_l = select_window_length(R_l, round_output=False)
model_l = models_l[selected_window_length_l]
if exp_or_lin == 'exp':
results, model, selected_window_length = results_e[:-1], model_e, selected_window_length_e
if exp_or_lin == 'lin':
results, model, selected_window_length = results_l[:-1], model_l, selected_window_length_l
if exp_or_lin == 'both':
results, model, exp_or_lin = pick_exp_vs_lin(results_e, model_e, results_l, model_l)
selected_window_length = selected_window_length_e if exp_or_lin=='exp'\
else selected_window_length_l
return pd.DataFrame([results+[selected_window_length, exp_or_lin]]), model
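# Illustrative usage sketch (assumed country): window_length=0 triggers the search over window
# lengths 7..14, and exp_or_lin='both' picks between the exponential and the linear fit.
#   df_ts = rm_early_zeros(data_preparation(open_csvs(), 'Germany', 'confirmed'))
#   res, model = process_geounit(df_ts, window_length=0, exp_or_lin='both')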
def print_header(normalise_by, population_csv=None):
print('The number of cases increases daily by /')
if population_csv is not None:
print('The number of cases per {} people increases daily by /'.format(separated(str(int(normalise_by)))))
print('The number of cases increases daily by (%)/')
print('Time it takes for the number of cases to double /')
print('Latest reported number of cases /')
if population_csv is not None:
print('Latest reported number of cases per {} people /'.format(separated(str(int(normalise_by)))))
print('My estimation for number of cases per {} people at present /'.format(separated(str(int(normalise_by)))))
else:
print('My estimation for number of cases at present /')
print('R^2 /')
print('Tail difference /')
print('Window length /')
print('Exponential (e) or linear (l) approximation\n')
def print_results(country, results, normalise_by, population_csv, wl, exp_or_lin, frmt='normal', lang='en'):
'''
frmt (format) can be 'deaths' or other. For 'deaths', there is one more decimal digit displayed for
cases per 100,000 and estimate interval is not displayed.
'''
country_width = 23 if frmt!='deaths' else 24
interval_width = 14
#if country in ['Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg', 'Bremen',
#'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern', 'Niedersachsen',
#'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
#'Sachsen-Anhalt', 'Schleswig-Holstein', 'Thüringen',
#'Deutschland']:
if population_csv=='DEU':
pop = load_population_DEU()
elif population_csv=='world':
pop = load_population_world()
elif population_csv=='BW':
pop = load_population_BW()
else:
pop = normalise_by # We don't normalise.
if not isinstance(country, str): # If it's a province or state of a country or region.
country = country[0]
if frmt!='active': # If input is a cumulative number, then don't display negative estimates.
if results[0]<0:
results[0]=0
if results[1]<0:
results[1]=0
if population_csv is not None:
incr_per_ppl = x2str(normalise_by*results[0]/pop[country], 4 if frmt!='deaths' else 6)
else:
incr_per_ppl = ' ' * 4 if frmt!='deaths' else ' ' * 6
#if ((results[6]>=0.95 and results[7]<=0.5) or (results[7]>=-0.2 and results[7]<=0.1)) and\
# results[0]>0 and frmt!='deaths':
if ((results[6]>=0.75 and results[7]<=0.5) or (results[7]>=-0.3 and results[7]<=0.3)) and\
results[0]>0 and frmt!='deaths':
if population_csv is not None:
nr_cases_per_ppl = x2str(normalise_by*results[3]/pop[country], int(math.log10(normalise_by))+1)
est_lo_per_ppl = normalise_by*results[4]/pop[country]
est_hi_per_ppl = normalise_by*results[5]/pop[country]
else:
nr_cases_per_ppl = ' ' * int(math.log10(normalise_by))
est_lo_per_ppl = results[4]
est_hi_per_ppl = results[5]
est_per_ppl_min = min(est_lo_per_ppl, est_hi_per_ppl)
est_per_ppl_max = max(est_lo_per_ppl, est_hi_per_ppl)
interval = ('[' + x2str(est_per_ppl_min, -1) +', '\
+ x2str(est_per_ppl_max, -1) + ']').rjust(interval_width)
else:
if population_csv is not None:
nr_cases_per_ppl = x2str(normalise_by*results[3]/pop[country], int(math.log10(normalise_by))+1)
else:
nr_cases_per_ppl = ' ' * int(math.log10(normalise_by))
if frmt!='deaths':
interval = ' ' * interval_width
else:
interval = ' '
if exp_or_lin=='exp':
letter = 'e'
elif exp_or_lin=='lin':
letter = 'l'
elif exp_or_lin=='mean':
letter = 'm'
print('{0} {1} {2} {3:5.1f}% {4:7.1f} {5} {6} {7} {8} {9:4.2f} {10:5.2f} {11} {12}'.format(
country[:country_width].ljust(country_width),
x2str(results[0], 6),
incr_per_ppl,
results[1],
results[2] if results[0]>=0 else np.NaN, # if results[1]>=0 else np.NaN,
'Tage' if lang=='de' else 'days',
n2str(int(results[3]), 7),
nr_cases_per_ppl,
interval,
results[6],
results[7] if results[7]<100 else np.nan,
str(wl).rjust(2),
letter).replace('.', ',' if lang=='de' else '.'))
def plotting(df_ts, model, save_not_show, country, window_length, exp_or_lin, lang='en', panels=2):
if not isinstance(country, str): # If it's a province or state of a country or region.
country = country[0]
if panels==2:
fig, (ax0, ax1) = plt.subplots(1,2, figsize=(14.4, 4.8))
elif panels==3:
fig, (ax0, ax1, ax2) = plt.subplots(1,3, figsize=(14.4, 4.8))
if lang=='de':
line0 = 'Beobachtungen'
#line1 = 'Exponentielle Annäherung' if exp_or_lin=='exp' else 'Lineare Annäherung'
if exp_or_lin=='exp':
line1 = 'Exponentielle Annäherung'
elif exp_or_lin=='lin':
line1 = 'Lineare Annäherung'
elif exp_or_lin=='mean':
line1 = 'Annäherung mit Durchschnitt'
fig.suptitle(country + ', Stand ' + df_ts.index[-1].strftime('%d.%m.%Y'))
#plt.gcf().text(0.905, 0.86, "© <NAME>, 2020. http://COVID19de.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
plt.gcf().text(0.905, 0.242, "© <NAME>, 2021. http://COVID19de.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
else:
line0 = 'Observations'
#line1 = 'Exponential approximation' if exp_or_lin=='exp' else 'Linear approximation'
if exp_or_lin=='exp':
line1 = 'Exponential approximation'
elif exp_or_lin=='lin':
line1 = 'Linear approximation'
elif exp_or_lin=='mean':
line1 = 'Approximation with mean'
fig.suptitle(country + ', ' + df_ts.index[-1].strftime('%d %B %Y').lstrip('0'))
#plt.gcf().text(0.905, 0.862, "© <NAME>, 2020. http://COVID19.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
plt.gcf().text(0.905, 0.27, "© <NAME>, 2021. http://COVID19.Melykuti.Be", fontsize=8, color='lightgray', rotation=90)
#fig.tight_layout()
fig.subplots_adjust(bottom=0.2)
#ax1.plot(df_ts[df_ts>0], label=line0)
#ax1.plot(df_ts[df_ts>0].iloc[-window_length:].index, np.power(2, np.arange(0, window_length)*model.coef_ + model.intercept_), label=line1)
#plot_x = df_ts.iloc[-window_length:].index
plot_x = pd.date_range(df_ts.index[-1]-pd.Timedelta(f'{window_length-1} days'), df_ts.index[-1])
i_ts = (df_ts - df_ts.shift(1))[1:] # i for increments
ax0.bar(df_ts[1:].index, i_ts[-len(df_ts)+1:], color='tab:blue')
#df_ts_no0 = rm_consecutive_early_zeros(df_ts)
#df_ts_no0 = df_ts
if exp_or_lin=='exp':
if model is not None:
ax0.plot(plot_x, np.exp(model.coef_[0]*np.arange(-window_length+1, 1) + model.intercept_), color='tab:orange', linewidth=3)
if model.coef_[0]!=0:
temp2 = math.exp(model.intercept_)/model.coef_[0]
#plot_y = (np.exp(model.coef_[0]*np.arange(-window_length+1, 1)) - math.exp(model.coef_[0] * (-window_length+1)))*temp2 + df_ts.iloc[-window_length]
plot_y = (np.exp(model.coef_[0]*np.arange(-window_length+1, 1)) - math.exp(model.coef_[0] * (-window_length+1)))*temp2 + interpolate(df_ts, window_length)
else:
#plot_y = math.exp(model.intercept_)*(np.arange(-window_length+1, 1) - (-window_length+1)) + df_ts.iloc[-window_length]
plot_y = math.exp(model.intercept_)*(np.arange(-window_length+1, 1) - (-window_length+1)) + interpolate(df_ts, window_length)
ax1.plot(plot_x, plot_y, label=line1, color='tab:orange', linewidth=3)
if panels==3:
ax2.plot(plot_x, plot_y, label=line1, color='tab:orange', linewidth=3)
elif exp_or_lin=='lin' or exp_or_lin=='mean':
ax0.plot(plot_x, model.coef_[0]*np.arange(-window_length+1, 1) + model.intercept_, color='tab:pink', linewidth=3)
#plot_y = np.arange(0, window_length)*model.coef_ + model.intercept_
#plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) + df_ts.iloc[-1]
# plot_y = (model.coef_[0]*np.arange(0, window_length)/2+model.intercept_)*np.arange(0, window_length) + df_ts.iloc[-window_length]
#plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) - (model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1) + df_ts.iloc[-window_length]
plot_y = (model.coef_[0]*np.arange(-window_length+1, 1)/2+model.intercept_)*np.arange(-window_length+1, 1) - (model.coef_[0]*(-window_length+1)/2+model.intercept_)*(-window_length+1) + interpolate(df_ts, window_length)
ax1.plot(plot_x, plot_y, label=line1, color='tab:pink', linewidth=3)
if panels==3:
ax2.plot(plot_x, plot_y, label=line1, color='tab:pink', linewidth=3)
ax1.plot(df_ts, label=line0, color='tab:blue')
if panels==3:
ax2.plot(df_ts, label=line0, color='tab:blue')
ax2.set_yscale("log")
for tick in ax0.get_xticklabels():
tick.set_rotation(80)
for tick in ax1.get_xticklabels():
tick.set_rotation(80)
if panels==3:
for tick in ax2.get_xticklabels():
tick.set_rotation(80)
handles, labs = ax1.get_legend_handles_labels()
if model is not None:
ax1.legend((handles[1], handles[0]), (labs[1], labs[0]))
else:
ax1.legend([handles[0]], [labs[0]])
if save_not_show==0:
plt.show()
elif save_not_show==1:
imgfile = country.replace(',', '_').replace(' ', '_').replace('(', '_').replace(')', '_')\
+ '_' + df_ts.index[-1].strftime('%Y-%m-%d') + '.png'
plt.savefig(imgfile)
plt.close(fig)
def load_population_world():
pop = pd.read_csv('population_world.csv', sep='\t')
pop_ser=pd.Series(pop.Population.apply(lambda x: int(x.replace(',', ''))).values, index=pop.Country)
countries = dict()
for country in pop_ser.index:
country_new = country.strip()
countries[country_new] = pop_ser.loc[country]
return countries
def load_population_DEU():
pop = pd.read_csv('population_DEU.csv', sep='\t')
pop_ser=pd.Series(pop.insgesamt.values, index=pop.Bundesland)
countries = dict()
for country in pop_ser.index:
country_new = country.strip()
countries[country_new] = pop_ser.loc[country]
return countries
def load_population_BW(incl_density=False):
pop = pd.read_csv('population_BW.csv', sep=',')
pop_ser=pd.Series(pop['Bevölkerung insgesamt'].values, index=pop.Regionalname)
countries = dict()
for country in pop_ser.index:
countries[country] = pop_ser.loc[country]
if incl_density:
pop.rename(index=pop.Regionalname, inplace=True)
return pop.drop('Regionalname', axis=1, inplace=False)
else:
return countries
|
[
"pandas.read_csv",
"numpy.log",
"math.sqrt",
"math.log",
"numpy.array",
"pandas.plotting.register_matplotlib_converters",
"math.exp",
"math.log10",
"pandas.to_datetime",
"numpy.arange",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"os.scandir",
"numpy.nonzero",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show",
"pandas.Series",
"numpy.logical_and",
"math.pow",
"pandas.Timedelta",
"numpy.zeros",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((231, 263), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (261, 263), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((13594, 13643), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (13623, 13643), False, 'from sklearn import linear_model\n'), ((39619, 39664), 'pandas.read_csv', 'pd.read_csv', (['"""population_world.csv"""'], {'sep': '"""\t"""'}), "('population_world.csv', sep='\\t')\n", (39630, 39664), True, 'import pandas as pd\n'), ((39978, 40021), 'pandas.read_csv', 'pd.read_csv', (['"""population_DEU.csv"""'], {'sep': '"""\t"""'}), "('population_DEU.csv', sep='\\t')\n", (39989, 40021), True, 'import pandas as pd\n'), ((40034, 40087), 'pandas.Series', 'pd.Series', (['pop.insgesamt.values'], {'index': 'pop.Bundesland'}), '(pop.insgesamt.values, index=pop.Bundesland)\n', (40043, 40087), True, 'import pandas as pd\n'), ((40313, 40354), 'pandas.read_csv', 'pd.read_csv', (['"""population_BW.csv"""'], {'sep': '""","""'}), "('population_BW.csv', sep=',')\n", (40324, 40354), True, 'import pandas as pd\n'), ((40367, 40437), 'pandas.Series', 'pd.Series', (["pop['Bevölkerung insgesamt'].values"], {'index': 'pop.Regionalname'}), "(pop['Bevölkerung insgesamt'].values, index=pop.Regionalname)\n", (40376, 40437), True, 'import pandas as pd\n'), ((1146, 1158), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1156, 1158), False, 'import os, math\n'), ((1474, 1499), 'pandas.read_csv', 'pd.read_csv', (['lists[i][-1]'], {}), '(lists[i][-1])\n', (1485, 1499), True, 'import pandas as pd\n'), ((4375, 4414), 'pandas.concat', 'pd.concat', (['l'], {'ignore_index': '(True)', 'axis': '(1)'}), '(l, ignore_index=True, axis=1)\n', (4384, 4414), True, 'import pandas as pd\n'), ((8537, 8578), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (8549, 8578), True, 'import pandas as pd\n'), ((14027, 14036), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (14033, 14036), True, 'import numpy as np\n'), ((14138, 14164), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14146, 14164), False, 'import os, math\n'), ((24964, 24983), 'pandas.DataFrame', 'pd.DataFrame', (['r_exp'], {}), '(r_exp)\n', (24976, 24983), True, 'import pandas as pd\n'), ((25061, 25080), 'pandas.DataFrame', 'pd.DataFrame', (['r_lin'], {}), '(r_lin)\n', (25073, 25080), True, 'import pandas as pd\n'), ((29546, 29608), 'pandas.DataFrame', 'pd.DataFrame', (['[results + [selected_window_length, exp_or_lin]]'], {}), '([results + [selected_window_length, exp_or_lin]])\n', (29558, 29608), True, 'import pandas as pd\n'), ((34280, 34319), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(14.4, 4.8)'}), '(1, 2, figsize=(14.4, 4.8))\n', (34292, 34319), True, 'import matplotlib.pyplot as plt\n'), ((39318, 39328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39326, 39328), True, 'import matplotlib.pyplot as plt\n'), ((11243, 11268), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{i} days"""'], {}), "(f'{i} days')\n", (11255, 11268), True, 'import pandas as pd\n'), ((15841, 15882), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (15853, 15882), True, 'import pandas as pd\n'), ((34370, 34409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(14.4, 4.8)'}), '(1, 3, figsize=(14.4, 4.8))\n', 
(34382, 34409), True, 'import matplotlib.pyplot as plt\n'), ((36171, 36212), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (36183, 36212), True, 'import pandas as pd\n'), ((39535, 39555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imgfile'], {}), '(imgfile)\n', (39546, 39555), True, 'import matplotlib.pyplot as plt\n'), ((39564, 39578), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (39573, 39578), True, 'import matplotlib.pyplot as plt\n'), ((5046, 5085), 'pandas.concat', 'pd.concat', (['l'], {'ignore_index': '(True)', 'axis': '(0)'}), '(l, ignore_index=True, axis=0)\n', (5055, 5085), True, 'import pandas as pd\n'), ((6808, 6851), 'numpy.nonzero', 'np.nonzero', (['(ts.index == ts[ts > 0].index[0])'], {}), '(ts.index == ts[ts > 0].index[0])\n', (6818, 6851), True, 'import numpy as np\n'), ((12031, 12072), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (12043, 12072), True, 'import pandas as pd\n'), ((14393, 14419), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14401, 14419), False, 'import os, math\n'), ((14705, 14731), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14713, 14731), False, 'import os, math\n'), ((19414, 19455), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (19426, 19455), True, 'import pandas as pd\n'), ((22846, 22873), 'math.pow', 'math.pow', (['(2)', '(1 / results[2])'], {}), '(2, 1 / results[2])\n', (22854, 22873), False, 'import os, math\n'), ((26969, 26982), 'pandas.Series', 'pd.Series', (['[]'], {}), '([])\n', (26978, 26982), True, 'import pandas as pd\n'), ((27091, 27144), 'pandas.DataFrame', 'pd.DataFrame', (['[results + [window_length, exp_or_lin]]'], {}), '([results + [window_length, exp_or_lin]])\n', (27103, 27144), True, 'import pandas as pd\n'), ((27785, 27813), 'numpy.zeros', 'np.zeros', (['(wl_hi - wl_lo, 8)'], {}), '((wl_hi - wl_lo, 8))\n', (27793, 27813), True, 'import numpy as np\n'), ((27943, 27971), 'numpy.zeros', 'np.zeros', (['(wl_hi - wl_lo, 8)'], {}), '((wl_hi - wl_lo, 8))\n', (27951, 27971), True, 'import numpy as np\n'), ((35003, 35012), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (35010, 35012), True, 'import matplotlib.pyplot as plt\n'), ((35715, 35724), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (35722, 35724), True, 'import matplotlib.pyplot as plt\n'), ((5716, 5757), 'pandas.Series', 'pd.Series', (['df_ts.index'], {'index': 'df_ts.index'}), '(df_ts.index, index=df_ts.index)\n', (5725, 5757), True, 'import pandas as pd\n'), ((5774, 5791), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {}), '(x)\n', (5788, 5791), True, 'import pandas as pd\n'), ((14484, 14510), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (14492, 14510), False, 'import os, math\n'), ((14567, 14581), 'math.log', 'math.log', (['temp'], {}), '(temp)\n', (14575, 14581), False, 'import os, math\n'), ((22019, 22060), 'pandas.Timedelta', 'pd.Timedelta', (['f"""{window_length - 1} days"""'], {}), "(f'{window_length - 1} days')\n", (22031, 22060), True, 'import pandas as pd\n'), ((32572, 32596), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (32582, 32596), False, 'import os, math\n'), ((33149, 33173), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (33159, 33173), False, 'import os, math\n'), ((36681, 
36707), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (36689, 36707), False, 'import os, math\n'), ((15030, 15056), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (15038, 15056), False, 'import os, math\n'), ((15116, 15142), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (15124, 15142), False, 'import os, math\n'), ((32356, 32380), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (32366, 32380), False, 'import os, math\n'), ((33065, 33089), 'math.log10', 'math.log10', (['normalise_by'], {}), '(normalise_by)\n', (33075, 33089), False, 'import os, math\n'), ((37238, 37264), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (37246, 37264), False, 'import os, math\n'), ((37641, 37673), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (37650, 37673), True, 'import numpy as np\n'), ((38377, 38409), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (38386, 38409), True, 'import numpy as np\n'), ((14825, 14864), 'math.exp', 'math.exp', (['(model.coef_[0] * intl_lo_days)'], {}), '(model.coef_[0] * intl_lo_days)\n', (14833, 14864), False, 'import os, math\n'), ((14919, 14958), 'math.exp', 'math.exp', (['(model.coef_[0] * intl_hi_days)'], {}), '(model.coef_[0] * intl_hi_days)\n', (14927, 14958), False, 'import os, math\n'), ((16801, 16827), 'math.exp', 'math.exp', (['model.intercept_'], {}), '(model.intercept_)\n', (16809, 16827), False, 'import os, math\n'), ((17513, 17602), 'math.sqrt', 'math.sqrt', (['(model.intercept_ * model.intercept_ + 2 * model.coef_[0] * df_ts.iloc[-1])'], {}), '(model.intercept_ * model.intercept_ + 2 * model.coef_[0] * df_ts.\n iloc[-1])\n', (17522, 17602), False, 'import os, math\n'), ((36538, 36570), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (36547, 36570), True, 'import numpy as np\n'), ((36970, 37017), 'math.exp', 'math.exp', (['(model.coef_[0] * (-window_length + 1))'], {}), '(model.coef_[0] * (-window_length + 1))\n', (36978, 37017), False, 'import os, math\n'), ((37266, 37298), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (37275, 37298), True, 'import numpy as np\n'), ((16418, 16465), 'math.exp', 'math.exp', (['(model.coef_[0] * (-window_length + 1))'], {}), '(model.coef_[0] * (-window_length + 1))\n', (16426, 16465), False, 'import os, math\n'), ((4884, 4981), 'numpy.logical_and', 'np.logical_and', (["(df[k]['Province/State'] == country[0])", "(df[k]['Country/Region'] == country[1])"], {}), "(df[k]['Province/State'] == country[0], df[k][\n 'Country/Region'] == country[1])\n", (4898, 4981), True, 'import numpy as np\n'), ((36936, 36968), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (36945, 36968), True, 'import numpy as np\n'), ((38326, 38358), 'numpy.arange', 'np.arange', (['(-window_length + 1)', '(1)'], {}), '(-window_length + 1, 1)\n', (38335, 38358), True, 'import numpy as np\n'), ((4309, 4323), 'numpy.array', 'np.array', (['M[k]'], {}), '(M[k])\n', (4317, 4323), True, 'import numpy as np\n')]
|
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
import pickle
import glob
import sys
class ParameterFinder:
def __init__(self, image, _h_channel_low=0, _h_channel_high=255, _l_channel_low=0, _l_channel_high=255,
sobelx_filter=1, sobelx_low=0, sobelx_high=0,
sobely_filter=1, sobely_low=0, sobely_high=0,
magn_filter=1, magn_low=0, magn_high=0,
direction_filter=1, direction_low=0, direction_high=0,
direction_avg_filter=3, direction_thresh=0, load_params_path= "do_not_load"):
self.image = image
self._h_channel_low = _h_channel_low
self._h_channel_high = _h_channel_high
self._l_channel_low = _l_channel_low
self._l_channel_high = _l_channel_high
self._sobelx_filter = sobelx_filter
self._sobelx_low = sobelx_low
self._sobelx_high = sobelx_high
self._sobely_filter = sobely_filter
self._sobely_low = sobely_low
self._sobely_high = sobely_high
self._magn_filter = magn_filter
self._magn_low = magn_low
self._magn_high = magn_high
self._direction_filter = direction_filter
self._direction_low = direction_low
self._direction_high = direction_high
self._direction_avg_filter = direction_avg_filter
self._direction_thresh = direction_thresh
self._post_avg_filter = 1
self._post_thresh = 1
if load_params_path != "do_not_load":
[self._sobelx_filter, self._sobelx_low, self._sobelx_high, self._sobely_filter, self._sobely_low, self._sobely_high, self._magn_filter, self._magn_low, self._magn_high, self._direction_filter, self._direction_low, self._direction_high, self._direction_avg_filter, self._direction_thresh] = self.load_params(load_params_path, [self._sobelx_filter, self._sobelx_low, self._sobelx_high, self._sobely_filter, self._sobely_low, self._sobely_high, self._magn_filter, self._magn_low, self._magn_high, self._direction_filter, self._direction_low, self._direction_high, self._direction_avg_filter, self._direction_thresh])
print("self._sobelx_filter: ", self._sobelx_filter)
def onchange_h_channel_low(pos):
self._h_channel_low = pos
self._render()
def onchange_h_channel_high(pos):
self._h_channel_high = pos
self._render()
def onchange_l_channel_low(pos):
self._l_channel_low = pos
self._render()
def onchange_l_channel_high(pos):
self._l_channel_high = pos
self._render()
def onchange_sobelx_low(pos):
self._sobelx_low = pos
self._render()
def onchange_sobelx_high(pos):
self._sobelx_high = pos
self._render()
def onchange_sobelx_filter(pos):
self._sobelx_filter = pos
self._sobelx_filter += (self._sobelx_filter + 1) % 2
self._render()
def onchange_sobely_low(pos):
self._sobely_low = pos
self._render()
def onchange_sobely_high(pos):
self._sobely_high = pos
self._render()
def onchange_sobely_filter(pos):
self._sobely_filter = pos
self._sobely_filter += (self._sobely_filter + 1) % 2
self._render()
def onchange_magn_low(pos):
self._magn_low = pos
self._render()
def onchange_magn_high(pos):
self._magn_high = pos
self._render()
def onchange_magn_filter(pos):
self._magn_filter = pos
self._magn_filter += (self._magn_filter + 1) % 2
self._render()
def onchange_direction_low(pos):
self._direction_low = (pos/100)-(np.pi/2)
self._render()
def onchange_direction_high(pos):
self._direction_high = (pos/100)-(np.pi/2)
self._render()
def onchange_direction_filter(pos):
self._direction_filter = pos
self._direction_filter += (self._direction_filter + 1) % 2
self._render()
def onchange_direction_avg_filter(pos):
self._direction_avg_filter = pos
self._direction_avg_filter += (self._direction_avg_filter + 1) % 2
self._render()
def onchange_direction_thresh(pos):
self._direction_thresh = pos
self._render()
def onchange_post_avg_filter(pos):
self._post_avg_filter = pos
self._post_avg_filter += (self._post_avg_filter + 1) % 2
self._render()
def onchange_post_thresh(pos):
self._post_thresh = pos
self._render()
cv2.namedWindow('output')
cv2.createTrackbar('h_channel_low', 'output', self._h_channel_low, 255, onchange_h_channel_low)
cv2.createTrackbar('h_channel_high', 'output', self._h_channel_high, 255, onchange_h_channel_high)
cv2.createTrackbar('l_channel_low', 'output', self._l_channel_low, 255, onchange_l_channel_low)
cv2.createTrackbar('l_channel_high', 'output', self._l_channel_high, 255, onchange_l_channel_high)
cv2.createTrackbar('sobelx_low', 'output', self._sobelx_low, 255, onchange_sobelx_low)
cv2.createTrackbar('sobelx_high', 'output', self._sobelx_high, 255, onchange_sobelx_high)
cv2.createTrackbar('sobelx_filter', 'output', self._sobelx_filter, 21, onchange_sobelx_filter)
cv2.createTrackbar('sobely_low', 'output', self._sobely_low, 255, onchange_sobely_low)
cv2.createTrackbar('sobely_high', 'output', self._sobely_high, 255, onchange_sobely_high)
cv2.createTrackbar('sobely_filter', 'output', self._sobely_filter, 21, onchange_sobely_filter)
cv2.createTrackbar('magn_low', 'output', self._magn_low, 255, onchange_magn_low)
cv2.createTrackbar('magn_high', 'output', self._magn_high, 255, onchange_magn_high)
cv2.createTrackbar('magn_filter', 'output', self._magn_filter, 21, onchange_magn_filter)
cv2.createTrackbar('direction_low(rad)', 'output', self._direction_low, 314, onchange_direction_low)
cv2.createTrackbar('direction_high(rad)', 'output', self._direction_high, 314, onchange_direction_high)
cv2.createTrackbar('direction_filter', 'output', self._direction_filter, 21, onchange_direction_filter)
cv2.createTrackbar('direction_avg_filter', 'output', self._direction_avg_filter, 21, onchange_direction_avg_filter)
cv2.createTrackbar('direction_thresh', 'output', self._direction_thresh, 255, onchange_direction_thresh)
cv2.createTrackbar('post_avg_filter', 'output', self._post_avg_filter, 21, onchange_post_avg_filter)
cv2.createTrackbar('post_thresh', 'output', self._post_thresh, 255, onchange_post_thresh)
self._render()
print("Adjust the parameters as desired. Hit any key to close.")
cv2.waitKey(0)
cv2.destroyWindow('output')
self.save_params([self._sobelx_filter, self._sobelx_low, self._sobelx_high, self._sobely_filter, self._sobely_low, self._sobely_high, self._magn_filter, self._magn_low, self._magn_high, self._direction_filter, self._direction_low, self._direction_high, self._direction_avg_filter, self._direction_thresh])
def sobelx_low(self):
return self._sobelx_low
def sobelx_high(self):
return self._sobelx_high
def sobelx_filter(self):
return self._sobelx_filter
def sobely_low(self):
return self._sobely_low
def sobely_high(self):
return self._sobely_high
def sobely_filter(self):
return self._sobely_filter
def magn_low(self):
return self._magn_low
def magn_high(self):
return self._magn_high
def magn_filter(self):
return self._magn_filter
def direction_low(self):
return self._direction_low
def direction_high(self):
return self._direction_high
def direction_filter(self):
return self._direction_filter
def direction_avg_filter(self):
return self._direction_avg_filter
def direction_thresh(self):
return self._direction_thresh
def sobelxImage(self):
return self._sobelx_binary
def sobelyImage(self):
return self._sobely_binary
def magImage(self):
return self._mag_binary
def dirImage(self):
return self._dir_binary
def averageImg(self):
return self._avg_img
def thresholdImg(self):
return self._thres_img
def postAverageImg(self):
return self._post_avg_img
def postThresholdImg(self):
return self._post_thres_img
def setImage(self, img):
self.image = img
def extract_single_color(self, img, channel='gray'):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
if(channel == 'r'):
return img[:,:,0]
elif(channel == 'g'):
return img[:,:,1]
elif(channel == 'b'):
return img[:,:,2]
elif(channel == 'gray'):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif(channel == 'h'):
return hls[:,:,0]
elif(channel == 'l'):
return hls[:,:,1]
elif(channel == 's'):
return hls[:,:,2]
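    # Illustrative call (sketch): self.extract_single_color(self.image, 'l') returns the
    # lightness channel of the HLS conversion as a single-channel image.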
def abs_sobel_thresh(self, image_binary, orient='x', sobel_kernel=3, thresh=(0, 255)):
self.image_binary = image_binary
self.orient = orient
self.sobel_kernel = sobel_kernel
self.thresh = thresh
# Calculate directional gradient
if orient == 'x':
sobel_orient = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
elif orient == 'y':
sobel_orient = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
abs_sobel = np.absolute(sobel_orient)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Apply threshold
grad_binary = np.zeros_like(scaled_sobel)
        grad_binary[(scaled_sobel > thresh[0]) & (scaled_sobel < thresh[1])] = 255  # use 255 rather than 1 so cv2.imshow displays the mask as white
return grad_binary
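    # Illustrative call (assumed thresholds): a binary mask of strong horizontal gradients,
    #   gradx = self.abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(20, 100))
    # where pixels whose scaled |dI/dx| lies strictly between 20 and 100 are set to 255.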
def abs_magn_thresh(self, image_binary, magn_sobel_kernel=3, thresh_2=(0, 255)):
# Calculate gradient magnitude
self.image_binary = image_binary
self.magn_sobel_kernel = magn_sobel_kernel
self.thresh_2 = thresh_2
        sobel_x = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=magn_sobel_kernel)
        sobel_y = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=magn_sobel_kernel)
# magn = np.sqrt(sobel_x * sobel_x + sobel_y * sobel_y)
magn = np.sqrt(np.power(sobel_x,2) + np.power(sobel_y,2))
scaled_magn = np.uint8(255*magn/np.max(magn))
# Apply threshold
magn_binary = np.zeros_like(scaled_magn)
magn_binary[(scaled_magn > (thresh_2[0])) & (scaled_magn < thresh_2[1])] = 255
return magn_binary
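    # Illustrative call (assumed thresholds): gradient-magnitude mask,
    #   mag = self.abs_magn_thresh(gray, magn_sobel_kernel=3, thresh_2=(30, 100))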
def abs_dir_threshold(self, image_binary, dir_sobel_kernel=3, dir_thresh=(-np.pi/2, np.pi/2)):
self.image_binary = image_binary
self.dir_sobel_kernel = dir_sobel_kernel
self.dir_thresh = dir_thresh
# Calculate gradient direction
sobel_x = cv2.Sobel(image_binary, cv2.CV_64F, 1, 0, ksize=dir_sobel_kernel)
sobel_y = cv2.Sobel(image_binary, cv2.CV_64F, 0, 1, ksize=dir_sobel_kernel)
abs_grad_x = np.absolute(sobel_x)
abs_grad_y = np.absolute(sobel_y)
direction_grad = np.arctan2(abs_grad_y, abs_grad_x)
# Apply threshold
dir_binary = np.zeros_like(direction_grad)
dir_binary[(direction_grad > dir_thresh[0]) & (direction_grad < dir_thresh[1])] = 1
return dir_binary
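    # Illustrative call (assumed thresholds): keep edges whose gradient direction lies
    # between roughly 0.7 and 1.3 radians,
    #   dir_mask = self.abs_dir_threshold(gray, dir_sobel_kernel=15, dir_thresh=(0.7, 1.3))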
def abs_average(self, binary_image, filter_size=3):
# non_binary= np.zeros_like(binary_image)
# non_binary[(binary_image > 0)] = 255
# binary_image.convertTo(binary_image,CV_8U, 1/255)
# non_binary = binary_image.view('float32')
# non_binary[:] = binary_image
np.set_printoptions(threshold=sys.maxsize)
# print("binary_image: ", binary_image)
non_binary = np.zeros_like(binary_image)
non_binary[binary_image > 0] = 255
non_binary[binary_image == 0] = 1
# print("non_binary: ", non_binary)
# non_binary = zeros
output_image = cv2.blur(non_binary, (filter_size, filter_size))
# output_image = cv2.medianBlur(binary_image, filter_size)
return output_image
def abs_threshold(self, image, thres_low, thres_high=255):
binary_image = np.zeros_like(image)
binary_image[(image > thres_low) & (image < thres_high)] = 255
return binary_image
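    # Illustrative call (continuing the sketch above): turn the blurred direction mask
    # back into a 0/255 image,
    #   thres_img = self.abs_threshold(self.abs_average(dir_mask, 11), 150)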
def _render(self, save_name="no_file_name"):
single_channel_h = self.extract_single_color(self.image, 'h')
single_channel_l = self.extract_single_color(self.image, 'l')
binary_channel_h = self.abs_threshold(single_channel_h, self._h_channel_low, self._h_channel_high)
binary_channel_l = self.abs_threshold(single_channel_l, self._l_channel_low, self._l_channel_high)
channels_binary = np.zeros_like(binary_channel_h)
channels_binary[(binary_channel_h > 0) | (binary_channel_l > 0)] = 255
self._sobelx_binary = self.abs_sobel_thresh(self.image, 'x', self._sobelx_filter, (self._sobelx_low, self._sobelx_high))
self._sobely_binary = self.abs_sobel_thresh(self.image, 'y', self._sobely_filter, (self._sobely_low, self._sobely_high))
        self._mag_binary = self.abs_magn_thresh(self.image, self._magn_filter, (self._magn_low, self._magn_high))
self._dir_binary = self.abs_dir_threshold(self.image, self._direction_filter, (self._direction_low, self._direction_high))
self._avg_img = self.abs_average(self._dir_binary, self._direction_avg_filter)
self._thres_img = self.abs_threshold(self._avg_img, self._direction_thresh)
self.combined = np.zeros_like(self._sobelx_binary)
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | ((self._mag_binary == 255) & (self._thres_img == 255))] = 255
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | ((self._thres_img == 255))] = 255
self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | (self._mag_binary == 255) | (self._thres_img == 255)] = 255
# self.combined[((self._sobelx_binary == 255) & (self._sobely_binary == 255)) | (self._mag_binary == 255)] = 255
self._post_avg_img = self.abs_average(channels_binary, self._post_avg_filter)
self._post_thres_img = self.abs_threshold(self._post_avg_img, self._post_thresh)
if save_name == "no_file_name":
cv2.imshow('sobelx_binary', self._sobelx_binary)
cv2.imshow('sobely_binary', self._sobely_binary)
cv2.imshow('mag_binary', self._mag_binary)
cv2.imshow('direction_binary', self._dir_binary)
cv2.imshow('direction_&_avg', self._avg_img)
cv2.imshow('direction_&_avg_thresh', self._thres_img)
cv2.imshow('channels_binary', channels_binary)
self.color_binary = np.dstack(( np.zeros_like(self._sobelx_binary),((self._sobelx_binary == 255) & (self._sobely_binary == 255)), ((self._mag_binary == 255) & (self._thres_img == 255)))) * 255
if save_name == "no_file_name":
cv2.imshow('output', channels_binary)
else:
            cv2.imwrite(f"test_output/output_{save_name}", channels_binary)  # keep the image extension at the end so cv2.imwrite can infer the format
# cv2.imshow('output', self.color_binary)
def save_params(self, var_list):
with open("store_params/params_new",'wb') as f:
pickle.dump(var_list, f)
def load_params(self, param_file, var_list):
with open(param_file, 'rb') as f:
var_list = pickle.load(f)
return var_list
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
# parser.add_argument('FILENAME')
# args = parser.parse_args()
WORKING_DIR = "/home/nbenndo/Documents/Programming/Udacity/SelfDrivingCarND/CarND-Advanced-Lane-Lines/"
os.chdir(WORKING_DIR)
FILENAME = 'test_images/test4.jpg'
IMG = cv2.imread(FILENAME)#, cv2.IMREAD_GRAYSCALE)
# crop_y_border = IMG.shape[0]//2 + 120
# img_crop_top = IMG[0:crop_y_border-1, 0:IMG.shape[1]]
# img_crop_bottom = IMG[crop_y_border:IMG.shape[0], 0:IMG.shape[1]]
# IMG = np.concatenate((img_crop_top, img_crop_bottom), axis=0)
cv2.imshow('input', IMG)
# cv2.waitKey(0)
param_finder = ParameterFinder(IMG, _h_channel_low=96, _h_channel_high=102, _l_channel_low=220, _l_channel_high=255,
sobelx_filter=3, sobelx_low=16, sobelx_high=255,
sobely_filter=3, sobely_low=36, sobely_high=255,
magn_filter=3, magn_low=15, magn_high=255,
direction_filter=15, direction_low=229, direction_high=287,
direction_avg_filter=11, direction_thresh=143)#, load_params_path="store_params/params_new")
# calculate all images with last parameter
os.chdir(f"{WORKING_DIR}/test_images")
images_test = glob.glob('*.jpg', recursive=False)
os.chdir(WORKING_DIR)
for image_path in images_test:
image = cv2.imread(f"test_images/{image_path}")
param_finder.setImage(image)
param_finder._render(image_path)
# print("Edge parameters:")
# print("GaussianBlur Filter Size: %f" % param_finder.filterSize())
# print("Threshold1: %f" % param_finder.threshold1())
# print("Threshold2: %f" % param_finder.threshold2())
# (head, tail) = os.path.split(args.FILENAME)
# (root, ext) = os.path.splitext(tail)
# smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
# edge_filename = os.path.join("output_images", root + "-edges" + ext)
# cv2.imwrite(smoothed_filename, param_finder.smoothedImage())
# cv2.imwrite(edge_filename, param_finder.edgeImage())
cv2.destroyAllWindows()
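    # Illustrative sketch (not in the original script): the tuned thresholds could be
    # persisted and restored with the class's pickle helpers. The exact attributes put
    # into var_list are an assumption; adapt them to the trackbar values of interest.
    # param_finder.save_params([param_finder._sobelx_low, param_finder._sobelx_high])
    # restored = param_finder.load_params("store_params/params_new", [])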
|
[
"cv2.imshow",
"cv2.destroyAllWindows",
"numpy.arctan2",
"numpy.max",
"cv2.waitKey",
"cv2.blur",
"glob.glob",
"pickle.load",
"cv2.cvtColor",
"cv2.createTrackbar",
"cv2.imread",
"cv2.namedWindow",
"numpy.set_printoptions",
"cv2.imwrite",
"pickle.dump",
"numpy.power",
"cv2.destroyWindow",
"numpy.absolute",
"os.chdir",
"numpy.zeros_like",
"cv2.Sobel"
] |
[((16370, 16391), 'os.chdir', 'os.chdir', (['WORKING_DIR'], {}), '(WORKING_DIR)\n', (16378, 16391), False, 'import os\n'), ((16442, 16462), 'cv2.imread', 'cv2.imread', (['FILENAME'], {}), '(FILENAME)\n', (16452, 16462), False, 'import cv2\n'), ((16743, 16767), 'cv2.imshow', 'cv2.imshow', (['"""input"""', 'IMG'], {}), "('input', IMG)\n", (16753, 16767), False, 'import cv2\n'), ((17461, 17499), 'os.chdir', 'os.chdir', (['f"""{WORKING_DIR}/test_images"""'], {}), "(f'{WORKING_DIR}/test_images')\n", (17469, 17499), False, 'import os\n'), ((17518, 17553), 'glob.glob', 'glob.glob', (['"""*.jpg"""'], {'recursive': '(False)'}), "('*.jpg', recursive=False)\n", (17527, 17553), False, 'import glob\n'), ((17558, 17579), 'os.chdir', 'os.chdir', (['WORKING_DIR'], {}), '(WORKING_DIR)\n', (17566, 17579), False, 'import os\n'), ((18356, 18379), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18377, 18379), False, 'import cv2\n'), ((4871, 4896), 'cv2.namedWindow', 'cv2.namedWindow', (['"""output"""'], {}), "('output')\n", (4886, 4896), False, 'import cv2\n'), ((4906, 5005), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_channel_low"""', '"""output"""', 'self._h_channel_low', '(255)', 'onchange_h_channel_low'], {}), "('h_channel_low', 'output', self._h_channel_low, 255,\n onchange_h_channel_low)\n", (4924, 5005), False, 'import cv2\n'), ((5010, 5112), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_channel_high"""', '"""output"""', 'self._h_channel_high', '(255)', 'onchange_h_channel_high'], {}), "('h_channel_high', 'output', self._h_channel_high, 255,\n onchange_h_channel_high)\n", (5028, 5112), False, 'import cv2\n'), ((5126, 5225), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_channel_low"""', '"""output"""', 'self._l_channel_low', '(255)', 'onchange_l_channel_low'], {}), "('l_channel_low', 'output', self._l_channel_low, 255,\n onchange_l_channel_low)\n", (5144, 5225), False, 'import cv2\n'), ((5230, 5332), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""l_channel_high"""', '"""output"""', 'self._l_channel_high', '(255)', 'onchange_l_channel_high'], {}), "('l_channel_high', 'output', self._l_channel_high, 255,\n onchange_l_channel_high)\n", (5248, 5332), False, 'import cv2\n'), ((5339, 5429), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_low"""', '"""output"""', 'self._sobelx_low', '(255)', 'onchange_sobelx_low'], {}), "('sobelx_low', 'output', self._sobelx_low, 255,\n onchange_sobelx_low)\n", (5357, 5429), False, 'import cv2\n'), ((5434, 5527), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_high"""', '"""output"""', 'self._sobelx_high', '(255)', 'onchange_sobelx_high'], {}), "('sobelx_high', 'output', self._sobelx_high, 255,\n onchange_sobelx_high)\n", (5452, 5527), False, 'import cv2\n'), ((5532, 5630), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobelx_filter"""', '"""output"""', 'self._sobelx_filter', '(21)', 'onchange_sobelx_filter'], {}), "('sobelx_filter', 'output', self._sobelx_filter, 21,\n onchange_sobelx_filter)\n", (5550, 5630), False, 'import cv2\n'), ((5636, 5726), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_low"""', '"""output"""', 'self._sobely_low', '(255)', 'onchange_sobely_low'], {}), "('sobely_low', 'output', self._sobely_low, 255,\n onchange_sobely_low)\n", (5654, 5726), False, 'import cv2\n'), ((5731, 5824), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_high"""', '"""output"""', 'self._sobely_high', '(255)', 'onchange_sobely_high'], {}), "('sobely_high', 'output', self._sobely_high, 255,\n 
onchange_sobely_high)\n", (5749, 5824), False, 'import cv2\n'), ((5829, 5927), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""sobely_filter"""', '"""output"""', 'self._sobely_filter', '(21)', 'onchange_sobely_filter'], {}), "('sobely_filter', 'output', self._sobely_filter, 21,\n onchange_sobely_filter)\n", (5847, 5927), False, 'import cv2\n'), ((5933, 6018), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_low"""', '"""output"""', 'self._magn_low', '(255)', 'onchange_magn_low'], {}), "('magn_low', 'output', self._magn_low, 255, onchange_magn_low\n )\n", (5951, 6018), False, 'import cv2\n'), ((6022, 6109), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_high"""', '"""output"""', 'self._magn_high', '(255)', 'onchange_magn_high'], {}), "('magn_high', 'output', self._magn_high, 255,\n onchange_magn_high)\n", (6040, 6109), False, 'import cv2\n'), ((6114, 6206), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""magn_filter"""', '"""output"""', 'self._magn_filter', '(21)', 'onchange_magn_filter'], {}), "('magn_filter', 'output', self._magn_filter, 21,\n onchange_magn_filter)\n", (6132, 6206), False, 'import cv2\n'), ((6220, 6324), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_low(rad)"""', '"""output"""', 'self._direction_low', '(314)', 'onchange_direction_low'], {}), "('direction_low(rad)', 'output', self._direction_low, 314,\n onchange_direction_low)\n", (6238, 6324), False, 'import cv2\n'), ((6329, 6437), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_high(rad)"""', '"""output"""', 'self._direction_high', '(314)', 'onchange_direction_high'], {}), "('direction_high(rad)', 'output', self._direction_high, \n 314, onchange_direction_high)\n", (6347, 6437), False, 'import cv2\n'), ((6441, 6548), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_filter"""', '"""output"""', 'self._direction_filter', '(21)', 'onchange_direction_filter'], {}), "('direction_filter', 'output', self._direction_filter, 21,\n onchange_direction_filter)\n", (6459, 6548), False, 'import cv2\n'), ((6553, 6673), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_avg_filter"""', '"""output"""', 'self._direction_avg_filter', '(21)', 'onchange_direction_avg_filter'], {}), "('direction_avg_filter', 'output', self.\n _direction_avg_filter, 21, onchange_direction_avg_filter)\n", (6571, 6673), False, 'import cv2\n'), ((6677, 6786), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""direction_thresh"""', '"""output"""', 'self._direction_thresh', '(255)', 'onchange_direction_thresh'], {}), "('direction_thresh', 'output', self._direction_thresh, \n 255, onchange_direction_thresh)\n", (6695, 6786), False, 'import cv2\n'), ((6792, 6896), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""post_avg_filter"""', '"""output"""', 'self._post_avg_filter', '(21)', 'onchange_post_avg_filter'], {}), "('post_avg_filter', 'output', self._post_avg_filter, 21,\n onchange_post_avg_filter)\n", (6810, 6896), False, 'import cv2\n'), ((6901, 6994), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""post_thresh"""', '"""output"""', 'self._post_thresh', '(255)', 'onchange_post_thresh'], {}), "('post_thresh', 'output', self._post_thresh, 255,\n onchange_post_thresh)\n", (6919, 6994), False, 'import cv2\n'), ((7107, 7121), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7118, 7121), False, 'import cv2\n'), ((7131, 7158), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""output"""'], {}), "('output')\n", (7148, 7158), False, 'import cv2\n'), ((8986, 9022), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 
'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (8998, 9022), False, 'import cv2\n'), ((10003, 10028), 'numpy.absolute', 'np.absolute', (['sobel_orient'], {}), '(sobel_orient)\n', (10014, 10028), True, 'import numpy as np\n'), ((10142, 10169), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobel'], {}), '(scaled_sobel)\n', (10155, 10169), True, 'import numpy as np\n'), ((10967, 10993), 'numpy.zeros_like', 'np.zeros_like', (['scaled_magn'], {}), '(scaled_magn)\n', (10980, 10993), True, 'import numpy as np\n'), ((11403, 11468), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'dir_sobel_kernel'}), '(image_binary, cv2.CV_64F, 1, 0, ksize=dir_sobel_kernel)\n', (11412, 11468), False, 'import cv2\n'), ((11487, 11552), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'dir_sobel_kernel'}), '(image_binary, cv2.CV_64F, 0, 1, ksize=dir_sobel_kernel)\n', (11496, 11552), False, 'import cv2\n'), ((11574, 11594), 'numpy.absolute', 'np.absolute', (['sobel_x'], {}), '(sobel_x)\n', (11585, 11594), True, 'import numpy as np\n'), ((11616, 11636), 'numpy.absolute', 'np.absolute', (['sobel_y'], {}), '(sobel_y)\n', (11627, 11636), True, 'import numpy as np\n'), ((11662, 11696), 'numpy.arctan2', 'np.arctan2', (['abs_grad_y', 'abs_grad_x'], {}), '(abs_grad_y, abs_grad_x)\n', (11672, 11696), True, 'import numpy as np\n'), ((11744, 11773), 'numpy.zeros_like', 'np.zeros_like', (['direction_grad'], {}), '(direction_grad)\n', (11757, 11773), True, 'import numpy as np\n'), ((12211, 12253), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (12230, 12253), True, 'import numpy as np\n'), ((12323, 12350), 'numpy.zeros_like', 'np.zeros_like', (['binary_image'], {}), '(binary_image)\n', (12336, 12350), True, 'import numpy as np\n'), ((12533, 12581), 'cv2.blur', 'cv2.blur', (['non_binary', '(filter_size, filter_size)'], {}), '(non_binary, (filter_size, filter_size))\n', (12541, 12581), False, 'import cv2\n'), ((12775, 12795), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (12788, 12795), True, 'import numpy as np\n'), ((13326, 13357), 'numpy.zeros_like', 'np.zeros_like', (['binary_channel_h'], {}), '(binary_channel_h)\n', (13339, 13357), True, 'import numpy as np\n'), ((14135, 14169), 'numpy.zeros_like', 'np.zeros_like', (['self._sobelx_binary'], {}), '(self._sobelx_binary)\n', (14148, 14169), True, 'import numpy as np\n'), ((17631, 17670), 'cv2.imread', 'cv2.imread', (['f"""test_images/{image_path}"""'], {}), "(f'test_images/{image_path}')\n", (17641, 17670), False, 'import cv2\n'), ((9804, 9865), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': 'sobel_kernel'}), '(image_binary, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n', (9813, 9865), False, 'import cv2\n'), ((14938, 14986), 'cv2.imshow', 'cv2.imshow', (['"""sobelx_binary"""', 'self._sobelx_binary'], {}), "('sobelx_binary', self._sobelx_binary)\n", (14948, 14986), False, 'import cv2\n'), ((14999, 15047), 'cv2.imshow', 'cv2.imshow', (['"""sobely_binary"""', 'self._sobely_binary'], {}), "('sobely_binary', self._sobely_binary)\n", (15009, 15047), False, 'import cv2\n'), ((15060, 15102), 'cv2.imshow', 'cv2.imshow', (['"""mag_binary"""', 'self._mag_binary'], {}), "('mag_binary', self._mag_binary)\n", (15070, 15102), False, 'import cv2\n'), ((15115, 15163), 'cv2.imshow', 'cv2.imshow', (['"""direction_binary"""', 'self._dir_binary'], {}), "('direction_binary', self._dir_binary)\n", (15125, 
15163), False, 'import cv2\n'), ((15176, 15220), 'cv2.imshow', 'cv2.imshow', (['"""direction_&_avg"""', 'self._avg_img'], {}), "('direction_&_avg', self._avg_img)\n", (15186, 15220), False, 'import cv2\n'), ((15233, 15286), 'cv2.imshow', 'cv2.imshow', (['"""direction_&_avg_thresh"""', 'self._thres_img'], {}), "('direction_&_avg_thresh', self._thres_img)\n", (15243, 15286), False, 'import cv2\n'), ((15299, 15345), 'cv2.imshow', 'cv2.imshow', (['"""channels_binary"""', 'channels_binary'], {}), "('channels_binary', channels_binary)\n", (15309, 15345), False, 'import cv2\n'), ((15599, 15636), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'channels_binary'], {}), "('output', channels_binary)\n", (15609, 15636), False, 'import cv2\n'), ((15664, 15727), 'cv2.imwrite', 'cv2.imwrite', (['f"""test_output/{save_name}_output"""', 'channels_binary'], {}), "(f'test_output/{save_name}_output', channels_binary)\n", (15675, 15727), False, 'import cv2\n'), ((15883, 15907), 'pickle.dump', 'pickle.dump', (['var_list', 'f'], {}), '(var_list, f)\n', (15894, 15907), False, 'import pickle\n'), ((16023, 16037), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (16034, 16037), False, 'import pickle\n'), ((9921, 9982), 'cv2.Sobel', 'cv2.Sobel', (['image_binary', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': 'sobel_kernel'}), '(image_binary, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n', (9930, 9982), False, 'import cv2\n'), ((10075, 10092), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (10081, 10092), True, 'import numpy as np\n'), ((10822, 10842), 'numpy.power', 'np.power', (['sobel_x', '(2)'], {}), '(sobel_x, 2)\n', (10830, 10842), True, 'import numpy as np\n'), ((10844, 10864), 'numpy.power', 'np.power', (['sobel_y', '(2)'], {}), '(sobel_y, 2)\n', (10852, 10864), True, 'import numpy as np\n'), ((10905, 10917), 'numpy.max', 'np.max', (['magn'], {}), '(magn)\n', (10911, 10917), True, 'import numpy as np\n'), ((15386, 15420), 'numpy.zeros_like', 'np.zeros_like', (['self._sobelx_binary'], {}), '(self._sobelx_binary)\n', (15399, 15420), True, 'import numpy as np\n'), ((9255, 9292), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (9267, 9292), False, 'import cv2\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2021 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
"""This module does comparison of two images"""
import argparse
import os
from io import BytesIO
from typing import Union, List, Tuple
from pathlib import Path
import numpy as np
from PIL import Image
from cv2 import cv2
import face_recognition
from emrtd_face_access.print_to_sg import SetInterval
print = SetInterval().print
def opencv_dnn_detector() -> cv2.dnn_Net:
"""Create face detection network"""
if "net" in opencv_dnn_detector.__dict__:
return opencv_dnn_detector.net
print("[+] Creating face detector network...")
# downloaded from
# https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
model_file = "face_detection/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# downloaded from
# https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
config_file = "face_detection/deploy.prototxt"
opencv_dnn_detector.net = cv2.dnn.readNetFromCaffe(config_file, model_file)
return opencv_dnn_detector.net
def get_bounding_boxes(
image: np.ndarray, conf_threshold: float = 0.5, scale_size: Tuple[int, int] = (-1, -1)
) -> List[Tuple[int, ...]]:
"""Image is expected in opencv format (BGR)
takes image and returns face bounding boxes
scale_size: Tuple[int, int] (height, width)"""
# https://learnopencv.com/face-detection-opencv-dlib-and-deep-learning-c-python/
net = opencv_dnn_detector()
face_locations: List[Tuple[int, ...]] = []
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False)
net.setInput(blob)
detections = net.forward()
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > conf_threshold:
x1 = detections[0, 0, i, 3]
y1 = detections[0, 0, i, 4]
x2 = detections[0, 0, i, 5]
y2 = detections[0, 0, i, 6]
if scale_size == (-1, -1):
x1 = int(x1 * image.shape[1])
y1 = int(y1 * image.shape[0])
x2 = int(x2 * image.shape[1])
y2 = int(y2 * image.shape[0])
else:
x1 = int(x1 * scale_size[1])
y1 = int(y1 * scale_size[0])
x2 = int(x2 * scale_size[1])
y2 = int(y2 * scale_size[0])
face_locations.append((y1, x2, y2, x1))
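            # Appended as (top, right, bottom, left), the ordering face_recognition expects.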
return face_locations
def compare_faces(
id_image: bytes,
cam_image: np.ndarray,
face_location: List[Tuple[int, ...]],
save_dest: Union[Path, None] = None,
) -> bool:
"""
Compare two images. First one should be jpeg, the second one should be opencv image (numpy)
face_location is the location of the face in the second image
:returns: True if they are the same person, False otherwise.
"""
im1 = bytes_to_np(id_image)
im1 = im1[:, :, ::-1]
id_face_loc = get_bounding_boxes(im1)
im1 = im1[:, :, ::-1]
face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, "large")[0]
im2 = cam_image[:, :, ::-1]
face_encodings2 = face_recognition.face_encodings(im2, face_location, 10, "large")[0]
if save_dest:
Image.fromarray(im1).save(os.path.join(save_dest, "face_one.jpeg"))
Image.fromarray(im2).save(os.path.join(save_dest, "face_two.jpeg"))
dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]
print("[i] Decision threshold is 0.5.")
if dist <= 0.5:
print(
f"[+] Distance between the images is {dist}"
"\n[+] These images are of the same people!"
)
return True
else:
print(
f"[-] Distance between the images is {dist}\n"
"[-] These images are of two different people!"
)
return False
def bytes_to_np(img: bytes) -> np.ndarray:
"""
Converts bytes image (PIL) to numpy image (opencv)
"""
im = Image.open(BytesIO(img))
im = im.convert("RGB")
return np.array(im)
def jpeg_to_png(img: bytes) -> bytes:
"""
Converts a JPEG to a PNG
"""
im = Image.open(BytesIO(img))
width = 240
height = int(im.size[1] * (240 / im.size[0]))
im = im.convert("RGB").resize((width, height))
stream = BytesIO()
im.save(stream, format="PNG")
return stream.getvalue()
def main(im1_filename: Path, im2_filename: Path) -> None:
"""
Compare two persons images.
"""
im1 = np.array(Image.open(im1_filename).convert("RGB"))
im2 = np.array(Image.open(im2_filename).convert("RGB"))
im1 = im1[:, :, ::-1]
id_face_loc = get_bounding_boxes(im1)
im1 = im1[:, :, ::-1]
face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, "large")[0]
im2 = im2[:, :, ::-1]
cam_face_loc = get_bounding_boxes(im2)
im2 = im2[:, :, ::-1]
face_encodings2 = face_recognition.face_encodings(im2, cam_face_loc, 10, "large")[0]
dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]
if dist < 0.5:
print(f"[+] These images belong to the same person! ({dist})")
else:
print(f"[-] These images do not belong to the same person! ({dist})")
if __name__ == "__main__":
def raise_(ex):
"""https://stackoverflow.com/a/8294654/6077951"""
raise ex
parser = argparse.ArgumentParser(description="Find if two images are of the same people.")
parser.add_argument(
"image_one",
type=lambda x: x if os.path.isfile(x) else raise_(FileNotFoundError(x)),
help="Path to image one",
)
parser.add_argument(
"image_two",
type=lambda x: x if os.path.isfile(x) else raise_(FileNotFoundError(x)),
help="Path to image two",
)
args = parser.parse_args()
main(Path(args.image_one), Path(args.image_two))
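    # Illustrative sketch (not part of the original CLI): programmatic use against a
    # single OpenCV BGR frame; `portrait_bytes` (JPEG bytes from the eMRTD) and
    # `frame` are assumed inputs.
    # face_loc = get_bounding_boxes(frame)
    # same_person = compare_faces(portrait_bytes, frame, face_loc)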
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"emrtd_face_access.print_to_sg.SetInterval",
"argparse.ArgumentParser",
"pathlib.Path",
"io.BytesIO",
"os.path.join",
"os.path.isfile",
"numpy.array",
"face_recognition.face_distance",
"face_recognition.face_encodings",
"cv2.cv2.dnn.blobFromImage",
"cv2.cv2.dnn.readNetFromCaffe"
] |
[((455, 468), 'emrtd_face_access.print_to_sg.SetInterval', 'SetInterval', ([], {}), '()\n', (466, 468), False, 'from emrtd_face_access.print_to_sg import SetInterval\n'), ((1153, 1202), 'cv2.cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['config_file', 'model_file'], {}), '(config_file, model_file)\n', (1177, 1202), False, 'from cv2 import cv2\n'), ((1707, 1783), 'cv2.cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1.0)', '(300, 300)', '[104, 117, 123]', '(False)', '(False)'], {}), '(image, 1.0, (300, 300), [104, 117, 123], False, False)\n', (1728, 1783), False, 'from cv2 import cv2\n'), ((4203, 4215), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4211, 4215), True, 'import numpy as np\n'), ((4465, 4474), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4472, 4474), False, 'from io import BytesIO\n'), ((5531, 5617), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find if two images are of the same people."""'}), "(description=\n 'Find if two images are of the same people.')\n", (5554, 5617), False, 'import argparse\n'), ((3176, 3238), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im1', 'id_face_loc', '(10)', '"""large"""'], {}), "(im1, id_face_loc, 10, 'large')\n", (3207, 3238), False, 'import face_recognition\n'), ((3297, 3361), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im2', 'face_location', '(10)', '"""large"""'], {}), "(im2, face_location, 10, 'large')\n", (3328, 3361), False, 'import face_recognition\n'), ((3548, 3613), 'face_recognition.face_distance', 'face_recognition.face_distance', (['[face_encodings]', 'face_encodings2'], {}), '([face_encodings], face_encodings2)\n', (3578, 3613), False, 'import face_recognition\n'), ((4151, 4163), 'io.BytesIO', 'BytesIO', (['img'], {}), '(img)\n', (4158, 4163), False, 'from io import BytesIO\n'), ((4321, 4333), 'io.BytesIO', 'BytesIO', (['img'], {}), '(img)\n', (4328, 4333), False, 'from io import BytesIO\n'), ((4882, 4944), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im1', 'id_face_loc', '(10)', '"""large"""'], {}), "(im1, id_face_loc, 10, 'large')\n", (4913, 4944), False, 'import face_recognition\n'), ((5066, 5129), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['im2', 'cam_face_loc', '(10)', '"""large"""'], {}), "(im2, cam_face_loc, 10, 'large')\n", (5097, 5129), False, 'import face_recognition\n'), ((5145, 5210), 'face_recognition.face_distance', 'face_recognition.face_distance', (['[face_encodings]', 'face_encodings2'], {}), '([face_encodings], face_encodings2)\n', (5175, 5210), False, 'import face_recognition\n'), ((5988, 6008), 'pathlib.Path', 'Path', (['args.image_one'], {}), '(args.image_one)\n', (5992, 6008), False, 'from pathlib import Path\n'), ((6010, 6030), 'pathlib.Path', 'Path', (['args.image_two'], {}), '(args.image_two)\n', (6014, 6030), False, 'from pathlib import Path\n'), ((3418, 3458), 'os.path.join', 'os.path.join', (['save_dest', '"""face_one.jpeg"""'], {}), "(save_dest, 'face_one.jpeg')\n", (3430, 3458), False, 'import os\n'), ((3494, 3534), 'os.path.join', 'os.path.join', (['save_dest', '"""face_two.jpeg"""'], {}), "(save_dest, 'face_two.jpeg')\n", (3506, 3534), False, 'import os\n'), ((3392, 3412), 'PIL.Image.fromarray', 'Image.fromarray', (['im1'], {}), '(im1)\n', (3407, 3412), False, 'from PIL import Image\n'), ((3468, 3488), 'PIL.Image.fromarray', 'Image.fromarray', (['im2'], {}), '(im2)\n', (3483, 3488), False, 'from PIL import Image\n'), ((4665, 
4689), 'PIL.Image.open', 'Image.open', (['im1_filename'], {}), '(im1_filename)\n', (4675, 4689), False, 'from PIL import Image\n'), ((4725, 4749), 'PIL.Image.open', 'Image.open', (['im2_filename'], {}), '(im2_filename)\n', (4735, 4749), False, 'from PIL import Image\n'), ((5687, 5704), 'os.path.isfile', 'os.path.isfile', (['x'], {}), '(x)\n', (5701, 5704), False, 'import os\n'), ((5854, 5871), 'os.path.isfile', 'os.path.isfile', (['x'], {}), '(x)\n', (5868, 5871), False, 'import os\n')]
|
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import det, inv
from scipy.stats import multivariate_normal
class LDA(BaseEstimator):
"""
Linear Discriminant Analysis (LDA) classifier
Attributes
----------
self.classes_ : np.ndarray of shape (n_classes,)
The different labels classes. To be set in `LDA.fit`
self.mu_ : np.ndarray of shape (n_classes,n_features)
The estimated features means for each class. To be set in `LDA.fit`
self.cov_ : np.ndarray of shape (n_features,n_features)
The estimated features' covariance. To be set in `LDA.fit`
self._cov_inv : np.ndarray of shape (n_features,n_features)
The inverse of the estimated features covariance. To be set in `LDA.fit`
self.pi_: np.ndarray of shape (n_classes)
        The estimated class probabilities. To be set in `LDA.fit`
"""
def __init__(self):
"""
Instantiate an LDA classifier
"""
super().__init__()
self.classes_, self.mu_, self.cov_, self._cov_inv, self.pi_ = None, None, None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
fits an LDA model.
Estimates gaussian for each label class - Different mean vector, same covariance
matrix with dependent features.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
self.classes_, counts = np.unique(y, return_counts=True)
self.pi_ = counts / sum(counts)
self.mu_ = np.zeros((len(self.classes_), X.shape[1]))
label_index_dict = {}
# map label to its index:
for i, k in enumerate(self.classes_):
label_index_dict[k] = i
# sum label's samples:
for index, label in enumerate(y):
self.mu_[label_index_dict[label]] += X[index]
# divide by number of samples of each class:
self.mu_ /= counts.reshape(-1, 1)
# calculating self.cov:
self.cov_ = np.zeros((X.shape[1], X.shape[1]))
for index, label in enumerate(y):
error = np.array(X[index] - self.mu_[label_index_dict[label]])
self.cov_ += np.outer(error, error)
self.cov_ /= (X.shape[0] - len(self.classes_))
self._cov_inv = inv(self.cov_)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
ak_matrix = self._cov_inv @ self.mu_.transpose() # num_features X num_classes
bk_vec = np.log(self.pi_) - (0.5 * (np.diag(self.mu_ @ self._cov_inv @
self.mu_.transpose()))) # num_classes
classes_indexes = ((X @ ak_matrix) + bk_vec).argmax(1)
        prediction = np.zeros((X.shape[0],))
        for index, class_index in enumerate(classes_indexes):
            # classes_indexes holds argmax indexes into self.classes_, so a single lookup maps them to labels
            prediction[index] = self.classes_[class_index]
return prediction
def likelihood(self, X: np.ndarray) -> np.ndarray:
"""
Calculate the likelihood of a given data over the estimated model
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
Input data to calculate its likelihood over the different classes.
Returns
-------
likelihoods : np.ndarray of shape (n_samples, n_classes)
The likelihood for each sample under each of the classes
"""
if not self.fitted_:
raise ValueError("Estimator must first be fitted before calling `likelihood` function")
likelihood = np.zeros((X.shape[0], len(self.classes_)))
for index, row in enumerate(self.mu_):
likelihood[:, index] = multivariate_normal.pdf(X, mean=row, cov=self.cov_)*self.pi_[index]
return likelihood
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under missclassification loss function
"""
from ...metrics import misclassification_error
return misclassification_error(y, self._predict(X))
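# Illustrative usage sketch (not part of the exercise skeleton); assumes the
# surrounding package so that BaseEstimator.fit/predict dispatch to _fit/_predict:
#   X = np.vstack([np.random.randn(100, 2), np.random.randn(100, 2) + 3])
#   y = np.array([0] * 100 + [1] * 100)
#   lda = LDA()
#   lda.fit(X, y)
#   y_hat = lda.predict(X)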
|
[
"numpy.unique",
"scipy.stats.multivariate_normal.pdf",
"numpy.log",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.outer"
] |
[((1651, 1683), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1660, 1683), True, 'import numpy as np\n'), ((2212, 2246), 'numpy.zeros', 'np.zeros', (['(X.shape[1], X.shape[1])'], {}), '((X.shape[1], X.shape[1]))\n', (2220, 2246), True, 'import numpy as np\n'), ((2491, 2505), 'numpy.linalg.inv', 'inv', (['self.cov_'], {}), '(self.cov_)\n', (2494, 2505), False, 'from numpy.linalg import det, inv\n'), ((3322, 3345), 'numpy.zeros', 'np.zeros', (['(X.shape[0],)'], {}), '((X.shape[0],))\n', (3330, 3345), True, 'import numpy as np\n'), ((2309, 2363), 'numpy.array', 'np.array', (['(X[index] - self.mu_[label_index_dict[label]])'], {}), '(X[index] - self.mu_[label_index_dict[label]])\n', (2317, 2363), True, 'import numpy as np\n'), ((2389, 2411), 'numpy.outer', 'np.outer', (['error', 'error'], {}), '(error, error)\n', (2397, 2411), True, 'import numpy as np\n'), ((3028, 3044), 'numpy.log', 'np.log', (['self.pi_'], {}), '(self.pi_)\n', (3034, 3044), True, 'import numpy as np\n'), ((4249, 4300), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['X'], {'mean': 'row', 'cov': 'self.cov_'}), '(X, mean=row, cov=self.cov_)\n', (4272, 4300), False, 'from scipy.stats import multivariate_normal\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 08:04:50 2019
@author: alexandradarmon
"""
import random
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from punctuation.config import options
from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap
from webcolors import hex_to_rgb
color_vector = ['#2764A2','#EC7428','#438823', '#B9312B', '#785BAD','#72473D',
'#CA6FB6', '#6C6C6C','#B1AC27', '#44ADC1']
markers = {'o': 'circle', 'D': 'diamond', 'p': 'pentagon',
           'v': 'triangle_down', '^': 'triangle_up',
           '<': 'triangle_left', '>': 'triangle_right',
           's': 'square', '*': 'star', 'x': 'x',
           '_': 'hline',
           'h': 'hexagon1', 'H': 'hexagon2',
           'd': 'thin_diamond', '|': 'vline', '+': 'plus',
           'P': 'plus_filled', 'X': 'x_filled', 0: 'tickleft', 1: 'tickright',
           2: 'tickup', 3: 'tickdown', 4: 'caretleft', 5: 'caretright',
           6: 'caretup', 7: 'caretdown', 8: 'caretleftbase',
           9: 'caretrightbase', 10: 'caretupbase', 11: 'caretdownbase',
           'None': 'nothing', None: 'nothing', ' ': 'nothing', '': 'nothing'}
marker_vector = list(markers.keys())
rgb_color_vector = [hex_to_rgb(i) for i in color_vector]
def get_overall_kdeplot(df,subfile,
punctuation_vector=options.punctuation_vector,
freq_pun_col=options.freq_pun_col,
with_pairs=False):
for col1, pun1 in zip(freq_pun_col, punctuation_vector):
sns.kdeplot(df[col1], label='{}'.format(pun1), color='black')
plt.legend(loc=0)
plt.savefig('results/stats_corpus/{}/kdeplot_{}.png'.format(subfile,col1))
plt.show()
if with_pairs:
for col2, pun2 in zip(freq_pun_col[freq_pun_col.index(col1)+1:],
punctuation_vector[punctuation_vector.index(pun1)+1:]):
sns.kdeplot(df[col1], df[col2], label='{},{}'.format(pun1,pun2))
plt.legend(loc=0)
plt.savefig('results/stats_corpus/{}/kdeplot_{}_{}.png'.format(subfile,
col1,
col2))
plt.show()
def get_overall_hist(df,subfile,
punctuation_vector=options.punctuation_vector,
freq_pun_col=options.freq_pun_col):
bins = np.arange(0,1,0.01)
for col1, pun1 in zip(freq_pun_col, punctuation_vector):
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
plt.hist(df[col1], bins=bins, label=pun1, color='blue')
plt.legend(loc=0, fontsize=options.font_size)
plt.xlabel('punctuation frequency')
plt.ylabel('number of documents')
plt.savefig('results/stats_corpus/{}/hist_{}.png'.format(subfile,col1))
plt.show()
def show_weapon_hist(kl_within_author_samples, kl_between_author_samples,
type_compute_baseline,path_res,feature_name,
baseline_between=None,
baseline_within=None,
bins=100, to_show=True):
bin_size = 0.1
bins = np.arange(0,2, bin_size)
x_bins = np.arange(0,2+bin_size, 4*bin_size)
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
y1, bin_edges1=np.histogram(kl_within_author_samples,bins=bins)
y1 = list(map(lambda x: x/sum(y1), y1))
bincenters1 = 0.5*(bin_edges1[1:]+bin_edges1[:-1])
y2, bin_edges2=np.histogram(kl_between_author_samples,bins=bins)
y2 = list(map(lambda x: x/sum(y2), y2))
bincenters2 = 0.5*(bin_edges2[1:]+bin_edges2[:-1])
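    # Both histograms are normalised to relative frequencies so the within- and
    # between-author KL distributions are directly comparable regardless of sample size.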
# plt.hist(kl_within_author_samples, bins=bins, color='black',
# alpha=0.4,)
plt.bar(bincenters1, y1, width=bin_size,
color='black',alpha=0.3,)
plt.plot(bincenters1,y1,'-', color='black')
plt.bar(bincenters1, y2, width=bin_size,
color='blue',alpha=0.3,)
plt.plot(bincenters2, y2, '-', color='blue')
if type_compute_baseline:
plt.axvline(baseline_between, color='blue', linestyle=':')
plt.axvline(baseline_within, color='black', linestyle=':')
plt.xlim(min(min(kl_within_author_samples),
min(kl_between_author_samples)),2)
plt.ylim(0,1)
plt.yticks([0,0.5,1])
plt.xticks(x_bins)
plt.xlabel('KL divergence')
plt.ylabel('frequency')
plt.legend('')
plt.savefig('{}/kl_hist_comparison_{}.png'.format(path_res,feature_name))
if to_show: plt.show()
## CUMSUM REPRESENTATION
#y1_cum_sum = pd.Series(y1).cumsum()
#y1_cum_sum = y1_cum_sum.tolist()
#
#y2_cum_sum = pd.Series(y2).cumsum()
#y2_cum_sum = y2_cum_sum.tolist()
#
#
#plt.plot(bincenters1, y1_cum_sum, color='black', label='within')
#plt.plot(bincenters1, y2_cum_sum, color='blue', label='between')
#plt.legend()
def plot_list_class(df, class_name='author'):
res = df.groupby([class_name],as_index=False)\
['book_id'].count().rename(columns={'book_id':'nb_books'}).sort_values('nb_books',ascending=False)
# list_author = list(res[class_name])
# list_nb_books = list(res['nb_books'])
#
# plt.bar(list(range(0,len(list_author))), list_nb_books)
# plt.xticks([10,50,100,150,200],fontsize=options.font_size)
# plt.yticks([0,20,40,60],fontsize=options.font_size)
# plt.xlim([10,230])
# plt.xlabel('Number of documents',fontsize=options.font_size)
# plt.ylabel('Number of {}s'.format(class_name),fontsize=options.font_size)
# plt.bar(list_nb_books, list_nb_authors,width=3, color='blue')
#
res = res[[class_name,'nb_books']].\
groupby('nb_books',as_index=False)[class_name].count()
list_nb_authors = list(res[class_name])
list_nb_books = list(res['nb_books'])
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
ax.set_xlim([10,230])
plt.xticks([10,50,100,150,200],fontsize=options.font_size)
plt.yticks([0,20,40,60],fontsize=options.font_size)
plt.xlim([10,230])
plt.xlabel('number of documents',fontsize=options.font_size)
plt.ylabel('number of {}s'.format(class_name),fontsize=options.font_size)
plt.bar(list_nb_books, list_nb_authors,width=3, color='blue')
plt.show()
def plot_hist_punc(freq, punctuation_vector=options.punctuation_vector):
y = freq
x = list(range(0,10))
ax = plt.subplot(111)
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
ax.bar(x, y, align='center', color='b') #< added align keyword
ax.xaxis_date()
ax.set_ylim(bottom=0, top=0.7)
plt.xticks(list(range(0,10)), punctuation_vector[:-1]+['...'])
plt.show()
def plot_hist_words(freq):
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
plt.rcParams.update({'font.size': options.font_size})
plt.bar(list(range(0,len(freq))), freq, color='magenta', align='center')
ax.set_ylim(bottom=0, top=0.4)
#plt.xticks(list(range(0,len(freq))), punctuation_vector)
plt.show()
def func(x, pos):
return "{:.2f}".format(x).replace("0.", ".").replace("1.00", "")
def plot_trans_mat(mat_nb_words,
punctuation_vector=options.punctuation_vector):
vegetables = punctuation_vector[:-1]+['...']
farmers = punctuation_vector[:-1]+['...']
harvest = np.array(mat_nb_words)
fig, ax = plt.subplots()
im, _ = heatmap(harvest, vegetables, farmers, ax=ax,
)
annotate_heatmap(im, valfmt="{x:.1f}", size=7)
plt.tight_layout()
plt.show()
def plot_scatter_freqs(df, title1=None, title2=None,
freq1=None, freq2=None,
font_size=options.font_size,
):
if title1 is None:
title1 = random.choice(df['title'].tolist())
if title2 is None:
title2 = random.choice(df['title'].tolist())
if freq1 is None:
freq1 = df[df['title']==title1]['freq_pun'].iloc[0]
if freq2 is None:
freq2 = df[df['title']==title2]['freq_pun'].iloc[0]
ax = plt.subplot(111, )
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(options.font_size)
plt.xlabel('$\it{'+title1.replace(' ','\ ')+'}$', fontsize=options.font_size)
plt.ylabel('$\it{'+title2.replace(' ','\ ')+'}$', fontsize=options.font_size)
plt.gca().set_aspect('equal', adjustable='box')
vect = np.linspace(-0, 0.5, 10)
plt.xticks([-0.,0.25,0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)
plt.yticks([-0,0.25,0.5],['0', '0.25', '0.5'], fontsize=options.font_size)
for i in range(0,len(color_vector)):
plt.plot(freq1[i], freq2[i], color=color_vector[i], marker="o")
plt.plot(vect, vect, color = 'black', alpha=0.2)
plt.show()
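# Illustrative usage sketch (not part of the original module); assumes a pandas
# DataFrame `df` carrying the punctuation-frequency columns in options.freq_pun_col
# plus 'title', 'book_id', 'author' and 'freq_pun' as used above:
#   get_overall_hist(df, subfile='my_corpus')
#   plot_list_class(df, class_name='author')
#   plot_scatter_freqs(df)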
|
[
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.axvline",
"numpy.arange",
"numpy.histogram",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"webcolors.hex_to_rgb",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.bar",
"punctuation.visualisation.heatmap_functions.annotate_heatmap",
"matplotlib.pyplot.tight_layout",
"punctuation.visualisation.heatmap_functions.heatmap",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((1343, 1356), 'webcolors.hex_to_rgb', 'hex_to_rgb', (['i'], {}), '(i)\n', (1353, 1356), False, 'from webcolors import hex_to_rgb\n'), ((2515, 2536), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (2524, 2536), True, 'import numpy as np\n'), ((3412, 3437), 'numpy.arange', 'np.arange', (['(0)', '(2)', 'bin_size'], {}), '(0, 2, bin_size)\n', (3421, 3437), True, 'import numpy as np\n'), ((3450, 3490), 'numpy.arange', 'np.arange', (['(0)', '(2 + bin_size)', '(4 * bin_size)'], {}), '(0, 2 + bin_size, 4 * bin_size)\n', (3459, 3490), True, 'import numpy as np\n'), ((3500, 3516), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3511, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3715, 3764), 'numpy.histogram', 'np.histogram', (['kl_within_author_samples'], {'bins': 'bins'}), '(kl_within_author_samples, bins=bins)\n', (3727, 3764), True, 'import numpy as np\n'), ((3887, 3937), 'numpy.histogram', 'np.histogram', (['kl_between_author_samples'], {'bins': 'bins'}), '(kl_between_author_samples, bins=bins)\n', (3899, 3937), True, 'import numpy as np\n'), ((4144, 4210), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters1', 'y1'], {'width': 'bin_size', 'color': '"""black"""', 'alpha': '(0.3)'}), "(bincenters1, y1, width=bin_size, color='black', alpha=0.3)\n", (4151, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4228, 4273), 'matplotlib.pyplot.plot', 'plt.plot', (['bincenters1', 'y1', '"""-"""'], {'color': '"""black"""'}), "(bincenters1, y1, '-', color='black')\n", (4236, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4281, 4346), 'matplotlib.pyplot.bar', 'plt.bar', (['bincenters1', 'y2'], {'width': 'bin_size', 'color': '"""blue"""', 'alpha': '(0.3)'}), "(bincenters1, y2, width=bin_size, color='blue', alpha=0.3)\n", (4288, 4346), True, 'import matplotlib.pyplot as plt\n'), ((4364, 4408), 'matplotlib.pyplot.plot', 'plt.plot', (['bincenters2', 'y2', '"""-"""'], {'color': '"""blue"""'}), "(bincenters2, y2, '-', color='blue')\n", (4372, 4408), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4693), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4687, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4720), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.5, 1]'], {}), '([0, 0.5, 1])\n', (4707, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4741), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_bins'], {}), '(x_bins)\n', (4733, 4741), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4778), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL divergence"""'], {}), "('KL divergence')\n", (4761, 4778), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (4793, 4806), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4830), 'matplotlib.pyplot.legend', 'plt.legend', (['""""""'], {}), "('')\n", (4826, 4830), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6218), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6213, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6426, 6489), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[10, 50, 100, 150, 200]'], {'fontsize': 'options.font_size'}), '([10, 50, 100, 150, 200], fontsize=options.font_size)\n', (6436, 6489), True, 'import matplotlib.pyplot as plt\n'), ((6489, 6544), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60]'], {'fontsize': 'options.font_size'}), '([0, 20, 40, 60], fontsize=options.font_size)\n', 
(6499, 6544), True, 'import matplotlib.pyplot as plt\n'), ((6545, 6564), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[10, 230]'], {}), '([10, 230])\n', (6553, 6564), True, 'import matplotlib.pyplot as plt\n'), ((6568, 6629), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of documents"""'], {'fontsize': 'options.font_size'}), "('number of documents', fontsize=options.font_size)\n", (6578, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6711, 6773), 'matplotlib.pyplot.bar', 'plt.bar', (['list_nb_books', 'list_nb_authors'], {'width': '(3)', 'color': '"""blue"""'}), "(list_nb_books, list_nb_authors, width=3, color='blue')\n", (6718, 6773), True, 'import matplotlib.pyplot as plt\n'), ((6777, 6787), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6785, 6787), True, 'import matplotlib.pyplot as plt\n'), ((6916, 6932), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6927, 6932), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7317, 7319), True, 'import matplotlib.pyplot as plt\n'), ((7358, 7374), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (7369, 7374), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7604), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': options.font_size}"], {}), "({'font.size': options.font_size})\n", (7570, 7604), True, 'import matplotlib.pyplot as plt\n'), ((7783, 7793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7791, 7793), True, 'import matplotlib.pyplot as plt\n'), ((8098, 8120), 'numpy.array', 'np.array', (['mat_nb_words'], {}), '(mat_nb_words)\n', (8106, 8120), True, 'import numpy as np\n'), ((8145, 8159), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8157, 8159), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8216), 'punctuation.visualisation.heatmap_functions.heatmap', 'heatmap', (['harvest', 'vegetables', 'farmers'], {'ax': 'ax'}), '(harvest, vegetables, farmers, ax=ax)\n', (8179, 8216), False, 'from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap\n'), ((8248, 8294), 'punctuation.visualisation.heatmap_functions.annotate_heatmap', 'annotate_heatmap', (['im'], {'valfmt': '"""{x:.1f}"""', 'size': '(7)'}), "(im, valfmt='{x:.1f}', size=7)\n", (8264, 8294), False, 'from punctuation.visualisation.heatmap_functions import heatmap, annotate_heatmap\n'), ((8299, 8317), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8315, 8317), True, 'import matplotlib.pyplot as plt\n'), ((8322, 8332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8330, 8332), True, 'import matplotlib.pyplot as plt\n'), ((8858, 8874), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (8869, 8874), True, 'import matplotlib.pyplot as plt\n'), ((9288, 9312), 'numpy.linspace', 'np.linspace', (['(-0)', '(0.5)', '(10)'], {}), '(-0, 0.5, 10)\n', (9299, 9312), True, 'import numpy as np\n'), ((9317, 9396), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-0.0, 0.25, 0.5]', "['0', '0.25', '0.5']"], {'fontsize': 'options.font_size'}), "([-0.0, 0.25, 0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)\n", (9327, 9396), True, 'import matplotlib.pyplot as plt\n'), ((9398, 9475), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-0, 0.25, 0.5]', "['0', '0.25', '0.5']"], {'fontsize': 'options.font_size'}), "([-0, 0.25, 0.5], ['0', '0.25', '0.5'], fontsize=options.font_size)\n", (9408, 9475), True, 'import matplotlib.pyplot as plt\n'), 
((9594, 9640), 'matplotlib.pyplot.plot', 'plt.plot', (['vect', 'vect'], {'color': '"""black"""', 'alpha': '(0.2)'}), "(vect, vect, color='black', alpha=0.2)\n", (9602, 9640), True, 'import matplotlib.pyplot as plt\n'), ((9647, 9657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9655, 9657), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1752), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1745, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1844, 1854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1852, 1854), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2625), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2620, 2625), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2862), 'matplotlib.pyplot.hist', 'plt.hist', (['df[col1]'], {'bins': 'bins', 'label': 'pun1', 'color': '"""blue"""'}), "(df[col1], bins=bins, label=pun1, color='blue')\n", (2815, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2916), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)', 'fontsize': 'options.font_size'}), '(loc=0, fontsize=options.font_size)\n', (2881, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""punctuation frequency"""'], {}), "('punctuation frequency')\n", (2935, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2969, 3002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of documents"""'], {}), "('number of documents')\n", (2979, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3091, 3101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3099, 3101), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4506), 'matplotlib.pyplot.axvline', 'plt.axvline', (['baseline_between'], {'color': '"""blue"""', 'linestyle': '""":"""'}), "(baseline_between, color='blue', linestyle=':')\n", (4459, 4506), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4573), 'matplotlib.pyplot.axvline', 'plt.axvline', (['baseline_within'], {'color': '"""black"""', 'linestyle': '""":"""'}), "(baseline_within, color='black', linestyle=':')\n", (4526, 4573), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((9525, 9588), 'matplotlib.pyplot.plot', 'plt.plot', (['freq1[i]', 'freq2[i]'], {'color': 'color_vector[i]', 'marker': '"""o"""'}), "(freq1[i], freq2[i], color=color_vector[i], marker='o')\n", (9533, 9588), True, 'import matplotlib.pyplot as plt\n'), ((9224, 9233), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9231, 9233), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2153), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (2146, 2153), True, 'import matplotlib.pyplot as plt\n'), ((2327, 2337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2335, 2337), True, 'import matplotlib.pyplot as plt\n')]
|
import argparse
import os
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torchvision import transforms
from torchvision import datasets
from utils.lib import *
from utils.pgd_attack import *
from models.resnet import ResNet
def test(model, dataloader):
model.eval()
n_correct, n_total = 0, 0
for img, label in iter(dataloader):
batch_size = len(label)
img, label = img.cuda(), label.cuda()
with torch.no_grad():
class_output = model(img)
pred = class_output.data.max(1, keepdim=True)[1]
n_correct += pred.eq(label.data.view_as(pred)).cpu().sum()
n_total += batch_size
acc = n_correct.double() / n_total
return acc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate augmented training dataset and extract features')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--model-type', default='nat_model',
choices=['nat_model', 'adv_model'], type=str, help='model type')
parser.add_argument('--save-dir', default='./generate_data/', type=str, help='dir to save data')
parser.add_argument('--model-dir', default='./checkpoints/', type=str, help='dir to saved model')
# args parse
args = parser.parse_args()
# Set random seed
set_seed(args.seed)
model_type = args.model_type
save_dir = os.path.join(args.save_dir, model_type)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(args.model_dir, model_type, "checkpoint.pth")
batch_size = 128
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
mean = [x/255.0 for x in [125.3, 123.0, 113.9]]
std = [x/255.0 for x in [63.0, 62.1, 66.7]]
train_dataset = datasets.CIFAR10('./datasets/cifar10', train=True, download=True, transform=transform_train)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
test_dataset = datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
# Model Setup
model = torch.load(model_path).cuda()
model.eval()
attacker = LinfPGDAttack(model, eps=8/255.0, nb_iter=40,
eps_iter=1/255.0, rand_init=True, clip_min=0., clip_max=1.,
targeted=False, num_classes=10, elementwise_best=True)
augment_data = []
augment_label = []
for batch_x, batch_y in train_dataloader:
augment_data.extend(batch_x.numpy())
augment_label.extend(batch_y.numpy())
correct = 0.0
count = 0.0
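    # Four PGD passes over the training set follow: each pass uses a fresh random
    # start (rand_init=True), so the augmented dataset holds one clean copy plus
    # four adversarial copies of every training image.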
for j in range(4):
for batch_x, batch_y in train_dataloader:
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
adv_batch_x = attacker.perturb(batch_x, batch_y)
augment_data.extend(adv_batch_x.cpu().numpy())
augment_label.extend(batch_y.cpu().numpy())
with torch.no_grad():
outputs = model(adv_batch_x)
preds = torch.argmax(outputs, axis=1)
correct += torch.sum(preds==batch_y)
count += batch_x.shape[0]
print("Adv acc: {:.2f}%".format((correct/count)*100))
augment_data = np.array(augment_data)
augment_label = np.array(augment_label)
np.save(os.path.join(save_dir, "augment_data.npy"), augment_data)
np.save(os.path.join(save_dir, "augment_label.npy"), augment_label)
augment_data = torch.Tensor(augment_data)
augment_label = torch.Tensor(augment_label).long()
augment_dataset = TensorDataset(augment_data, augment_label)
augment_dataloader = DataLoader(augment_dataset, batch_size=batch_size, shuffle=False)
augment_features = []
for batch_x, batch_y in augment_dataloader:
batch_x = batch_x.cuda()
with torch.no_grad():
feature = model.get_feature(batch_x)
augment_features.extend(feature.cpu().numpy())
augment_features = np.array(augment_features)
np.save(os.path.join(save_dir, "augment_feature.npy"), augment_features)
|
[
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"torch.load",
"os.path.join",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"numpy.array",
"torchvision.datasets.CIFAR10",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torch.argmax"
] |
[((813, 913), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate augmented training dataset and extract features"""'}), "(description=\n 'Generate augmented training dataset and extract features')\n", (836, 913), False, 'import argparse\n'), ((1467, 1506), 'os.path.join', 'os.path.join', (['args.save_dir', 'model_type'], {}), '(args.save_dir, model_type)\n', (1479, 1506), False, 'import os\n'), ((1592, 1650), 'os.path.join', 'os.path.join', (['args.model_dir', 'model_type', '"""checkpoint.pth"""'], {}), "(args.model_dir, model_type, 'checkpoint.pth')\n", (1604, 1650), False, 'import os\n'), ((2053, 2150), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./datasets/cifar10"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "('./datasets/cifar10', train=True, download=True, transform\n =transform_train)\n", (2069, 2150), False, 'from torchvision import datasets\n'), ((2169, 2259), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(dataset=train_dataset, batch_size=batch_size, shuffle=False,\n num_workers=2)\n', (2179, 2259), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((2275, 2352), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./datasets/cifar10"""'], {'train': '(False)', 'transform': 'transform_test'}), "('./datasets/cifar10', train=False, transform=transform_test)\n", (2291, 2352), False, 'from torchvision import datasets\n'), ((2375, 2464), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=2)\n', (2385, 2464), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3597, 3619), 'numpy.array', 'np.array', (['augment_data'], {}), '(augment_data)\n', (3605, 3619), True, 'import numpy as np\n'), ((3640, 3663), 'numpy.array', 'np.array', (['augment_label'], {}), '(augment_label)\n', (3648, 3663), True, 'import numpy as np\n'), ((3827, 3853), 'torch.Tensor', 'torch.Tensor', (['augment_data'], {}), '(augment_data)\n', (3839, 3853), False, 'import torch\n'), ((3932, 3974), 'torch.utils.data.TensorDataset', 'TensorDataset', (['augment_data', 'augment_label'], {}), '(augment_data, augment_label)\n', (3945, 3974), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4000, 4065), 'torch.utils.data.DataLoader', 'DataLoader', (['augment_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(augment_dataset, batch_size=batch_size, shuffle=False)\n', (4010, 4065), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4333, 4359), 'numpy.array', 'np.array', (['augment_features'], {}), '(augment_features)\n', (4341, 4359), True, 'import numpy as np\n'), ((1518, 1542), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1532, 1542), False, 'import os\n'), ((1552, 1573), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1563, 1573), False, 'import os\n'), ((3677, 3719), 'os.path.join', 'os.path.join', (['save_dir', '"""augment_data.npy"""'], {}), "(save_dir, 'augment_data.npy')\n", (3689, 3719), False, 'import os\n'), ((3747, 3790), 'os.path.join', 'os.path.join', (['save_dir', '"""augment_label.npy"""'], {}), "(save_dir, 'augment_label.npy')\n", (3759, 3790), False, 'import os\n'), ((4372, 4417), 'os.path.join', 
'os.path.join', (['save_dir', '"""augment_feature.npy"""'], {}), "(save_dir, 'augment_feature.npy')\n", (4384, 4417), False, 'import os\n'), ((507, 522), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (520, 522), False, 'import torch\n'), ((1724, 1760), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1745, 1760), False, 'from torchvision import transforms\n'), ((1770, 1803), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1801, 1803), False, 'from torchvision import transforms\n'), ((1813, 1834), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1832, 1834), False, 'from torchvision import transforms\n'), ((1897, 1918), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1916, 1918), False, 'from torchvision import transforms\n'), ((2496, 2518), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2506, 2518), False, 'import torch\n'), ((3401, 3430), 'torch.argmax', 'torch.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (3413, 3430), False, 'import torch\n'), ((3455, 3482), 'torch.sum', 'torch.sum', (['(preds == batch_y)'], {}), '(preds == batch_y)\n', (3464, 3482), False, 'import torch\n'), ((3875, 3902), 'torch.Tensor', 'torch.Tensor', (['augment_label'], {}), '(augment_label)\n', (3887, 3902), False, 'import torch\n'), ((4188, 4203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4201, 4203), False, 'import torch\n'), ((3306, 3321), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3319, 3321), False, 'import torch\n')]
|