code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M)
---|---|---
import os
import h5py
import numpy as np
import pandas as pd
import multiprocessing as mp
class Scaler:
def __init__(self, mean=None, std=None):
self.mean = mean
self.std = std
def fit(self, data):
self.mean = np.mean(data)
self.std = np.std(data)
def set_mean(self, mean):
self.mean = mean
def set_std(self, std):
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return data * self.std + self.mean
def load_h5(filename, keywords):
f = h5py.File(filename, 'r')
data = []
for name in keywords:
data.append(np.array(f[name]))
f.close()
if len(data) == 1:
return data[0]
return data
def write_h5(filename, d):
f = h5py.File(filename, 'w')
for key, value in d.items():
f.create_dataset(key, data=value)
f.close()
def from_str_to_np(s):
arr = np.fromstring(s, dtype=np.int32, sep=',')
arr = arr.reshape(-1, 3)
return arr
def load_trajectory(filename):
with open(filename) as f:
lines = f.readlines()
pool = mp.Pool()
trajectory = pool.map(from_str_to_np, lines)
return trajectory
def fill_missing(data):
T, N, D = data.shape
data = np.reshape(data, (T, N * D))
df = pd.DataFrame(data)
df = df.fillna(method='pad')
df = df.fillna(method='bfill')
data = df.values
data = np.reshape(data, (T, N, D))
data[np.isnan(data)] = 0
return data
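# --- Hedged usage sketch of the helpers above (names and shapes are illustrative) ---
# scaler = Scaler()
# scaler.fit(train_data)                        # store mean/std of the training data
# z = scaler.transform(train_data)              # standardize
# x = scaler.inverse_transform(z)               # round-trips back to the original scale
# filled = fill_missing(np.full((4, 3, 2), np.nan))  # pad/bfill, remaining NaNs set to 0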
|
[
"numpy.mean",
"numpy.reshape",
"h5py.File",
"numpy.array",
"numpy.isnan",
"multiprocessing.Pool",
"numpy.std",
"pandas.DataFrame",
"numpy.fromstring"
] |
[((562, 586), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (571, 586), False, 'import h5py\n'), ((758, 782), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (767, 782), False, 'import h5py\n'), ((897, 938), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': 'np.int32', 'sep': '""","""'}), "(s, dtype=np.int32, sep=',')\n", (910, 938), True, 'import numpy as np\n'), ((1078, 1087), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (1085, 1087), True, 'import multiprocessing as mp\n'), ((1216, 1244), 'numpy.reshape', 'np.reshape', (['data', '(T, N * D)'], {}), '(data, (T, N * D))\n', (1226, 1244), True, 'import numpy as np\n'), ((1252, 1270), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1264, 1270), True, 'import pandas as pd\n'), ((1363, 1390), 'numpy.reshape', 'np.reshape', (['data', '(T, N, D)'], {}), '(data, (T, N, D))\n', (1373, 1390), True, 'import numpy as np\n'), ((232, 245), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (239, 245), True, 'import numpy as np\n'), ((260, 272), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (266, 272), True, 'import numpy as np\n'), ((1398, 1412), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (1406, 1412), True, 'import numpy as np\n'), ((638, 655), 'numpy.array', 'np.array', (['f[name]'], {}), '(f[name])\n', (646, 655), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#AUTHOR: <NAME>
#DATE: Wed Apr 3 16:15:13 2019
#VERSION:
#PYTHON_VERSION: 3.6
'''
DESCRIPTION
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from neural_network import neural_network
stimuli = np.random.rand(6, 11)
c = neural_network(stimuli=stimuli, threshold=0.05)
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches((12, 5))
ax1.scatter(stimuli[:, -2], stimuli[:, -1], s=10, c='r')
agent = ax1.scatter(0, 0, s=10, c='b')
ax1.legend(labels=['Stimuli', 'Agent'])
xlabels = ['Odor\n1', 'Odor\n2', 'Odor\n3', 'Odor\n4', 'Odor\n5',
'Odor\n6', 'Taste\n1', 'Taste\n2', 'Taste\n3']
sensors = ax2.bar(xlabels, c.get_sensory_inputs())
ax2.set_title('Agent\'s sensory inputs')
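# update() is the FuncAnimation callback: it moves the agent marker to pos, feeds pos to the
# network via c.jump(), rescales the y-axis, and refreshes the sensory-input bars.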
def update(pos):
agent.set_offsets(pos)
sensory_inputs = c.jump(pos)
max_h = sensory_inputs.max() + 1
ax2.set_ylim([0, max_h])
for bar, h in zip(sensors, sensory_inputs):
bar.set_height(h)
return agent, sensors
if __name__ == '__main__':
anim = FuncAnimation(fig, update, interval=1, repeat_delay=10,
frames=[(i/100, i/100) for i in range(1, 100)])
anim.save('animation.gif', dpi=100, writer='pillow')
plt.show()
|
[
"matplotlib.pyplot.subplots",
"neural_network.neural_network",
"numpy.random.rand",
"matplotlib.pyplot.show"
] |
[((298, 319), 'numpy.random.rand', 'np.random.rand', (['(6)', '(11)'], {}), '(6, 11)\n', (312, 319), True, 'import numpy as np\n'), ((324, 371), 'neural_network.neural_network', 'neural_network', ([], {'stimuli': 'stimuli', 'threshold': '(0.05)'}), '(stimuli=stimuli, threshold=0.05)\n', (338, 371), False, 'from neural_network import neural_network\n'), ((391, 409), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (403, 409), True, 'import matplotlib.pyplot as plt\n'), ((1268, 1278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1276, 1278), True, 'import matplotlib.pyplot as plt\n')]
|
"""System utilities."""
import socket
import sys
import os
import csv
import yaml
import torch
import torchvision
import random
import numpy as np
import datetime
import hydra
from omegaconf import OmegaConf, open_dict
import logging
def system_startup(process_idx, local_group_size, cfg):
"""Decide and print GPU / CPU / hostname info. Generate local distributed setting if running in distr. mode."""
log = get_log(cfg)
torch.backends.cudnn.benchmark = cfg.case.impl.benchmark
torch.multiprocessing.set_sharing_strategy(cfg.case.impl.sharing_strategy)
huggingface_offline_mode(cfg.case.impl.enable_huggingface_offline_mode)
# 100% reproducibility?
if cfg.case.impl.deterministic:
set_deterministic()
if cfg.seed is not None:
set_random_seed(cfg.seed + 10 * process_idx)
dtype = getattr(torch, cfg.case.impl.dtype) # :> dont mess this up
device = torch.device(f"cuda:{process_idx}") if torch.cuda.is_available() else torch.device("cpu")
setup = dict(device=device, dtype=dtype)
python_version = sys.version.split(" (")[0]
log.info(f"Platform: {sys.platform}, Python: {python_version}, PyTorch: {torch.__version__}")
log.info(f"CPUs: {torch.get_num_threads()}, GPUs: {torch.cuda.device_count()} on {socket.gethostname()}.")
if torch.cuda.is_available():
torch.cuda.set_device(process_idx)
log.info(f"GPU : {torch.cuda.get_device_name(device=device)}")
# if not torch.cuda.is_available() and not cfg.dryrun:
# raise ValueError('No GPU allocated to this process. Running in CPU-mode is likely a bad idea. Complain to your admin.')
return setup
def is_main_process():
return not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
def get_log(cfg, name=os.path.basename(__file__)):
"""Solution via https://github.com/facebookresearch/hydra/issues/1126#issuecomment-727826513"""
if is_main_process():
logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg, resolve=True))
logger = logging.getLogger(name)
else:
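# On non-main processes, return a no-op stand-in: a function whose .info attribute points back
# to itself, so log calls are silently swallowed.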
def logger(*args, **kwargs):
pass
logger.info = logger
return logger
def initialize_multiprocess_log(cfg):
with open_dict(cfg):
# manually save log config to cfg
log_config = hydra.core.hydra_config.HydraConfig.get().job_logging
# but resolve any filenames
cfg.job_logging_cfg = OmegaConf.to_container(log_config, resolve=True)
cfg.original_cwd = hydra.utils.get_original_cwd()
def save_summary(cfg, metrics, stats, local_time, original_cwd=True, table_name="breach"):
"""Save two summary tables. A detailed table of iterations/loss+acc and a summary of the end results."""
# 1) detailed table:
for step in range(len(stats["train_loss"])):
iteration = dict()
for key in stats:
iteration[key] = stats[key][step] if step < len(stats[key]) else None
save_to_table(".", f"{cfg.attack.type}_convergence_results", dryrun=cfg.dryrun, **iteration)
try:
local_folder = os.getcwd().split("outputs/")[1]
except IndexError:
local_folder = ""
# 2) save a reduced summary
summary = dict(
name=cfg.name,
usecase=cfg.case.name,
model=cfg.case.model,
datapoints=cfg.case.user.num_data_points,
model_state=cfg.case.server.model_state,
attack=cfg.attack.type,
attacktype=cfg.attack.attack_type,
**{k: v for k, v in metrics.items() if k != "order"},
score=stats["opt_value"],
total_time=str(datetime.timedelta(seconds=local_time)).replace(",", ""),
user_type=cfg.case.user.user_type,
gradient_noise=cfg.case.user.local_diff_privacy.gradient_noise,
seed=cfg.seed,
# dump extra values from here:
**{f"ATK_{k}": v for k, v in cfg.attack.items()},
**{k: v for k, v in cfg.case.items() if k not in ["name", "model"]},
folder=local_folder,
)
location = os.path.join(cfg.original_cwd, "tables") if original_cwd else "tables"
save_to_table(location, f"{table_name}_{cfg.case.name}_{cfg.case.data.name}_reports", dryrun=cfg.dryrun, **summary)
def save_to_table(out_dir, table_name, dryrun, **kwargs):
"""Save keys to .csv files. Function adapted from Micah."""
# Check for file
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir, f"table_{table_name}.csv")
fieldnames = list(kwargs.keys())
# Read or write header
try:
with open(fname, "r") as f:
reader = csv.reader(f, delimiter="\t")
header = next(reader) # noqa # this line is testing the header
# assert header == fieldnames[:len(header)] # new columns are ok, but old columns need to be consistent
# don't test; always write when in doubt, to prevent erroneous table rewrites
except Exception as e: # noqa
if not dryrun:
# print('Creating a new .csv table...')
with open(fname, "w") as f:
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
writer.writeheader()
else:
pass
# print(f'Would create new .csv table {fname}.')
# Write a new row
if not dryrun:
# Add row for this experiment
with open(fname, "a") as f:
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
writer.writerow(kwargs)
# print('\nResults saved to ' + fname + '.')
else:
pass
# print(f'Would save results to {fname}.')
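# Hedged usage sketch (column names and values are illustrative):
# save_to_table("tables", "demo", dryrun=False, name="run1", accuracy=0.93, loss=0.21)
# appends one tab-separated row to tables/table_demo.csv, writing the header first if the file is new.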
def set_random_seed(seed=233):
"""."""
torch.manual_seed(seed + 1)
torch.cuda.manual_seed(seed + 2)
torch.cuda.manual_seed_all(seed + 3)
np.random.seed(seed + 4)
torch.cuda.manual_seed_all(seed + 5)
random.seed(seed + 6)
# Can't be too careful :>
def set_deterministic():
"""Switch pytorch into a deterministic computation mode."""
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
def avg_n_dicts(dicts):
"""https://github.com/wronnyhuang/metapoison/blob/master/utils.py."""
# given a list of dicts with the same exact schema, return a single dict with same schema whose values are the
# key-wise average over all input dicts
means = {}
for dic in dicts:
for key in dic:
if key not in means:
if isinstance(dic[key], list):
means[key] = [0 for entry in dic[key]]
else:
means[key] = 0
if isinstance(dic[key], list):
for idx, entry in enumerate(dic[key]):
means[key][idx] += entry / len(dicts)
else:
means[key] += dic[key] / len(dicts)
return means
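# Worked example of the key-wise averaging above (values illustrative):
# avg_n_dicts([{"acc": 0.8, "loss": [1.0, 0.5]},
#              {"acc": 0.6, "loss": [2.0, 1.5]}])
# -> {"acc": 0.7, "loss": [1.5, 1.0]}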
def get_base_cwd():
try:
return hydra.utils.get_original_cwd()
except ValueError: # Hydra not initialized:
return os.getcwd()
def overview(server, user, attacker):
num_params, num_buffers = (
sum([p.numel() for p in user.model.parameters()]),
sum([b.numel() for b in user.model.buffers()]),
)
target_information = user.num_data_points * torch.as_tensor(server.cfg_data.shape).prod()
print(f"Model architecture {user.model.name} loaded with {num_params:,} parameters and {num_buffers:,} buffers.")
print(
f"Overall this is a data ratio of {server.num_queries * num_params / target_information:7.0f}:1 "
f"for target shape {[user.num_data_points, *server.cfg_data.shape]} given that num_queries={server.num_queries}."
)
print(user)
print(server)
print(attacker)
def save_reconstruction(
reconstructed_user_data, server_payload, true_user_data, cfg, side_by_side=True, target_indx=None
):
"""If target_indx is not None, only the datapoints at target_indx will be saved to file."""
os.makedirs("reconstructions", exist_ok=True)
metadata = server_payload[0]["metadata"]
if metadata["modality"] == "text":
from breaching.cases.data.datasets_text import _get_tokenizer
tokenizer = _get_tokenizer(
server_payload[0]["metadata"]["tokenizer"],
server_payload[0]["metadata"]["vocab_size"],
cache_dir=cfg.case.data.path,
)
text_rec = tokenizer.batch_decode(reconstructed_user_data["data"])
text_ref = tokenizer.batch_decode(true_user_data["data"])
if target_indx is not None:
text_rec = text_rec[target_indx]
text_ref = text_ref[target_indx]
filepath = os.path.join(
"reconstructions", f"text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt"
)
with open(filepath, "w") as f:
f.writelines(text_rec)
if side_by_side:
f.write("\n")
f.write("========== GROUND TRUTH TEXT ===========")
f.write("\n")
f.writelines(text_ref)
else:
if hasattr(metadata, "mean"):
dm = torch.as_tensor(metadata.mean)[None, :, None, None]
ds = torch.as_tensor(metadata.std)[None, :, None, None]
else:
dm, ds = torch.tensor(0,), torch.tensor(1)
rec_denormalized = torch.clamp(reconstructed_user_data["data"].cpu() * ds + dm, 0, 1)
ground_truth_denormalized = torch.clamp(true_user_data["data"].cpu() * ds + dm, 0, 1)
if target_indx is not None:
rec_denormalized = rec_denormalized[target_indx]
ground_truth_denormalized = ground_truth_denormalized[target_indx]
filepath = os.path.join(
"reconstructions", f"img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png",
)
if not side_by_side:
torchvision.utils.save_image(rec_denormalized, filepath)
else:
torchvision.utils.save_image(torch.cat([rec_denormalized, ground_truth_denormalized]), filepath)
def dump_metrics(cfg, metrics):
"""Simple yaml dump of metric values."""
filepath = f"metrics_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.yaml"
sanitized_metrics = dict()
for metric, val in metrics.items():
try:
sanitized_metrics[metric] = np.asarray(val).item()
except ValueError:
sanitized_metrics[metric] = np.asarray(val).tolist()
with open(filepath, "w") as yaml_file:
yaml.dump(sanitized_metrics, yaml_file, default_flow_style=False)
def huggingface_offline_mode(huggingface_offline_mode):
if huggingface_offline_mode:
os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
|
[
"logging.getLogger",
"omegaconf.open_dict",
"csv.DictWriter",
"torch.as_tensor",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.distributed.get_rank",
"sys.version.split",
"datetime.timedelta",
"torchvision.utils.save_image",
"hydra.utils.get_original_cwd",
"numpy.asarray",
"os.path.isdir",
"numpy.random.seed",
"csv.reader",
"socket.gethostname",
"torch.get_num_threads",
"yaml.dump",
"breaching.cases.data.datasets_text._get_tokenizer",
"torch.use_deterministic_algorithms",
"hydra.core.hydra_config.HydraConfig.get",
"torch.cuda.set_device",
"torch.cat",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.cuda.get_device_name",
"os.makedirs",
"os.path.join",
"torch.distributed.is_initialized",
"random.seed",
"os.getcwd",
"torch.tensor",
"omegaconf.OmegaConf.to_container",
"os.path.basename",
"torch.multiprocessing.set_sharing_strategy",
"torch.cuda.manual_seed"
] |
[((502, 576), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['cfg.case.impl.sharing_strategy'], {}), '(cfg.case.impl.sharing_strategy)\n', (544, 576), False, 'import torch\n'), ((1314, 1339), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1337, 1339), False, 'import torch\n'), ((1799, 1825), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1815, 1825), False, 'import os\n'), ((4446, 4494), 'os.path.join', 'os.path.join', (['out_dir', 'f"""table_{table_name}.csv"""'], {}), "(out_dir, f'table_{table_name}.csv')\n", (4458, 4494), False, 'import os\n'), ((5705, 5732), 'torch.manual_seed', 'torch.manual_seed', (['(seed + 1)'], {}), '(seed + 1)\n', (5722, 5732), False, 'import torch\n'), ((5737, 5769), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(seed + 2)'], {}), '(seed + 2)\n', (5759, 5769), False, 'import torch\n'), ((5774, 5810), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(seed + 3)'], {}), '(seed + 3)\n', (5800, 5810), False, 'import torch\n'), ((5815, 5839), 'numpy.random.seed', 'np.random.seed', (['(seed + 4)'], {}), '(seed + 4)\n', (5829, 5839), True, 'import numpy as np\n'), ((5844, 5880), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(seed + 5)'], {}), '(seed + 5)\n', (5870, 5880), False, 'import torch\n'), ((5885, 5906), 'random.seed', 'random.seed', (['(seed + 6)'], {}), '(seed + 6)\n', (5896, 5906), False, 'import random\n'), ((6121, 6161), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (6155, 6161), False, 'import torch\n'), ((8064, 8109), 'os.makedirs', 'os.makedirs', (['"""reconstructions"""'], {'exist_ok': '(True)'}), "('reconstructions', exist_ok=True)\n", (8075, 8109), False, 'import os\n'), ((953, 978), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (976, 978), False, 'import torch\n'), ((914, 949), 'torch.device', 'torch.device', (['f"""cuda:{process_idx}"""'], {}), "(f'cuda:{process_idx}')\n", (926, 949), False, 'import torch\n'), ((984, 1003), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (996, 1003), False, 'import torch\n'), ((1070, 1093), 'sys.version.split', 'sys.version.split', (['""" ("""'], {}), "(' (')\n", (1087, 1093), False, 'import sys\n'), ((1349, 1383), 'torch.cuda.set_device', 'torch.cuda.set_device', (['process_idx'], {}), '(process_idx)\n', (1370, 1383), False, 'import torch\n'), ((2064, 2087), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (2081, 2087), False, 'import logging\n'), ((2250, 2264), 'omegaconf.open_dict', 'open_dict', (['cfg'], {}), '(cfg)\n', (2259, 2264), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2449, 2497), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['log_config'], {'resolve': '(True)'}), '(log_config, resolve=True)\n', (2471, 2497), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2525, 2555), 'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (2553, 2555), False, 'import hydra\n'), ((4034, 4074), 'os.path.join', 'os.path.join', (['cfg.original_cwd', '"""tables"""'], {}), "(cfg.original_cwd, 'tables')\n", (4046, 4074), False, 'import os\n'), ((4381, 4403), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (4394, 4403), False, 'import os\n'), ((4413, 4433), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4424, 4433), False, 'import os\n'), ((7021, 7051), 
'hydra.utils.get_original_cwd', 'hydra.utils.get_original_cwd', ([], {}), '()\n', (7049, 7051), False, 'import hydra\n'), ((8285, 8423), 'breaching.cases.data.datasets_text._get_tokenizer', '_get_tokenizer', (["server_payload[0]['metadata']['tokenizer']", "server_payload[0]['metadata']['vocab_size']"], {'cache_dir': 'cfg.case.data.path'}), "(server_payload[0]['metadata']['tokenizer'], server_payload[0\n ]['metadata']['vocab_size'], cache_dir=cfg.case.data.path)\n", (8299, 8423), False, 'from breaching.cases.data.datasets_text import _get_tokenizer\n'), ((8753, 8877), 'os.path.join', 'os.path.join', (['"""reconstructions"""', 'f"""text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt"""'], {}), "('reconstructions',\n f'text_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.txt'\n )\n", (8765, 8877), False, 'import os\n'), ((9801, 9924), 'os.path.join', 'os.path.join', (['"""reconstructions"""', 'f"""img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png"""'], {}), "('reconstructions',\n f'img_rec_{cfg.case.data.name}_{cfg.case.model}_user{cfg.case.user.user_idx}.png'\n )\n", (9813, 9924), False, 'import os\n'), ((10629, 10694), 'yaml.dump', 'yaml.dump', (['sanitized_metrics', 'yaml_file'], {'default_flow_style': '(False)'}), '(sanitized_metrics, yaml_file, default_flow_style=False)\n', (10638, 10694), False, 'import yaml\n'), ((1703, 1737), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1735, 1737), False, 'import torch\n'), ((1741, 1769), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1767, 1769), False, 'import torch\n'), ((1988, 2045), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg.job_logging_cfg'], {'resolve': '(True)'}), '(cfg.job_logging_cfg, resolve=True)\n', (2010, 2045), False, 'from omegaconf import OmegaConf, open_dict\n'), ((2329, 2370), 'hydra.core.hydra_config.HydraConfig.get', 'hydra.core.hydra_config.HydraConfig.get', ([], {}), '()\n', (2368, 2370), False, 'import hydra\n'), ((4626, 4655), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (4636, 4655), False, 'import csv\n'), ((5436, 5492), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'delimiter': '"""\t"""', 'fieldnames': 'fieldnames'}), "(f, delimiter='\\t', fieldnames=fieldnames)\n", (5450, 5492), False, 'import csv\n'), ((7116, 7127), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7125, 7127), False, 'import os\n'), ((9981, 10037), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['rec_denormalized', 'filepath'], {}), '(rec_denormalized, filepath)\n', (10009, 10037), False, 'import torchvision\n'), ((1217, 1240), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (1238, 1240), False, 'import torch\n'), ((1250, 1275), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1273, 1275), False, 'import torch\n'), ((1281, 1301), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1299, 1301), False, 'import socket\n'), ((7369, 7407), 'torch.as_tensor', 'torch.as_tensor', (['server.cfg_data.shape'], {}), '(server.cfg_data.shape)\n', (7384, 7407), False, 'import torch\n'), ((9227, 9257), 'torch.as_tensor', 'torch.as_tensor', (['metadata.mean'], {}), '(metadata.mean)\n', (9242, 9257), False, 'import torch\n'), ((9296, 9325), 'torch.as_tensor', 'torch.as_tensor', (['metadata.std'], {}), '(metadata.std)\n', (9311, 9325), False, 'import torch\n'), ((9382, 9397), 
'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (9394, 9397), False, 'import torch\n'), ((9400, 9415), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (9412, 9415), False, 'import torch\n'), ((10093, 10149), 'torch.cat', 'torch.cat', (['[rec_denormalized, ground_truth_denormalized]'], {}), '([rec_denormalized, ground_truth_denormalized])\n', (10102, 10149), False, 'import torch\n'), ((1410, 1451), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {'device': 'device'}), '(device=device)\n', (1436, 1451), False, 'import torch\n'), ((3101, 3112), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3110, 3112), False, 'import os\n'), ((5113, 5169), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'delimiter': '"""\t"""', 'fieldnames': 'fieldnames'}), "(f, delimiter='\\t', fieldnames=fieldnames)\n", (5127, 5169), False, 'import csv\n'), ((10463, 10478), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (10473, 10478), True, 'import numpy as np\n'), ((3613, 3651), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'local_time'}), '(seconds=local_time)\n', (3631, 3651), False, 'import datetime\n'), ((10553, 10568), 'numpy.asarray', 'np.asarray', (['val'], {}), '(val)\n', (10563, 10568), True, 'import numpy as np\n')]
|
"""
Modified from https://github.com/microsoft/Swin-Transformer/blob/main/main.py
"""
import os
import time
import argparse
import datetime
import numpy as np
import oneflow as flow
import oneflow.backends.cudnn as cudnn
from flowvision.loss.cross_entropy import (
LabelSmoothingCrossEntropy,
SoftTargetCrossEntropy,
)
from flowvision.utils.metrics import accuracy, AverageMeter
from config import get_config
from models import build_model
from data import build_loader
from lr_scheduler import build_scheduler
from optimizer import build_optimizer
from logger import create_logger
from utils import (
load_checkpoint,
save_checkpoint,
get_grad_norm,
auto_resume_helper,
reduce_tensor,
)
def parse_option():
parser = argparse.ArgumentParser(
"Flowvision image classification training and evaluation script", add_help=False
)
parser.add_argument(
"--model_arch",
type=str,
required=True,
default="swin_tiny_patch4_window7_224",
help="model for training",
)
parser.add_argument(
"--cfg", type=str, required=True, metavar="FILE", help="path to config file",
)
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs="+",
)
# easy config modification
parser.add_argument(
"--batch-size", type=int, default=8, help="batch size for single GPU"
)
parser.add_argument("--data-path", type=str, help="path to dataset")
parser.add_argument(
"--zip",
action="store_true",
help="use zipped dataset instead of folder dataset",
)
parser.add_argument(
"--cache-mode",
type=str,
default="part",
choices=["no", "full", "part"],
help="no: no cache, "
"full: cache all data, "
"part: sharding the dataset into nonoverlapping pieces and only cache one piece",
)
parser.add_argument("--resume", help="resume from checkpoint")
parser.add_argument(
"--accumulation-steps", type=int, help="gradient accumulation steps"
)
parser.add_argument(
"--use-checkpoint",
action="store_true",
help="whether to use gradient checkpointing to save memory",
)
parser.add_argument(
"--output",
default="output",
type=str,
metavar="PATH",
help="root of output folder, the full path is <output>/<model_name>/<tag> (default: output)",
)
parser.add_argument("--tag", help="tag of experiment")
parser.add_argument("--eval", action="store_true", help="Perform evaluation only")
parser.add_argument(
"--throughput", action="store_true", help="Test throughput only"
)
# distributed training
parser.add_argument(
"--local_rank",
type=int,
default=0,
required=False,
help="local rank for DistributedDataParallel",
)
args, unparsed = parser.parse_known_args()
config = get_config(args)
return args, config
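# Hedged example invocation (script name, config path and data path are illustrative):
# python3 main.py --model_arch swin_tiny_patch4_window7_224 \
#     --cfg configs/swin_tiny_patch4_window7_224.yaml \
#     --data-path /path/to/imagenet --batch-size 8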
def main(config):
(
dataset_train,
dataset_val,
data_loader_train,
data_loader_val,
mixup_fn,
) = build_loader(config)
logger.info(f"Creating model:{config.MODEL.ARCH}")
model = build_model(config)
model.cuda()
logger.info(str(model))
optimizer = build_optimizer(config, model)
model = flow.nn.parallel.DistributedDataParallel(model, broadcast_buffers=False)
# FIXME: model with DDP wrapper doesn't have model.module
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
logger.info(f"number of params: {n_parameters}")
if hasattr(model_without_ddp, "flops"):
flops = model_without_ddp.flops()
logger.info(f"number of GFLOPs: {flops / 1e9}")
lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
if config.AUG.MIXUP > 0.0:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif config.MODEL.LABEL_SMOOTHING > 0.0:
criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING)
else:
criterion = flow.nn.CrossEntropyLoss()
max_accuracy = 0.0
if config.TRAIN.AUTO_RESUME:
resume_file = auto_resume_helper(config.OUTPUT)
if resume_file:
if config.MODEL.RESUME:
logger.warning(
f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}"
)
config.defrost()
config.MODEL.RESUME = resume_file
config.freeze()
logger.info(f"auto resuming from {resume_file}")
else:
logger.info(f"no checkpoint found in {config.OUTPUT}, ignoring auto resume")
if config.MODEL.RESUME:
max_accuracy = load_checkpoint(
config, model_without_ddp, optimizer, lr_scheduler, logger
)
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%"
)
if config.EVAL_MODE:
return
if config.THROUGHPUT_MODE:
throughput(data_loader_val, model, logger)
return
logger.info("Start training")
start_time = time.time()
for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS):
data_loader_train.sampler.set_epoch(epoch)
train_one_epoch(
config,
model,
criterion,
data_loader_train,
optimizer,
epoch,
mixup_fn,
lr_scheduler,
)
if flow.env.get_rank() == 0 and (
epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)
):
save_checkpoint(
config,
epoch,
model_without_ddp,
max_accuracy,
optimizer,
lr_scheduler,
logger,
)
# validate after each epoch
acc1, acc5, loss = validate(config, data_loader_val, model)
logger.info(
f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%"
)
max_accuracy = max(max_accuracy, acc1)
logger.info(f"Max accuracy: {max_accuracy:.2f}%")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info("Training time {}".format(total_time_str))
def train_one_epoch(
config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler
):
model.train()
optimizer.zero_grad()
num_steps = len(data_loader)
batch_time = AverageMeter()
loss_meter = AverageMeter()
norm_meter = AverageMeter()
start = time.time()
end = time.time()
for idx, (samples, targets) in enumerate(data_loader):
samples = samples.cuda()
targets = targets.cuda()
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
outputs = model(samples)
if config.TRAIN.ACCUMULATION_STEPS > 1:
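# Gradient accumulation: scale the loss by 1/ACCUMULATION_STEPS and only call optimizer.step()
# (and the LR scheduler update) every ACCUMULATION_STEPS iterations.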
loss = criterion(outputs, targets)
loss = loss / config.TRAIN.ACCUMULATION_STEPS
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = flow.nn.utils.clip_grad_norm_(
model.parameters(), config.TRAIN.CLIP_GRAD
)
else:
grad_norm = get_grad_norm(model.parameters())
if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step_update(epoch * num_steps + idx)
else:
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
if config.TRAIN.CLIP_GRAD:
grad_norm = flow.nn.utils.clip_grad_norm_(
model.parameters(), config.TRAIN.CLIP_GRAD
)
else:
grad_norm = get_grad_norm(model.parameters())
optimizer.step()
lr_scheduler.step_update(epoch * num_steps + idx)
loss_meter.update(loss.item(), targets.size(0))
norm_meter.update(grad_norm)
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
lr = optimizer.param_groups[0]["lr"]
etas = batch_time.avg * (num_steps - idx)
logger.info(
f"Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t"
f"eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t"
f"time {batch_time.val:.4f} ({batch_time.avg:.4f})\t"
f"loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t"
f"grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t"
)
epoch_time = time.time() - start
logger.info(
f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}"
)
@flow.no_grad()
def validate(config, data_loader, model):
criterion = flow.nn.CrossEntropyLoss()
model.eval()
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (images, target) in enumerate(data_loader):
images = images.cuda()
target = target.cuda()
# compute output
output = model(images)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), target.size(0))
acc1_meter.update(acc1.item(), target.size(0))
acc5_meter.update(acc5.item(), target.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % config.PRINT_FREQ == 0:
logger.info(
f"Test: [{idx}/{len(data_loader)}]\t"
f"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
f"Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t"
f"Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t"
f"Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t"
)
logger.info(f" * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}")
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
@flow.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for idx, (images, _) in enumerate(data_loader):
images = images.cuda()
batch_size = images.shape[0]
for i in range(50):
model(images)
# TODO: add flow.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
tic2 = time.time()
logger.info(
f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}"
)
return
if __name__ == "__main__":
_, config = parse_option()
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
rank = flow.env.get_rank()
world_size = flow.env.get_world_size()
print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
else:
rank = -1
world_size = -1
seed = config.SEED + flow.env.get_rank()
flow.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
linear_scaled_lr = (
config.TRAIN.BASE_LR
* config.DATA.BATCH_SIZE
* flow.env.get_world_size()
/ 512.0
)
linear_scaled_warmup_lr = (
config.TRAIN.WARMUP_LR
* config.DATA.BATCH_SIZE
* flow.env.get_world_size()
/ 512.0
)
linear_scaled_min_lr = (
config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * flow.env.get_world_size() / 512.0
)
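# Worked example of the linear scaling rule above (illustrative numbers, not defaults):
# BASE_LR=5e-4, BATCH_SIZE=128, world_size=8 -> 5e-4 * 128 * 8 / 512 = 1e-3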
# gradient accumulation also needs to scale the learning rate
if config.TRAIN.ACCUMULATION_STEPS > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS
linear_scaled_warmup_lr = (
linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS
)
linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS
config.defrost()
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr
config.TRAIN.MIN_LR = linear_scaled_min_lr
config.freeze()
os.makedirs(config.OUTPUT, exist_ok=True)
logger = create_logger(
output_dir=config.OUTPUT,
dist_rank=flow.env.get_rank(),
name=f"{config.MODEL.ARCH}",
)
if flow.env.get_rank() == 0:
path = os.path.join(config.OUTPUT, "config.json")
with open(path, "w") as f:
f.write(config.dump())
logger.info(f"Full config saved to {path}")
# print config
logger.info(config.dump())
main(config)
|
[
"flowvision.loss.cross_entropy.SoftTargetCrossEntropy",
"utils.reduce_tensor",
"flowvision.utils.metrics.accuracy",
"argparse.ArgumentParser",
"oneflow.env.get_rank",
"flowvision.utils.metrics.AverageMeter",
"oneflow.nn.CrossEntropyLoss",
"models.build_model",
"data.build_loader",
"oneflow.no_grad",
"numpy.random.seed",
"utils.auto_resume_helper",
"flowvision.loss.cross_entropy.LabelSmoothingCrossEntropy",
"utils.load_checkpoint",
"oneflow.env.get_world_size",
"time.time",
"utils.save_checkpoint",
"os.makedirs",
"os.path.join",
"oneflow.manual_seed",
"config.get_config",
"oneflow.nn.parallel.DistributedDataParallel",
"optimizer.build_optimizer"
] |
[((9189, 9203), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (9201, 9203), True, 'import oneflow as flow\n'), ((10709, 10723), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (10721, 10723), True, 'import oneflow as flow\n'), ((754, 868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Flowvision image classification training and evaluation script"""'], {'add_help': '(False)'}), "(\n 'Flowvision image classification training and evaluation script',\n add_help=False)\n", (777, 868), False, 'import argparse\n'), ((3034, 3050), 'config.get_config', 'get_config', (['args'], {}), '(args)\n', (3044, 3050), False, 'from config import get_config\n'), ((3224, 3244), 'data.build_loader', 'build_loader', (['config'], {}), '(config)\n', (3236, 3244), False, 'from data import build_loader\n'), ((3313, 3332), 'models.build_model', 'build_model', (['config'], {}), '(config)\n', (3324, 3332), False, 'from models import build_model\n'), ((3395, 3425), 'optimizer.build_optimizer', 'build_optimizer', (['config', 'model'], {}), '(config, model)\n', (3410, 3425), False, 'from optimizer import build_optimizer\n'), ((3438, 3510), 'oneflow.nn.parallel.DistributedDataParallel', 'flow.nn.parallel.DistributedDataParallel', (['model'], {'broadcast_buffers': '(False)'}), '(model, broadcast_buffers=False)\n', (3478, 3510), True, 'import oneflow as flow\n'), ((5411, 5422), 'time.time', 'time.time', ([], {}), '()\n', (5420, 5422), False, 'import time\n'), ((6827, 6841), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6839, 6841), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((6859, 6873), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6871, 6873), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((6891, 6905), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6903, 6905), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((6919, 6930), 'time.time', 'time.time', ([], {}), '()\n', (6928, 6930), False, 'import time\n'), ((6941, 6952), 'time.time', 'time.time', ([], {}), '()\n', (6950, 6952), False, 'import time\n'), ((9262, 9288), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (9286, 9288), True, 'import oneflow as flow\n'), ((9324, 9338), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9336, 9338), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9356, 9370), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9368, 9370), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9388, 9402), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9400, 9402), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9420, 9434), 'flowvision.utils.metrics.AverageMeter', 'AverageMeter', ([], {}), '()\n', (9432, 9434), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9446, 9457), 'time.time', 'time.time', ([], {}), '()\n', (9455, 9457), False, 'import time\n'), ((11676, 11698), 'oneflow.manual_seed', 'flow.manual_seed', (['seed'], {}), '(seed)\n', (11692, 11698), True, 'import oneflow as flow\n'), ((11703, 11723), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11717, 11723), True, 'import numpy as np\n'), ((12755, 12796), 'os.makedirs', 'os.makedirs', (['config.OUTPUT'], {'exist_ok': '(True)'}), '(config.OUTPUT, exist_ok=True)\n', (12766, 
12796), False, 'import os\n'), ((4069, 4093), 'flowvision.loss.cross_entropy.SoftTargetCrossEntropy', 'SoftTargetCrossEntropy', ([], {}), '()\n', (4091, 4093), False, 'from flowvision.loss.cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\n'), ((4363, 4396), 'utils.auto_resume_helper', 'auto_resume_helper', (['config.OUTPUT'], {}), '(config.OUTPUT)\n', (4381, 4396), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((4926, 5001), 'utils.load_checkpoint', 'load_checkpoint', (['config', 'model_without_ddp', 'optimizer', 'lr_scheduler', 'logger'], {}), '(config, model_without_ddp, optimizer, lr_scheduler, logger)\n', (4941, 5001), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((6468, 6479), 'time.time', 'time.time', ([], {}), '()\n', (6477, 6479), False, 'import time\n'), ((8473, 8484), 'time.time', 'time.time', ([], {}), '()\n', (8482, 8484), False, 'import time\n'), ((9057, 9068), 'time.time', 'time.time', ([], {}), '()\n', (9066, 9068), False, 'import time\n'), ((9740, 9777), 'flowvision.utils.metrics.accuracy', 'accuracy', (['output', 'target'], {'topk': '(1, 5)'}), '(output, target, topk=(1, 5))\n', (9748, 9777), False, 'from flowvision.utils.metrics import accuracy, AverageMeter\n'), ((9794, 9813), 'utils.reduce_tensor', 'reduce_tensor', (['acc1'], {}), '(acc1)\n', (9807, 9813), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((9829, 9848), 'utils.reduce_tensor', 'reduce_tensor', (['acc5'], {}), '(acc5)\n', (9842, 9848), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((9864, 9883), 'utils.reduce_tensor', 'reduce_tensor', (['loss'], {}), '(loss)\n', (9877, 9883), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((10141, 10152), 'time.time', 'time.time', ([], {}), '()\n', (10150, 10152), False, 'import time\n'), ((11077, 11088), 'time.time', 'time.time', ([], {}), '()\n', (11086, 11088), False, 'import time\n'), ((11159, 11170), 'time.time', 'time.time', ([], {}), '()\n', (11168, 11170), False, 'import time\n'), ((11437, 11456), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (11454, 11456), True, 'import oneflow as flow\n'), ((11478, 11503), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (11501, 11503), True, 'import oneflow as flow\n'), ((11652, 11671), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (11669, 11671), True, 'import oneflow as flow\n'), ((12949, 12968), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (12966, 12968), True, 'import oneflow as flow\n'), ((12990, 13032), 'os.path.join', 'os.path.join', (['config.OUTPUT', '"""config.json"""'], {}), "(config.OUTPUT, 'config.json')\n", (13002, 13032), False, 'import os\n'), ((4159, 4225), 'flowvision.loss.cross_entropy.LabelSmoothingCrossEntropy', 'LabelSmoothingCrossEntropy', ([], {'smoothing': 'config.MODEL.LABEL_SMOOTHING'}), '(smoothing=config.MODEL.LABEL_SMOOTHING)\n', (4185, 4225), False, 'from flowvision.loss.cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\n'), ((4256, 4282), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (4280, 4282), True, 'import oneflow as flow\n'), ((5909, 6009), 'utils.save_checkpoint', 'save_checkpoint', (['config', 'epoch', 
'model_without_ddp', 'max_accuracy', 'optimizer', 'lr_scheduler', 'logger'], {}), '(config, epoch, model_without_ddp, max_accuracy, optimizer,\n lr_scheduler, logger)\n', (5924, 6009), False, 'from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor\n'), ((11849, 11874), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (11872, 11874), True, 'import oneflow as flow\n'), ((12003, 12028), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12026, 12028), True, 'import oneflow as flow\n'), ((12135, 12160), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12158, 12160), True, 'import oneflow as flow\n'), ((12877, 12896), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (12894, 12896), True, 'import oneflow as flow\n'), ((5775, 5794), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5792, 5794), True, 'import oneflow as flow\n'), ((8440, 8451), 'time.time', 'time.time', ([], {}), '()\n', (8449, 8451), False, 'import time\n'), ((10108, 10119), 'time.time', 'time.time', ([], {}), '()\n', (10117, 10119), False, 'import time\n')]
|
#
import json
import copy
import itertools
import multiprocessing
import os
import sys
import time
from functools import partial
import numpy as np
from numpy import linalg
from tqdm import tqdm
import rmsd
import quantum
import clockwork
import merge
import similarity_fchl19 as sim
from chemhelp import cheminfo
from rdkit import Chem
from rdkit.Chem import AllChem, ChemicalForceFields
from rdkit.Chem import rdmolfiles
from communication import rediscomm
import joblib
# Set local cache
cachedir = '.pycache'
memory = joblib.Memory(cachedir, verbose=0)
DEFAULT_DECIMALS = 5
# DEFAULT_DECIMALS = 12
def correct_userpath(filepath):
return os.path.expanduser(filepath)
def get_forcefield(molobj):
ffprop = ChemicalForceFields.MMFFGetMoleculeProperties(molobj)
forcefield = ChemicalForceFields.MMFFGetMoleculeForceField(molobj, ffprop) # 0.01 overhead
return ffprop, forcefield
def run_forcefield(ff, steps, energy=1e-2, force=1e-3):
"""
"""
try:
status = ff.Minimize(maxIts=steps, energyTol=energy, forceTol=force)
except RuntimeError:
return 1
return status
def run_forcefield_prime(ff, steps, energy=1e-2, force=1e-3):
try:
status = ff.Minimize(maxIts=steps, energyTol=energy, forceTol=force)
except RuntimeError:
return 1
return status
@memory.cache
def generate_torsion_combinations(total_torsions, n_tor):
combinations = clockwork.generate_torsion_combinations(total_torsions, n_tor)
combinations = list(combinations)
return combinations
def generate_torsions(total_torsions,
min_cost=0, max_cost=15, prefix="0"):
cost_input, cost_cost = clockwork.generate_costlist(total_torsions=total_torsions)
for (n_tor, resolution), cost in zip(cost_input[min_cost:max_cost], cost_cost[min_cost:max_cost]):
combinations = generate_torsion_combinations(total_torsions, n_tor)
for combination in combinations:
jobstr = prefix + ","
torstr = " ".join([str(x) for x in combination])
resstr = str(resolution)
jobstr += torstr + "," + resstr
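# jobstr format: "<molecule index>,<space-separated torsion indices>,<resolution>",
# e.g. "0,1 4 7,3" (illustrative values)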
print(jobstr)
return
def generate_torsions_specific(total_torsions, n_tor, resolution, prefix="0"):
sep = ","
combinations = generate_torsion_combinations(total_torsions, n_tor)
for combination in combinations:
jobstr = prefix + sep
torstr = " ".join([str(x) for x in combination])
resstr = str(resolution)
jobstr += torstr + sep + resstr
print(jobstr)
return
def generate_jobs(molobjs, args, tordb=None,
min_cost=0, max_cost=15):
# TODO Group by cost?
combos = args.jobcombos
n_molecules = len(molobjs)
if tordb is None:
tordb = [cheminfo.get_torsions(molobj) for molobj in molobjs]
# TODO currently only molecules 20-100 are processed
# for i in range(n_molecules)[:20]:
for i in range(n_molecules)[20:100]:
molobj = molobjs[i]
torsions = tordb[i]
total_torsions = len(torsions)
prefix = str(i)
if combos is None:
generate_torsions(total_torsions, prefix=prefix, min_cost=min_cost, max_cost=max_cost)
else:
for combo in combos:
combo = combo.split(",")
combo = [int(x) for x in combo]
generate_torsions_specific(total_torsions, combo[0], combo[1], prefix=prefix)
# quit()
#
# cost_input, cost_cost = clockwork.generate_costlist(total_torsions=total_torsions)
#
# for (n_tor, resolution), cost in zip(cost_input[min_cost:max_cost], cost_cost[min_cost:max_cost]):
#
# combinations = clockwork.generate_torsion_combinations(total_torsions, n_tor)
#
# for combination in combinations:
#
# # torsions = [tordb[x] for x in combination]
#
# jobstr = prefix + ","
# torstr = " ".join([str(x) for x in combination])
# resstr = str(resolution)
# jobstr += torstr + "," + resstr
# print(jobstr)
#
# quit()
return
def converge_clockwork(molobj, tordb, max_cost=2):
"""
molobj
torsions_idx
resolution
"""
atoms, xyz = cheminfo.molobj_to_xyz(molobj)
total_torsions = len(tordb)
print("total torsions", total_torsions)
# TODO Cache this
cost_input, cost_cost = clockwork.generate_costlist(total_torsions=total_torsions)
# TODO cost_cost and costfunc
offset = 6
max_cost = 1
offset = 1
max_cost = 7
# offset = 7
# max_cost = 1
for (n_tor, resolution), cost in zip(cost_input[offset:offset+max_cost], cost_cost[offset:offset+max_cost]):
start = time.time()
# Iterate over torsion combinations
combinations = clockwork.generate_torsion_combinations(total_torsions, n_tor)
cost_result_energies = []
cost_result_coordinates = []
C = 0
for combination in combinations:
# TODO Move this to function
com_start = time.time()
torsions = [tordb[i] for i in combination]
result_energies, result_coordinates = get_clockwork_conformations(molobj, torsions, resolution)
n_results = len(result_energies)
result_cost = [cost]*n_results
com_end = time.time()
# print("new confs", len(result_energies), "{:6.2f}".format(com_end-com_start))
# Merge
if len(cost_result_energies) == 0:
cost_result_energies += list(result_energies)
cost_result_coordinates += list(result_coordinates)
continue
else:
start_merge = time.time()
# TODO Move this to function
continue
idxs = merge.merge_asymmetric(atoms,
result_energies,
cost_result_energies,
result_coordinates,
cost_result_coordinates, decimals=2, debug=True)
for i, idx in enumerate(idxs):
C += 1
if len(idx) == 0:
cost_result_energies.append(result_energies[i])
cost_result_coordinates.append(result_coordinates[i])
end_merge = time.time()
print("total confs", len(cost_result_energies), "{:10.2f}".format(end_merge-start_merge))
continue
end = time.time()
print("conv", n_tor, resolution, cost, len(cost_result_energies), "tot: {:5.2f}".format(end-start), "per sec: {:5.2f}".format(cost/(end-start)))
quit()
return
def get_clockwork_conformations(molobj, torsions, resolution,
atoms=None,
debug=False,
timings=False):
"""
Get all conformation for specific cost
cost defined from torsions and resolution
"""
n_torsions = len(torsions)
if atoms is None:
atoms, xyz = cheminfo.molobj_to_xyz(molobj, atom_type="int")
del xyz
combinations = clockwork.generate_clockwork_combinations(resolution, n_torsions)
# Collect energies and coordinates
end_energies = []
end_coordinates = []
end_representations = []
first = True
for resolutions in combinations:
time_start = time.time()
# Get all conformations
c_energies, c_coordinates, c_states = get_conformations(molobj, torsions, resolutions)
N = len(c_energies)
# Filter unconverged
success = np.argwhere(c_states == 0)
success = success.flatten()
c_energies = c_energies[success]
c_coordinates = c_coordinates[success]
N2 = len(c_energies)
# Calculate representations
c_representations = [sim.get_representation(atoms, coordinates) for coordinates in c_coordinates]
c_representations = np.asarray(c_representations)
# Clean all new conformers for energies and similarity
idxs = clean_representations(atoms, c_energies, c_representations)
c_energies = c_energies[idxs]
c_coordinates = c_coordinates[idxs]
c_representations = c_representations[idxs]
if first:
first = False
end_energies += list(c_energies)
end_coordinates += list(c_coordinates)
end_representations += list(c_representations)
continue
# Asymmetrically add new conformers
idxs = merge.merge_asymmetric(atoms,
c_energies,
end_energies,
c_representations,
end_representations)
# Add new unique conformation to return collection
for i, idx in enumerate(idxs):
# if conformation already exists, continue
if len(idx) > 0: continue
# Add new unique conformation to collection
end_energies.append(c_energies[i])
end_coordinates.append(c_coordinates[i])
end_representations.append(c_representations[i])
time_end = time.time()
if timings:
timing = time_end - time_start
print("res time {:8.2f} cnf/sec - {:8.2f} tot sec".format(N/timing, timing))
continue
return end_energies, end_coordinates
def clean_representations(atoms, energies, representations):
"""
"""
N = len(energies)
# Keep index for only unique
# idxs = merge.merge_asymmetric(atoms,
# energies,
# energies,
# representations,
# representations)
idxs = merge.merge(atoms,
energies,
representations)
# Here all cost is the same, so just take the first conformer
# idxs = [idx[0] for idx in idxs]
# idxs = np.unique(idxs)
return idxs
def clean_conformers(atoms, energies, coordinates, states=None):
# Total count
N = len(energies)
if states is not None:
# Keep only converged states
success = np.argwhere(states == 0)
success = success.flatten()
# Only looked at converged states, discard rest
energies = energies[success]
coordinates = coordinates[success]
# TODO what about failed states?
# TODO Check high energies
# TODO change to asymetric merge (cleaner code)
# Keep index for only unique
idxs = merge.merge(atoms, energies, coordinates)
# Here all cost is the same, so just take the first conformer
idxs = [idx[0] for idx in idxs]
return idxs
def get_conformations(molobj, torsions, resolutions, method="sqm", debug=False):
molobj = copy.deepcopy(molobj)
n_torsions = len(torsions)
# init energy
energies = []
states = []
coordinates = []
# no constraints
ffprop, forcefield = get_forcefield(molobj)
# Forcefield generation failed
if forcefield is None:
return [], [], []
# Get conformer and origin
conformer = molobj.GetConformer()
origin = conformer.GetPositions()
# Origin angle
origin_angles = []
# HACK rdkit requires int type for index
torsions = [[int(y) for y in x] for x in torsions]
for idxs in torsions:
angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, *idxs)
origin_angles.append(angle)
# Get resolution angles
angle_iterator = clockwork.generate_angles(resolutions, n_torsions)
# set calculate func
if method == "ff":
# rdkit mmff
calculate_method = calculate_forcefield
cal_kwargs = {
"ffprop": ffprop,
"ff": forcefield
}
else:
atoms = cheminfo.molobj_to_atoms(molobj)
atoms_str = [cheminfo.convert_atom(atom) for atom in atoms]
smiles = quantum.get_smiles(atoms, origin)
calculate_method = calculate_mopac
cal_kwargs = {
"ffprop": ffprop,
"atoms": atoms,
"reference_smiles": smiles
}
for angle in angle_iterator:
# reset coordinates
set_coordinates(conformer, origin)
# Minimze with torsion angle constraint
# energy, pos, status = calculate_forcefield(molobj, conformer, torsions, origin_angles, angle,
# ffprop=ffprop,
# ff=forcefield)
if debug:
start = time.time()
energy, pos, status = calculate_method(molobj, conformer, torsions, origin_angles, angle, **cal_kwargs)
if debug:
end = time.time()
print("{:6.5f}s".format(end-start), "{:6.2f}".format(energy), status)
# collect
energies += [energy]
coordinates += [pos]
states += [status]
return np.asarray(energies), np.asarray(coordinates), np.asarray(states)
def get_energy(molobj):
ffprop, ff = get_forcefield(molobj)
# Get current energy
energy = ff.CalcEnergy()
return energy
def get_energies(molobj, coordinates,
ffprop=None,
ff=None):
if ffprop is None or ff is None:
ffprop, ff = get_forcefield(molobj)
# Get conformer and origin
conformer = molobj.GetConformer()
energies = []
for coordinate in coordinates:
set_coordinates(conformer, coordinate)
# Get current energy
energy = ff.CalcEnergy()
energies.append(energy)
return energies
def get_sdfcontent(sdffile, rtn_atoms=False):
coordinates = []
energies = []
reader = cheminfo.read_sdffile(sdffile)
molobjs = [molobj for molobj in reader]
atoms = ""
for molobj in molobjs:
atoms, coordinate = cheminfo.molobj_to_xyz(molobj)
energy = get_energy(molobj)
coordinates.append(coordinate)
energies.append(energy)
if rtn_atoms:
return molobjs[0], atoms, energies, coordinates
return energies, coordinates
def calculate_mopac(molobj, conformer, torsions, origin_angles, delta_angles,
delta=10**-7,
coord_decimals=6,
atoms=None,
ffprop=None,
reference_smiles=None):
sdfstr = cheminfo.molobj_to_sdfstr(molobj)
molobj_prime, status = cheminfo.sdfstr_to_molobj(sdfstr)
conformer_prime = molobj_prime.GetConformer()
# Setup constrained forcefield
# ffprop_prime, ffc = get_forcefield(molobj_prime)
ffc = ChemicalForceFields.MMFFGetMoleculeForceField(molobj_prime, ffprop)
# Set angles and constraints for all torsions
for i, angle in enumerate(delta_angles):
set_angle = origin_angles[i] + angle
# Set clockwork angle
try: Chem.rdMolTransforms.SetDihedralDeg(conformer_prime, *torsions[i], set_angle)
except: pass
# Set forcefield constraint
ffc.MMFFAddTorsionConstraint(*torsions[i], False,
set_angle-delta, set_angle+delta, 1.0e10)
# minimize with constraints
status = run_forcefield(ffc, 500)
# Set result
coordinates = conformer_prime.GetPositions()
coordinates = np.round(coordinates, coord_decimals) # rdkit hack, read description
smiles = ""
try:
energy, ocoordinates = quantum.optmize_conformation(atoms, coordinates)
status = 0
coordinates = ocoordinates
if reference_smiles is not None:
new_smiles = quantum.get_smiles(atoms, coordinates)
smiles = new_smiles
if new_smiles != reference_smiles:
status = 5
except:
energy = 0.0
status = 4
# if status == 0:
# atoms_str = [cheminfo.convert_atom(atom) for atom in atoms]
# txt = rmsd.set_coordinates(atoms_str, coordinates, title="")
# with open("_tmp_local_dump.xyz", 'a') as f:
# f.write(txt)
# f.write("\n")
#
# print(status, smiles)
return energy, coordinates, status
def calculate_forcefield(molobj, conformer, torsions, origin_angles, delta_angles,
ffprop=None,
ff=None,
delta=10**-7,
coord_decimals=6,
grad_threshold=100):
"""
Disclaimer: lots of hacks, sorry. Let me know if you have an alternative.
Note: There is a artificat where if delta < 10**-16 the FF will find a
*extremely* local minima with very high energy (un-physical)the FF will
find a *extremely* local minima with very high energy (un-physical).
Setting delta to 10**-6 (numerical noise) should fix this.
Note: rdkit forcefield restrained optimization will optimized to a *very*
local and very unphysical minima which the global optimizer cannot get out
from. Truncating the digits of the coordinates to six is a crude but
effective way to slight move the the molecule out of this in a reproducable
way.
"""
if ffprop is None or ff is None:
ffprop, ff = get_forcefield(molobj)
sdfstr = cheminfo.molobj_to_sdfstr(molobj)
molobj_prime, status = cheminfo.sdfstr_to_molobj(sdfstr)
conformer_prime = molobj_prime.GetConformer()
# Setup constrained forcefield
# ffprop_prime, ffc = get_forcefield(molobj_prime)
ffc = ChemicalForceFields.MMFFGetMoleculeForceField(molobj_prime, ffprop)
# Set angles and constrains for all torsions
for i, angle in enumerate(delta_angles):
set_angle = origin_angles[i] + angle
# Set clockwork angle
        try:
            Chem.rdMolTransforms.SetDihedralDeg(conformer_prime, *torsions[i], set_angle)
        except:
            pass
# Set forcefield constrain
ffc.MMFFAddTorsionConstraint(*torsions[i], False,
set_angle-delta, set_angle+delta, 1.0e10)
# minimize constrains
status = run_forcefield(ffc, 500)
# Set result
coordinates = conformer_prime.GetPositions()
coordinates = np.round(coordinates, coord_decimals) # rdkit hack, read description
cheminfo.conformer_set_coordinates(conformer, coordinates)
# minimize global
status = run_forcefield_prime(ff, 700, force=1e-4)
# Get current energy
energy = ff.CalcEnergy()
if status == 0:
grad = ff.CalcGrad()
grad = np.array(grad)
grad_norm = linalg.norm(grad)
if grad_norm > grad_threshold:
status = 4
debug = False
if energy > 1000 and debug:
print(torsions, origin_angles, delta_angles)
print(energy, status)
print("id")
print(id(molobj_prime))
print(id(molobj))
molobj_test, status = cheminfo.sdfstr_to_molobj(sdfstr)
coordinates = conformer.GetPositions()
cheminfo.molobj_set_coordinates(molobj_test, coordinates)
ffprop_t, ff_t = get_forcefield(molobj)
run_forcefield(ff_t, 500)
print(coordinates)
for idxs in torsions:
angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, *idxs)
print("ANGLE 1", angle)
f = open("_test_dumpsdf.sdf", 'w')
sdf = cheminfo.save_molobj(molobj)
f.write(sdf)
# prop, ff = get_forcefield(molobj)
# status = run_forcefield(ff, 500)
conformer = molobj_test.GetConformer()
for idxs in torsions:
angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, *idxs)
print("ANGLE 2",angle)
print(energy, status)
sdf = cheminfo.save_molobj(molobj_test)
f.write(sdf)
f.close()
quit()
# Get current positions
pos = conformer.GetPositions()
return energy, pos, status
def set_coordinates(conformer, coordinates):
for i, pos in enumerate(coordinates):
conformer.SetAtomPosition(i, pos)
return
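# A job string is "<molid>,<space-separated torsion indices>,<resolution>",
# e.g. "0,1 4 7,3" runs torsions 1, 4 and 7 of molecule 0 at clockwork resolution 3.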
def run_job(molobj, tordb, jobstr):
sep = ","
jobstr = jobstr.split(sep)
molid, torsions_idx, resolution = jobstr
molid = int(molid)
resolution = int(resolution)
torsions_idx = torsions_idx.split()
torsions_idx = [int(idx) for idx in torsions_idx]
torsions = [tordb[idx] for idx in torsions_idx]
job_energies, job_coordinates = get_clockwork_conformations(molobj, torsions, resolution)
return job_energies, job_coordinates
###
def run_jobfile(molobjs, tordbs, filename, threads=0):
# Prepare molobjs to xyz
origins = []
for molobj in molobjs:
atoms, xyz = cheminfo.molobj_to_xyz(molobj)
origins.append(xyz)
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
if threads > 0:
run_joblines_threads(origins, molobjs, tordbs, lines, threads=threads, dump=False)
else:
run_joblines(origins, molobjs, tordbs, lines, dump=False)
return True
def run_joblines_threads(origins, molobjs, tordbs, lines, threads=1, show_bar=True, dump=False):
# TODO Collect the conformers and return them
# list for each line
pool = multiprocessing.Pool(threads)
if not show_bar:
pool.map(partial(run_jobline, origins, molobjs, tordbs, dump=dump), lines)
else:
pbar = tqdm(total=len(lines))
for i, _ in enumerate(pool.imap_unordered(partial(run_jobline, origins, molobjs, tordbs, dump=dump), lines)):
pbar.update()
pbar.close()
return True
def run_joblines(origins, molobjs, tordbs, lines, dump=False):
    lines_energies = []
    lines_coordinates = []
    for i, line in enumerate(tqdm(lines)):
        job_energies, job_coordinates = run_jobline(origins, molobjs, tordbs, line, prefix=i, dump=dump)
        lines_energies.append(job_energies)
        lines_coordinates.append(job_coordinates)
    return lines_energies, lines_coordinates
def run_jobline(origins, molobjs, tordbs, line,
prefix=None,
debug=False,
dump=False):
sep = ","
# TODO multiple molobjs
line = line.strip()
# Locate molobj
line_s = line.split(sep)
molid = int(line_s[0])
molobj = molobjs[molid]
tordb = tordbs[molid]
# deep copy
molobj = copy.deepcopy(molobj)
cheminfo.molobj_set_coordinates(molobj, origins[molid])
    filename = None
    if dump:
        if prefix is None:
            prefix = line.replace(" ", "_").replace(",", ".")
        filename = "_tmp_data/{:}.sdf".format(prefix)
        # if os.path.exists(filename):
        #     return [],[]
job_start = time.time()
job_energies, job_coordinates = run_job(molobj, tordb, line)
job_end = time.time()
if debug:
print(line, "-", len(job_energies), "{:5.2f}".format(job_end-job_start), filename)
if dump:
if debug: print("saving {:} confs to".format(len(job_energies)), filename)
fsdf = open(filename, 'w')
for energy, coordinates in zip(job_energies, job_coordinates):
sdfstr = cheminfo.save_molobj(molobj, coordinates)
fsdf.write(sdfstr)
return job_energies, job_coordinates
#####
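# Expected torsion-file format (one molecule per line), for example:
#   0: 0 1 2 3, 1 2 3 4
# i.e. "<mol idx>: <comma-separated torsions>", each torsion being four
# space-separated atom indices.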
def read_tordb(filename):
with open(filename) as f:
lines = f.readlines()
tordb = []
for line in lines:
line = line.split(":")
idx = line[0]
torsions = line[1]
torsions = torsions.split(",")
torsions = [np.array(x.split(), dtype=int) for x in torsions]
torsions = np.asarray(torsions, dtype=int)
tordb.append(torsions)
return tordb
def main_redis(args):
redis_task = args.redis_task
if args.redis_connect is not None:
        redis_connection = args.redis_connect
else:
if not os.path.exists(args.redis_connect_file):
print("error: redis connection not set and file does not exists")
print("error: path", args.redis_connect_file)
quit()
with open(args.redis_connect_file, 'r') as f:
redis_connection = f.read().strip()
if args.debug:
print("redis: connecting to", redis_connection)
tasks = rediscomm.Taskqueue(redis_connection, redis_task)
# Prepare moldb
molecules = cheminfo.read_sdffile(args.sdf)
molecules = [molobj for molobj in molecules]
# Prepare tordb
if args.sdftor is None:
tordb = [cheminfo.get_torsions(molobj) for molobj in molecules]
else:
tordb = read_tordb(args.sdftor)
# Make origins
origins = []
for molobj in molecules:
xyz = cheminfo.molobj_get_coordinates(molobj)
origins.append(xyz)
# TODO if threads is > 0 then make more redis_workers
do_work = lambda x: redis_worker(origins, molecules, tordb, x, debug=args.debug)
tasks.main_loop(do_work)
return
def redis_worker(origins, moldb, tordb, lines, debug=False):
"""
    Process one work package (a single job line) pulled from the redis queue.
    TODO: wrap the work in try/except so that a failing package returns
    ("error " + job, traceback.format_exc()) and the traceback is printed,
    instead of the exception killing the worker.
"""
# TODO Prepare for multiple lines
line = lines
stamp1 = time.time()
energies, coordinates = run_jobline(origins, moldb, tordb, line, debug=debug)
# Prepare dump
results = prepare_redis_dump(energies, coordinates)
stamp2 = time.time()
print("workpackage {:} - {:5.3f}s".format(line, stamp2-stamp1))
here=1
line = line.split(",")
line[here] = line[here].split(" ")
line[here] = len(line[here])
line[here] = str(line[here])
storestring = "Results_" + "_".join(line)
status = ""
# Only log errors
status.strip()
if status == "":
status = None
return results, status, storestring
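# prepare_redis_dump packs each conformer as a compact JSON line
# "[energy,[x1,y1,z1,...]]" (coordinates rounded and flattened); the lines are
# newline-joined into one string, which redis_worker returns to the task queue
# together with a status and a store key.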
def prepare_redis_dump(energies, coordinates, coord_decimals=DEFAULT_DECIMALS):
results = []
for energy, coord in zip(energies, coordinates):
coord = np.round(coord, coord_decimals).flatten().tolist()
result = [energy, coord]
result = json.dumps(result)
result = result.replace(" ", "")
results.append(result)
results = "\n".join(results)
return results
def main_file(args):
suppl = cheminfo.read_sdffile(args.sdf)
molobjs = [molobj for molobj in suppl]
if args.sdftor:
tordb = read_tordb(args.sdftor)
else:
tordb = [cheminfo.get_torsions(molobj) for molobj in molobjs]
if args.jobfile:
run_jobfile(molobjs, tordb, args.jobfile, threads=args.threads)
else:
# TODO Base on tordb
generate_jobs(molobjs, args, tordb=tordb)
return
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version="1.0")
parser.add_argument('--sdf', type=str, help='SDF file', metavar='file', default="~/db/qm9s.sdf.gz")
parser.add_argument('--sdftor', type=str, help='Torsion indexes for the SDF file', metavar='file', default=None)
parser.add_argument('-j', '--threads', type=int, default=0)
parser.add_argument('--jobcombos', nargs="+", help="", metavar="str")
# OR
parser.add_argument('--jobfile', type=str, help='txt of jobs', metavar='file')
# OR
parser.add_argument('--redis-task', help="redis task name", default=None)
parser.add_argument('--redis-connect', '--redis-connect-str', help="connection to str redis server", default=None)
parser.add_argument('--redis-connect-file', help="connection to redis server", default="~/db/redis_connection")
parser.add_argument('--debug', action="store_true", help="", default=False)
args = parser.parse_args()
if "~" in args.sdf:
args.sdf = correct_userpath(args.sdf)
is_redis = False
is_file = False
if args.redis_task is not None:
if "~" in args.redis_connect_file:
args.redis_connect_file = correct_userpath(args.redis_connect_file)
is_redis = True
else:
is_file = True
if is_file:
main_file(args)
if is_redis:
main_redis(args)
return
if __name__ == '__main__':
main()
|
[
"clockwork.generate_torsion_combinations",
"similarity_fchl19.get_representation",
"quantum.optmize_conformation",
"chemhelp.cheminfo.convert_atom",
"communication.rediscomm.Taskqueue",
"chemhelp.cheminfo.sdfstr_to_molobj",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"rdkit.Chem.rdMolTransforms.GetDihedralDeg",
"chemhelp.cheminfo.save_molobj",
"os.path.exists",
"argparse.ArgumentParser",
"chemhelp.cheminfo.get_torsions",
"json.dumps",
"numpy.asarray",
"chemhelp.cheminfo.read_sdffile",
"clockwork.generate_costlist",
"numpy.round",
"merge.merge",
"os.path.expanduser",
"quantum.get_smiles",
"chemhelp.cheminfo.molobj_set_coordinates",
"chemhelp.cheminfo.molobj_to_sdfstr",
"chemhelp.cheminfo.conformer_set_coordinates",
"chemhelp.cheminfo.molobj_get_coordinates",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField",
"time.time",
"rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties",
"clockwork.generate_clockwork_combinations",
"chemhelp.cheminfo.molobj_to_xyz",
"tqdm.tqdm",
"joblib.Memory",
"clockwork.generate_angles",
"numpy.argwhere",
"functools.partial",
"multiprocessing.Pool",
"rdkit.Chem.rdMolTransforms.SetDihedralDeg",
"chemhelp.cheminfo.molobj_to_atoms",
"merge.merge_asymmetric"
] |
[((528, 562), 'joblib.Memory', 'joblib.Memory', (['cachedir'], {'verbose': '(0)'}), '(cachedir, verbose=0)\n', (541, 562), False, 'import joblib\n'), ((653, 681), 'os.path.expanduser', 'os.path.expanduser', (['filepath'], {}), '(filepath)\n', (671, 681), False, 'import os\n'), ((726, 779), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeProperties', 'ChemicalForceFields.MMFFGetMoleculeProperties', (['molobj'], {}), '(molobj)\n', (771, 779), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((797, 858), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj', 'ffprop'], {}), '(molobj, ffprop)\n', (842, 858), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((1434, 1496), 'clockwork.generate_torsion_combinations', 'clockwork.generate_torsion_combinations', (['total_torsions', 'n_tor'], {}), '(total_torsions, n_tor)\n', (1473, 1496), False, 'import clockwork\n'), ((1671, 1729), 'clockwork.generate_costlist', 'clockwork.generate_costlist', ([], {'total_torsions': 'total_torsions'}), '(total_torsions=total_torsions)\n', (1698, 1729), False, 'import clockwork\n'), ((4236, 4266), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (4258, 4266), False, 'from chemhelp import cheminfo\n'), ((4395, 4453), 'clockwork.generate_costlist', 'clockwork.generate_costlist', ([], {'total_torsions': 'total_torsions'}), '(total_torsions=total_torsions)\n', (4422, 4453), False, 'import clockwork\n'), ((7074, 7139), 'clockwork.generate_clockwork_combinations', 'clockwork.generate_clockwork_combinations', (['resolution', 'n_torsions'], {}), '(resolution, n_torsions)\n', (7115, 7139), False, 'import clockwork\n'), ((9573, 9618), 'merge.merge', 'merge.merge', (['atoms', 'energies', 'representations'], {}), '(atoms, energies, representations)\n', (9584, 9618), False, 'import merge\n'), ((10343, 10384), 'merge.merge', 'merge.merge', (['atoms', 'energies', 'coordinates'], {}), '(atoms, energies, coordinates)\n', (10354, 10384), False, 'import merge\n'), ((10602, 10623), 'copy.deepcopy', 'copy.deepcopy', (['molobj'], {}), '(molobj)\n', (10615, 10623), False, 'import copy\n'), ((11324, 11374), 'clockwork.generate_angles', 'clockwork.generate_angles', (['resolutions', 'n_torsions'], {}), '(resolutions, n_torsions)\n', (11349, 11374), False, 'import clockwork\n'), ((13356, 13386), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['sdffile'], {}), '(sdffile)\n', (13377, 13386), False, 'from chemhelp import cheminfo\n'), ((13945, 13978), 'chemhelp.cheminfo.molobj_to_sdfstr', 'cheminfo.molobj_to_sdfstr', (['molobj'], {}), '(molobj)\n', (13970, 13978), False, 'from chemhelp import cheminfo\n'), ((14006, 14039), 'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (14031, 14039), False, 'from chemhelp import cheminfo\n'), ((14191, 14258), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj_prime', 'ffprop'], {}), '(molobj_prime, ffprop)\n', (14236, 14258), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((14841, 14878), 'numpy.round', 'np.round', (['coordinates', 'coord_decimals'], {}), '(coordinates, coord_decimals)\n', (14849, 14878), True, 'import numpy as np\n'), ((16666, 16699), 'chemhelp.cheminfo.molobj_to_sdfstr', 'cheminfo.molobj_to_sdfstr', (['molobj'], {}), '(molobj)\n', (16691, 16699), False, 'from chemhelp import cheminfo\n'), ((16727, 16760), 
'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (16752, 16760), False, 'from chemhelp import cheminfo\n'), ((16912, 16979), 'rdkit.Chem.ChemicalForceFields.MMFFGetMoleculeForceField', 'ChemicalForceFields.MMFFGetMoleculeForceField', (['molobj_prime', 'ffprop'], {}), '(molobj_prime, ffprop)\n', (16957, 16979), False, 'from rdkit.Chem import AllChem, ChemicalForceFields\n'), ((17562, 17599), 'numpy.round', 'np.round', (['coordinates', 'coord_decimals'], {}), '(coordinates, coord_decimals)\n', (17570, 17599), True, 'import numpy as np\n'), ((17635, 17693), 'chemhelp.cheminfo.conformer_set_coordinates', 'cheminfo.conformer_set_coordinates', (['conformer', 'coordinates'], {}), '(conformer, coordinates)\n', (17669, 17693), False, 'from chemhelp import cheminfo\n'), ((20606, 20635), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (20626, 20635), False, 'import multiprocessing\n'), ((21588, 21609), 'copy.deepcopy', 'copy.deepcopy', (['molobj'], {}), '(molobj)\n', (21601, 21609), False, 'import copy\n'), ((21614, 21669), 'chemhelp.cheminfo.molobj_set_coordinates', 'cheminfo.molobj_set_coordinates', (['molobj', 'origins[molid]'], {}), '(molobj, origins[molid])\n', (21645, 21669), False, 'from chemhelp import cheminfo\n'), ((21912, 21923), 'time.time', 'time.time', ([], {}), '()\n', (21921, 21923), False, 'import time\n'), ((22005, 22016), 'time.time', 'time.time', ([], {}), '()\n', (22014, 22016), False, 'import time\n'), ((23452, 23501), 'communication.rediscomm.Taskqueue', 'rediscomm.Taskqueue', (['redis_connection', 'redis_task'], {}), '(redis_connection, redis_task)\n', (23471, 23501), False, 'from communication import rediscomm\n'), ((23540, 23571), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['args.sdf'], {}), '(args.sdf)\n', (23561, 23571), False, 'from chemhelp import cheminfo\n'), ((24412, 24423), 'time.time', 'time.time', ([], {}), '()\n', (24421, 24423), False, 'import time\n'), ((24597, 24608), 'time.time', 'time.time', ([], {}), '()\n', (24606, 24608), False, 'import time\n'), ((25464, 25495), 'chemhelp.cheminfo.read_sdffile', 'cheminfo.read_sdffile', (['args.sdf'], {}), '(args.sdf)\n', (25485, 25495), False, 'from chemhelp import cheminfo\n'), ((25924, 25949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (25947, 25949), False, 'import argparse\n'), ((4721, 4732), 'time.time', 'time.time', ([], {}), '()\n', (4730, 4732), False, 'import time\n'), ((4801, 4863), 'clockwork.generate_torsion_combinations', 'clockwork.generate_torsion_combinations', (['total_torsions', 'n_tor'], {}), '(total_torsions, n_tor)\n', (4840, 4863), False, 'import clockwork\n'), ((6498, 6509), 'time.time', 'time.time', ([], {}), '()\n', (6507, 6509), False, 'import time\n'), ((6989, 7036), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {'atom_type': '"""int"""'}), "(molobj, atom_type='int')\n", (7011, 7036), False, 'from chemhelp import cheminfo\n'), ((7334, 7345), 'time.time', 'time.time', ([], {}), '()\n', (7343, 7345), False, 'import time\n'), ((7551, 7577), 'numpy.argwhere', 'np.argwhere', (['(c_states == 0)'], {}), '(c_states == 0)\n', (7562, 7577), True, 'import numpy as np\n'), ((7903, 7932), 'numpy.asarray', 'np.asarray', (['c_representations'], {}), '(c_representations)\n', (7913, 7932), True, 'import numpy as np\n'), ((8488, 8587), 'merge.merge_asymmetric', 'merge.merge_asymmetric', (['atoms', 'c_energies', 'end_energies', 'c_representations', 
'end_representations'], {}), '(atoms, c_energies, end_energies, c_representations,\n end_representations)\n', (8510, 8587), False, 'import merge\n'), ((9064, 9075), 'time.time', 'time.time', ([], {}), '()\n', (9073, 9075), False, 'import time\n'), ((9978, 10002), 'numpy.argwhere', 'np.argwhere', (['(states == 0)'], {}), '(states == 0)\n', (9989, 10002), True, 'import numpy as np\n'), ((11184, 11237), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (11219, 11237), False, 'from rdkit import Chem\n'), ((11612, 11644), 'chemhelp.cheminfo.molobj_to_atoms', 'cheminfo.molobj_to_atoms', (['molobj'], {}), '(molobj)\n', (11636, 11644), False, 'from chemhelp import cheminfo\n'), ((11730, 11763), 'quantum.get_smiles', 'quantum.get_smiles', (['atoms', 'origin'], {}), '(atoms, origin)\n', (11748, 11763), False, 'import quantum\n'), ((12674, 12694), 'numpy.asarray', 'np.asarray', (['energies'], {}), '(energies)\n', (12684, 12694), True, 'import numpy as np\n'), ((12696, 12719), 'numpy.asarray', 'np.asarray', (['coordinates'], {}), '(coordinates)\n', (12706, 12719), True, 'import numpy as np\n'), ((12721, 12739), 'numpy.asarray', 'np.asarray', (['states'], {}), '(states)\n', (12731, 12739), True, 'import numpy as np\n'), ((13502, 13532), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (13524, 13532), False, 'from chemhelp import cheminfo\n'), ((14968, 15016), 'quantum.optmize_conformation', 'quantum.optmize_conformation', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (14996, 15016), False, 'import quantum\n'), ((17893, 17907), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (17901, 17907), True, 'import numpy as np\n'), ((17928, 17945), 'numpy.linalg.norm', 'linalg.norm', (['grad'], {}), '(grad)\n', (17939, 17945), False, 'from numpy import linalg\n'), ((18254, 18287), 'chemhelp.cheminfo.sdfstr_to_molobj', 'cheminfo.sdfstr_to_molobj', (['sdfstr'], {}), '(sdfstr)\n', (18279, 18287), False, 'from chemhelp import cheminfo\n'), ((18343, 18400), 'chemhelp.cheminfo.molobj_set_coordinates', 'cheminfo.molobj_set_coordinates', (['molobj_test', 'coordinates'], {}), '(molobj_test, coordinates)\n', (18374, 18400), False, 'from chemhelp import cheminfo\n'), ((18711, 18739), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj'], {}), '(molobj)\n', (18731, 18739), False, 'from chemhelp import cheminfo\n'), ((19082, 19115), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj_test'], {}), '(molobj_test)\n', (19102, 19115), False, 'from chemhelp import cheminfo\n'), ((20039, 20069), 'chemhelp.cheminfo.molobj_to_xyz', 'cheminfo.molobj_to_xyz', (['molobj'], {}), '(molobj)\n', (20061, 20069), False, 'from chemhelp import cheminfo\n'), ((21119, 21130), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (21123, 21130), False, 'from tqdm import tqdm\n'), ((22805, 22836), 'numpy.asarray', 'np.asarray', (['torsions'], {'dtype': 'int'}), '(torsions, dtype=int)\n', (22815, 22836), True, 'import numpy as np\n'), ((23873, 23912), 'chemhelp.cheminfo.molobj_get_coordinates', 'cheminfo.molobj_get_coordinates', (['molobj'], {}), '(molobj)\n', (23904, 23912), False, 'from chemhelp import cheminfo\n'), ((25283, 25301), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (25293, 25301), False, 'import json\n'), ((2775, 2804), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (2796, 2804), False, 'from chemhelp 
import cheminfo\n'), ((5060, 5071), 'time.time', 'time.time', ([], {}), '()\n', (5069, 5071), False, 'import time\n'), ((5348, 5359), 'time.time', 'time.time', ([], {}), '()\n', (5357, 5359), False, 'import time\n'), ((7798, 7840), 'similarity_fchl19.get_representation', 'sim.get_representation', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (7820, 7840), True, 'import similarity_fchl19 as sim\n'), ((11666, 11693), 'chemhelp.cheminfo.convert_atom', 'cheminfo.convert_atom', (['atom'], {}), '(atom)\n', (11687, 11693), False, 'from chemhelp import cheminfo\n'), ((12302, 12313), 'time.time', 'time.time', ([], {}), '()\n', (12311, 12313), False, 'import time\n'), ((12464, 12475), 'time.time', 'time.time', ([], {}), '()\n', (12473, 12475), False, 'import time\n'), ((14444, 14521), 'rdkit.Chem.rdMolTransforms.SetDihedralDeg', 'Chem.rdMolTransforms.SetDihedralDeg', (['conformer_prime', '*torsions[i]', 'set_angle'], {}), '(conformer_prime, *torsions[i], set_angle)\n', (14479, 14521), False, 'from rdkit import Chem\n'), ((15138, 15176), 'quantum.get_smiles', 'quantum.get_smiles', (['atoms', 'coordinates'], {}), '(atoms, coordinates)\n', (15156, 15176), False, 'import quantum\n'), ((17165, 17242), 'rdkit.Chem.rdMolTransforms.SetDihedralDeg', 'Chem.rdMolTransforms.SetDihedralDeg', (['conformer_prime', '*torsions[i]', 'set_angle'], {}), '(conformer_prime, *torsions[i], set_angle)\n', (17200, 17242), False, 'from rdkit import Chem\n'), ((18563, 18616), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (18598, 18616), False, 'from rdkit import Chem\n'), ((18947, 19000), 'rdkit.Chem.rdMolTransforms.GetDihedralDeg', 'Chem.rdMolTransforms.GetDihedralDeg', (['conformer', '*idxs'], {}), '(conformer, *idxs)\n', (18982, 19000), False, 'from rdkit import Chem\n'), ((20675, 20732), 'functools.partial', 'partial', (['run_jobline', 'origins', 'molobjs', 'tordbs'], {'dump': 'dump'}), '(run_jobline, origins, molobjs, tordbs, dump=dump)\n', (20682, 20732), False, 'from functools import partial\n'), ((22347, 22388), 'chemhelp.cheminfo.save_molobj', 'cheminfo.save_molobj', (['molobj', 'coordinates'], {}), '(molobj, coordinates)\n', (22367, 22388), False, 'from chemhelp import cheminfo\n'), ((23064, 23103), 'os.path.exists', 'os.path.exists', (['args.redis_connect_file'], {}), '(args.redis_connect_file)\n', (23078, 23103), False, 'import os\n'), ((23687, 23716), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (23708, 23716), False, 'from chemhelp import cheminfo\n'), ((25627, 25656), 'chemhelp.cheminfo.get_torsions', 'cheminfo.get_torsions', (['molobj'], {}), '(molobj)\n', (25648, 25656), False, 'from chemhelp import cheminfo\n'), ((5727, 5738), 'time.time', 'time.time', ([], {}), '()\n', (5736, 5738), False, 'import time\n'), ((5835, 5976), 'merge.merge_asymmetric', 'merge.merge_asymmetric', (['atoms', 'result_energies', 'cost_result_energies', 'result_coordinates', 'cost_result_coordinates'], {'decimals': '(2)', 'debug': '(True)'}), '(atoms, result_energies, cost_result_energies,\n result_coordinates, cost_result_coordinates, decimals=2, debug=True)\n', (5857, 5976), False, 'import merge\n'), ((6347, 6358), 'time.time', 'time.time', ([], {}), '()\n', (6356, 6358), False, 'import time\n'), ((20840, 20897), 'functools.partial', 'partial', (['run_jobline', 'origins', 'molobjs', 'tordbs'], {'dump': 'dump'}), '(run_jobline, origins, molobjs, tordbs, dump=dump)\n', (20847, 20897), 
False, 'from functools import partial\n'), ((25182, 25213), 'numpy.round', 'np.round', (['coord', 'coord_decimals'], {}), '(coord, coord_decimals)\n', (25190, 25213), True, 'import numpy as np\n')]
|
import pickle
from matplotlib import pyplot as plt
import torch
import seaborn as sns
import numpy as np
from src.src_vvCV_MD1P.stein_operators import *
from src.src_vvCV_MD1P.sv_CV import *
from src.src_vvCV_MD1P.vv_CV_MD1P import *
from src.src_vvCV_MD1P.vv_CV_FixB_MD1P import *
from src.src_vvCV_MD1P.vv_CV_unbalanced_FixB_MD1P import *
# ======================
# Step Function
# ======================
# Set vv-CV kernel
my_base_kernel = rbf_kernel
my_lr = 0.0003
my_poly_ker_parm = torch.Tensor([1,1])
no_replica_ty2 = 1
no_epochs_ty2 = 400
no_points_per_func_ty2= 40
for i in range(no_replica_ty2):
print("REP {} out of {}-----------".format(i+1, no_replica_ty2 ))
dim = 1
factor = torch.ones(1) * 1
mu = torch.zeros(dim, dtype=torch.float) + 0
    var = torch.eye(dim, dtype=torch.float) * factor  # Must use eye() here
print(mu, var)
def my_func_1(X):
return (0.5 + (2 * (X >= 0) - 1) * 1.5) * torch.ones(1, dtype=torch.float)
def my_func_2(X):
return (X >= 0) * torch.ones(1, dtype=torch.float)
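    # my_func_1 is the low-fidelity step (-1 for x < 0, 2 for x >= 0);
    # my_func_2 is the high-fidelity step (0 for x < 0, 1 for x >= 0).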
# Training samples
print("REP {} out of {}-----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(5)
X1 = mu + torch.sqrt(factor) * torch.randn(no_points_per_func_ty2, dim)
Y1 = my_func_1(X1)
# --- For MD1P
torch.manual_seed(6)
X2 = mu + torch.sqrt(factor) * torch.randn(no_points_per_func_ty2, dim)
Y2 = my_func_2(X2)
# --- For 1D1P
Y1_X2 = my_func_1(X2)
Ys_on_X2 = torch.stack((Y1_X2, Y2), dim=1).squeeze()
# Scores on X's
mu = torch.zeros(dim, 1)
cov = var
score_X1 = multivariate_Normal_score(mu, cov, X1)
score_X1.size()
score_X2 = multivariate_Normal_score(mu, cov, X2)
xall = torch.stack((X1, X2), dim=0)
xall.size()
yall = torch.stack((Y1, Y2), dim=0)
yall.size()
score_all = torch.stack((score_X1, score_X2), dim=0)
score_all.size()
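    # xall / yall / score_all stack the two per-function datasets along a new
    # leading dimension, one slice per output function.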
# f1
print("REP {} out of {} --- sv-CV-f1 -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc1 = SV_CV_scalarvaluedfuncs_model(penalized_ls_objective_scalarvaluedfunc, stein_base_kernel_MV_2, my_base_kernel, X1, Y1, score_X1)
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc1.do_tune_kernelparams_negmllk(batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0., lr=0.02, epochs=15, verbose=True)
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc1.do_optimize_sv_CV(regularizer_const = 1e-5, batch_size = 10, lr = my_lr, epochs = no_epochs_ty2, verbose = True)
# f2
print("REP {} out of {}--- sv-CV-f2 -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc2 = SV_CV_scalarvaluedfuncs_model(penalized_ls_objective_scalarvaluedfunc,stein_base_kernel_MV_2, my_base_kernel, X2, Y2, score_X2)
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc2.do_tune_kernelparams_negmllk(batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0., lr=0.02, epochs=15, verbose=True)
torch.manual_seed(0)
Ty2_SCV_scalarvaluedfunc2.do_optimize_sv_CV(regularizer_const=1e-5, batch_size=10, lr=my_lr, epochs=no_epochs_ty2, verbose=True)
# vv-CV: MD1P with B fixed
print("REP {} out of {} --- vv-CV: MD1P with B fixed -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc_fixB = VV_CV_vectorvaluedfuncs_model_fixB(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_fixB, prior_kernel=stein_base_kernel_MV_2, base_kernel=my_base_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc_fixB.do_tune_kernelparams_negmllk(batch_size_tune=5, flag_if_use_medianheuristic=False, beta_cstkernel=0., lr=0.02, epochs=15, verbose=True) # bs 5; lr 0.2; epochs 5
torch.manual_seed(0)
# set B
Ty2_SCV_vectorvaluedfunc_fixB.B = torch.Tensor([[0.1, 0.01], [0.01,0.1]])
Ty2_SCV_vectorvaluedfunc_fixB.do_optimize_vv_CV(regularizer_const=1e-5, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True)
# ---------------
# vv-CV: MD1P with B fixed -- ANOTHER B
print("REP {} out of {} --- vv-CV: MD1P with B fixed --- Another B-----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc_fixB_another = VV_CV_vectorvaluedfuncs_model_fixB(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_fixB, prior_kernel=stein_base_kernel_MV_2, base_kernel=my_base_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc_fixB_another.do_tune_kernelparams_negmllk(batch_size_tune=5, flag_if_use_medianheuristic=False, beta_cstkernel=0., lr=0.02, epochs=15, verbose=True) # bs 5; lr 0.2; epochs 5
torch.manual_seed(0)
# set B
Ty2_SCV_vectorvaluedfunc_fixB_another.B = torch.Tensor([[0.5, 0.01], [0.01, 0.5]]) # a value close to estimated B
Ty2_SCV_vectorvaluedfunc_fixB_another.do_optimize_vv_CV(regularizer_const=1e-5, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True) # 0.002 ; 5
# ---------------
# vv-CV: MD1P with learning B
print("REP {} out of {} --- vv-CV: MD1P with learning B -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc = VV_CV_vectorvaluedfuncs_model(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc, prior_kernel=stein_base_kernel_MV_2, base_kernel=my_base_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc.do_tune_kernelparams_negmllk(batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0., lr=0.02, epochs=15, verbose=True) # bs 5; lr 0.2; epochs 5
torch.manual_seed(0)
Ty2_SCV_vectorvaluedfunc.do_optimize_vv_CV(regularizer_const=1e-5, regularizer_const_FB=1, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True) # 0.002; 5
# --------------
# sv-polynomials: f1
print("REP {} out of {} --- sv-polynomials: f1 -----------".format(i + 1, no_replica_ty2))
torch.manual_seed(0)
Ty2_SCV_svpolynomials_f1 = SV_CV_scalarvaluedfuncs_model(penalized_ls_objective_scalarvaluedfunc, stein_base_kernel_MV_2, polynomial_kernel, X1, Y1, score_X1)
Ty2_SCV_svpolynomials_f1.optim_base_kernel_parms = my_poly_ker_parm
torch.manual_seed(0)
Ty2_SCV_svpolynomials_f1.do_optimize_sv_CV(regularizer_const=1e-5, batch_size=10, lr=my_lr, epochs=no_epochs_ty2, verbose=True) # 0.002
# sv-polynomials: f2
print("REP {} out of {} --- sv-polynomials: f2 -----------".format(i + 1, no_replica_ty2))
torch.manual_seed(0)
Ty2_SCV_svpolynomials_f2 = SV_CV_scalarvaluedfuncs_model(penalized_ls_objective_scalarvaluedfunc, stein_base_kernel_MV_2, polynomial_kernel, X2, Y2, score_X2)
Ty2_SCV_svpolynomials_f2.optim_base_kernel_parms = my_poly_ker_parm
torch.manual_seed(0)
Ty2_SCV_svpolynomials_f2.do_optimize_sv_CV(regularizer_const=1e-5, batch_size=10, lr=my_lr, epochs=no_epochs_ty2, verbose=True) # 0.002
# vv-polynomials: MD1P with B fixed
print("REP {} out of {} --- vv-polynomials: MD1P with B fixed -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vvpolynomials_MD1P_fixB = VV_CV_vectorvaluedfuncs_model_fixB(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_fixB, prior_kernel=stein_base_kernel_MV_2, base_kernel=polynomial_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
Ty2_SCV_vvpolynomials_MD1P_fixB.optim_base_kernel_parms = my_poly_ker_parm
torch.manual_seed(0)
# set B
Ty2_SCV_vvpolynomials_MD1P_fixB.B = torch.Tensor([[0.1, 0.01], [0.01,0.1]])
Ty2_SCV_vvpolynomials_MD1P_fixB.do_optimize_vv_CV(regularizer_const=1e-5, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True)
# vv-polynomials: MD1P with B fixed --- ANOTHER B
print("REP {} out of {} --- vv-polynomials: MD1P with B fixed ---Another B -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vvpolynomials_MD1P_fixB_another = VV_CV_vectorvaluedfuncs_model_fixB(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_fixB, prior_kernel=stein_base_kernel_MV_2, base_kernel=polynomial_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
Ty2_SCV_vvpolynomials_MD1P_fixB_another.optim_base_kernel_parms = my_poly_ker_parm
torch.manual_seed(0)
# set B
Ty2_SCV_vvpolynomials_MD1P_fixB_another.B = torch.Tensor([[0.5, 0.01], [0.01, 0.5]])
Ty2_SCV_vvpolynomials_MD1P_fixB_another.do_optimize_vv_CV(regularizer_const=1e-5, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True)
# vv-polynomials: MD1P with learning B
print("REP {} out of {} --- vv-polynomials: MD1P with learning B -----------".format(i+1, no_replica_ty2 ))
torch.manual_seed(0)
Ty2_SCV_vvpolynomials_MD1P = VV_CV_vectorvaluedfuncs_model(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc, prior_kernel=stein_base_kernel_MV_2, base_kernel=polynomial_kernel, Xs_tensor=xall, Ys_tensor=yall, scores_Tensor=score_all)
Ty2_SCV_vvpolynomials_MD1P.optim_base_kernel_parms = my_poly_ker_parm
torch.manual_seed(0)
Ty2_SCV_vvpolynomials_MD1P.do_optimize_vv_CV(regularizer_const=1e-5, regularizer_const_FB=1, batch_size=5, lr=my_lr, epochs=no_epochs_ty2, verbose=True)
# Define a helper function to caculate the density of some samples from a standard normal distributions
def helper_standard_Gaussian_PDF(x):
assert x.size(1)==1, "Dim should be 1"
n = x.size(0)
d = x.size(1)
prob_densities_at_x = torch.zeros(n)
for i in range(n):
cur_x = x[i].squeeze()
        prob_densities_at_x[i] = ((2. * np.pi) ** (-0.5)) * torch.exp(-0.5 * cur_x.pow(2))
return prob_densities_at_x
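# Equivalent vectorised form of the density above (shown for clarity, not used):
#   (2. * np.pi) ** (-0.5) * torch.exp(-0.5 * x.squeeze() ** 2)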
## Plot a fitted line for the squared exponential kernel.
sns.set_style("white")
all_x = torch.cat((X1, X2), dim=0)
all_x_dens = helper_standard_Gaussian_PDF(all_x)
all_x = all_x.squeeze()
all_x.size()
X1_sorted_values, X1_sorted_indices = X1.squeeze().sort()
X2_sorted_values, X2_sorted_indices = X2.squeeze().sort()
test_x = torch.unique(torch.sort(torch.cat((X1_sorted_values, X2_sorted_values, torch.linspace(-3, 3, 100)))).values)
test_x = test_x.unsqueeze(1)
test_x.size()
test_x_sorted_values, test_x_sorted_indices = test_x.squeeze().sort()
score_X1 = multivariate_Normal_score(mu, cov, X1)
score_X2 = multivariate_Normal_score(mu, cov, X2)
score_all_x = multivariate_Normal_score(mu, cov, all_x.unsqueeze(1))
score_test_x = multivariate_Normal_score(mu, cov, test_x )
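# The fitted CV surrogates evaluated below have the form K(x, X) @ theta @ B + c
# for the vector-valued fits and K(x, X) @ theta + c for the scalar-valued ones,
# where K is the Stein kernel between test points and training points.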
vv_SEk_theta_hat = Ty2_SCV_vectorvaluedfunc.fitting_obj.theta.detach().clone()
vv_SEk_B = Ty2_SCV_vectorvaluedfunc.fitting_obj.B.detach().clone()
vv_SEk_est = Ty2_SCV_vectorvaluedfunc.fitting_obj.c.detach().clone().squeeze()
with torch.no_grad():
vv_SEk_k_XX = Ty2_SCV_vectorvaluedfunc.fitting_obj.kernel_obj.cal_stein_base_kernel(test_x, all_x.unsqueeze(1), score_test_x, score_all_x)
vv_SEk_y_fitted = vv_SEk_k_XX @ vv_SEk_theta_hat @ vv_SEk_B + vv_SEk_est
vv_SEk_y_fitted.size()
vv_SEk_data_sorted_values, vv_SEk_data_sorted_indices = all_x.sort()
vv_1polnk_theta_hat = Ty2_SCV_vvpolynomials_MD1P.fitting_obj.theta.detach().clone()
vv_1polnk_B = Ty2_SCV_vvpolynomials_MD1P.fitting_obj.B.detach().clone()
vv_1polnk_est = Ty2_SCV_vvpolynomials_MD1P.fitting_obj.c.detach().clone().squeeze()
with torch.no_grad():
vv_1polnk_k_XX = Ty2_SCV_vvpolynomials_MD1P.fitting_obj.kernel_obj.cal_stein_base_kernel(test_x, all_x.unsqueeze(1), score_test_x, score_all_x)
vv_1polnk_y_fitted = vv_1polnk_k_XX @ vv_1polnk_theta_hat @ vv_1polnk_B + vv_1polnk_est
vv_1polnk_y_fitted.size()
vv_1polnk_data_sorted_values, vv_1polnk_data_sorted_indices = all_x.sort()
sv_SEk_LF_theta_hat = Ty2_SCV_scalarvaluedfunc1.fitting_obj.theta.detach().clone()
sv_SEk_LF_est = Ty2_SCV_scalarvaluedfunc1.fitting_obj.c.clone().detach()
with torch.no_grad():
sv_SEk_LF_k_XX = Ty2_SCV_scalarvaluedfunc1.fitting_obj.kernel_obj.cal_stein_base_kernel(test_x, X1, score_test_x, score_X1)
sv_SEk_LF_y_fitted = sv_SEk_LF_k_XX @ sv_SEk_LF_theta_hat + sv_SEk_LF_est
sv_SEk_LF_y_fitted = sv_SEk_LF_y_fitted.squeeze()
sv_SEk_LF_data_sorted_values, sv_SEk_LF_data_sorted_indices = X1.squeeze().sort()
sv_SEk_HF_theta_hat = Ty2_SCV_scalarvaluedfunc2.fitting_obj.theta.detach().clone()
sv_SEk_HF_est = Ty2_SCV_scalarvaluedfunc2.fitting_obj.c.clone().detach()
with torch.no_grad():
sv_SEk_HF_k_XX = Ty2_SCV_scalarvaluedfunc2.fitting_obj.kernel_obj.cal_stein_base_kernel(test_x, X2, score_test_x, score_X2)
sv_SEk_HF_y_fitted = sv_SEk_HF_k_XX @ sv_SEk_HF_theta_hat + sv_SEk_HF_est
sv_SEk_HF_y_fitted = sv_SEk_HF_y_fitted.squeeze()
sv_SEk_HF_data_sorted_values, sv_SEk_HF_data_sorted_indices = X2.squeeze().sort()
x_step = np.linspace(-3,3, 3)
y_LF = [-1, -1, 2]
y_HF = [0, 0 , 1]
x_illu = np.linspace(-3, 3, 500)
# Extract Saved Outputs
with open('../data/Step_funcion_all_data.pkl', 'rb') as input:
no_replica_ty2 = pickle.load(input)
no_epochs_ty2 = pickle.load(input)
no_points_per_func_ty2 = pickle.load(input)
#
large_saved_MC_ests_ty2 = pickle.load(input)
large_save_est_scalar_f1_ty2 = pickle.load(input)
large_save_closed_form_sols_scalar_f1_ty2 = pickle.load(input)
large_save_est_scalar_f2_ty2 = pickle.load(input)
large_save_closed_form_sols_scalar_f2_ty2 = pickle.load(input)
large_save_est_vecfunc_ty2 = pickle.load(input)
large_save_est_vecfunc_fixB_ty2 = pickle.load(input)
large_save_est_vecfunc_fixB_another_ty2 = pickle.load(input)
# sv-polynomials
large_save_est_scalar_f1_svpolynomials_ty2 = pickle.load(input)
large_save_closed_form_sols_scalar_f1_svpolynomials_ty2 = pickle.load(input)
large_save_est_scalar_f2_svpolynomials_ty2 = pickle.load(input)
large_save_closed_form_sols_scalar_f2_svpolynomials_ty2 = pickle.load(input)
large_save_est_vecfunc_vvpolynomials_ty2_MD1P = pickle.load(input)
large_save_est_vecfunc_vvpolynomials_fixB_ty2 = pickle.load(input)
large_save_est_vecfunc_vvpolynomials_fixB_another_ty2 = pickle.load(input)
with torch.no_grad():
true_vals = [0.5, 0.5]
fig, axs = plt.subplots(1, 4, sharex=False, sharey=False)
fig.set_figwidth(20)
sns.set_style("ticks") # sns.set_style("whitegrid")
clrs = sns.color_palette("husl", 16)
start_pos = 0
axs[2].set_xlabel('Number of Epochs', fontsize=20)
axs[3].set_xlabel('Number of Epochs', fontsize=20)
axs[2].tick_params(labelsize=20)
axs[3].tick_params(labelsize=20)
show_indx = np.arange(0, 410, 20)
show_indx = show_indx - 1
show_indx[0] = 0
show_indx
axs[2].set_title("Squared-exponential kernel CVs", fontsize=18)
axs[3].set_title("First-order polynomial kernel CVs", fontsize=18)
axs[2].set_ylabel(r'Absolute error for $\Pi_H [f_H]$', fontsize=18)
# fig.set_figwidth(12)
mc_f1_mean_ty2 = (large_saved_MC_ests_ty2[:, 0] - true_vals[0]).abs().mean().repeat(1, no_epochs_ty2)
mc_f2_mean_ty2 = (large_saved_MC_ests_ty2[:, 1] - true_vals[1]).abs().mean().repeat(1, no_epochs_ty2)
mc_f1_std_ty2 = (large_saved_MC_ests_ty2[:, 0] - true_vals[0]).abs().std(dim=0) / (torch.ones(1) * no_replica_ty2).sqrt().repeat(1, no_epochs_ty2)
mc_f2_std_ty2 = (large_saved_MC_ests_ty2[:, 1] - true_vals[1]).abs().std(dim=0) / (torch.ones(1) * no_replica_ty2).sqrt().repeat(1, no_epochs_ty2)
axs[2].axhline(mc_f2_mean_ty2[0, 0], color='black', label='MC')
axs[3].axhline(mc_f2_mean_ty2[0, 0], color='black', label='MC')
axs[2].axhline((large_save_closed_form_sols_scalar_f2_ty2 - true_vals[1]).abs().mean().detach().numpy(), color='black', linestyle='-.', label='CF')
axs[3].axhline((large_save_closed_form_sols_scalar_f2_svpolynomials_ty2 - true_vals[1]).abs().mean().detach().numpy(),color='black', linestyle='-.', label='CF')
# -------
sv_f1_mean_ty2 = (large_save_est_scalar_f1_ty2 - true_vals[0]).abs().mean(dim=0).detach().numpy()
sv_f2_mean_ty2 = (large_save_est_scalar_f2_ty2 - true_vals[1]).abs().mean(dim=0).detach().numpy()
sv_f1_std_ty2 = (large_save_est_scalar_f1_ty2 - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
sv_f2_std_ty2 = (large_save_est_scalar_f2_ty2 - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[2].plot(show_indx + 1, sv_f2_mean_ty2[show_indx], c=clrs[1], marker='+', label='CV')
axs[2].fill_between(show_indx + 1, sv_f2_mean_ty2[show_indx] - sv_f2_std_ty2[show_indx], sv_f2_mean_ty2[show_indx] + sv_f2_std_ty2[show_indx], alpha=0.3, facecolor=clrs[1])
# -------
vv_f1_mean_ty2_fixB = (large_save_est_vecfunc_fixB_ty2[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vv_f2_mean_ty2_fixB = (large_save_est_vecfunc_fixB_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vv_f1_std_ty2_fixB = (large_save_est_vecfunc_fixB_ty2[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vv_f2_std_ty2_fixB = (large_save_est_vecfunc_fixB_ty2[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[2].plot(show_indx + 1, (large_save_est_vecfunc_fixB_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[show_indx],\
c=clrs[7], marker='x', label='vv-CV with Fixed B (1)')
axs[2].fill_between(show_indx + 1, vv_f2_mean_ty2_fixB[show_indx] - vv_f2_std_ty2_fixB[show_indx], vv_f2_mean_ty2_fixB[show_indx] + vv_f2_std_ty2_fixB[show_indx], alpha=0.3, facecolor=clrs[7])
# -------
vv_f1_mean_ty2_fixB_another = (large_save_est_vecfunc_fixB_another_ty2[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vv_f2_mean_ty2_fixB_another = (large_save_est_vecfunc_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vv_f1_std_ty2_fixB_another = (large_save_est_vecfunc_fixB_another_ty2[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vv_f2_std_ty2_fixB_another = (large_save_est_vecfunc_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[2].plot(show_indx + 1,(large_save_est_vecfunc_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[ show_indx], c=clrs[3], marker='x', label='vv-CV with Fixed B (2)')
axs[2].fill_between(show_indx + 1, vv_f2_mean_ty2_fixB_another[show_indx] - vv_f2_std_ty2_fixB_another[show_indx],vv_f2_mean_ty2_fixB_another[show_indx] + vv_f2_std_ty2_fixB_another[show_indx], alpha=0.3, facecolor=clrs[5])
# -------
vv_f1_mean_ty2 = (large_save_est_vecfunc_ty2[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vv_f2_mean_ty2 = (large_save_est_vecfunc_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vv_f1_std_ty2 = (large_save_est_vecfunc_ty2[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vv_f2_std_ty2 = (large_save_est_vecfunc_ty2[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[2].plot(show_indx + 1,(large_save_est_vecfunc_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[show_indx], c=clrs[10], marker='.', label='vv-CV with Estimated B')
axs[2].fill_between(show_indx + 1, vv_f2_mean_ty2[show_indx] - vv_f2_std_ty2[show_indx], vv_f2_mean_ty2[show_indx] + vv_f2_std_ty2[show_indx], alpha=0.3, facecolor=clrs[10])
# -------
svpoly_f1_mean_ty2 = (large_save_est_scalar_f1_svpolynomials_ty2 - true_vals[0]).abs().mean(dim=0).detach().numpy()
svpoly_f2_mean_ty2 = (large_save_est_scalar_f2_svpolynomials_ty2 - true_vals[1]).abs().mean(dim=0).detach().numpy()
svpoly_f1_std_ty2 = (large_save_est_scalar_f1_svpolynomials_ty2 - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
svpoly_f2_std_ty2 = (large_save_est_scalar_f2_svpolynomials_ty2 - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[3].plot(show_indx + 1, svpoly_f2_mean_ty2[show_indx], c=clrs[1], marker='+', label='CV')
axs[3].fill_between(show_indx + 1, svpoly_f2_mean_ty2[show_indx] - svpoly_f2_std_ty2[show_indx], svpoly_f2_mean_ty2[show_indx] + svpoly_f2_std_ty2[show_indx], alpha=0.3, facecolor=clrs[1])
# -------
vvpoly_f1_mean_ty2_fixB = (large_save_est_vecfunc_vvpolynomials_fixB_ty2[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vvpoly_f2_mean_ty2_fixB = (large_save_est_vecfunc_vvpolynomials_fixB_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vvpoly_f1_std_ty2_fixB = (large_save_est_vecfunc_vvpolynomials_fixB_ty2[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vvpoly_f2_std_ty2_fixB = (large_save_est_vecfunc_vvpolynomials_fixB_ty2[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[3].plot(show_indx + 1, (large_save_est_vecfunc_vvpolynomials_fixB_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[show_indx], c=clrs[7], marker='x', label='vv-CV with Fixed B (1)')
axs[3].fill_between(show_indx + 1, vvpoly_f2_mean_ty2_fixB[show_indx] - vvpoly_f2_std_ty2_fixB[show_indx], vvpoly_f2_mean_ty2_fixB[show_indx] + vvpoly_f2_std_ty2_fixB[show_indx], alpha=0.3, facecolor=clrs[7])
# -------
vvpoly_f1_mean_ty2_fixB_another = (large_save_est_vecfunc_vvpolynomials_fixB_another_ty2[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vvpoly_f2_mean_ty2_fixB_another = (large_save_est_vecfunc_vvpolynomials_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vvpoly_f1_std_ty2_fixB_another = (large_save_est_vecfunc_vvpolynomials_fixB_another_ty2[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vvpoly_f2_std_ty2_fixB_another = (large_save_est_vecfunc_vvpolynomials_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[3].plot(show_indx + 1, (large_save_est_vecfunc_vvpolynomials_fixB_another_ty2[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[show_indx], c=clrs[3], marker='x', label='vv-CV with Fixed B (2)')
axs[3].fill_between(show_indx + 1,vvpoly_f2_mean_ty2_fixB_another[show_indx] - vvpoly_f2_std_ty2_fixB_another[show_indx], vvpoly_f2_mean_ty2_fixB_another[show_indx] + vvpoly_f2_std_ty2_fixB_another[show_indx],alpha=0.3, facecolor=clrs[5])
# -------
vvpoly_f1_mean_ty2 = (large_save_est_vecfunc_vvpolynomials_ty2_MD1P[:, :, 0] - true_vals[0]).abs().mean(dim=0).detach().numpy()
vvpoly_f2_mean_ty2 = (large_save_est_vecfunc_vvpolynomials_ty2_MD1P[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()
vvpoly_f1_std_ty2 = (large_save_est_vecfunc_vvpolynomials_ty2_MD1P[:, :, 0] - true_vals[0]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
vvpoly_f2_std_ty2 = (large_save_est_vecfunc_vvpolynomials_ty2_MD1P[:, :, 1] - true_vals[1]).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica_ty2)
axs[3].plot(show_indx + 1, (large_save_est_vecfunc_vvpolynomials_ty2_MD1P[:, :, 1] - true_vals[1]).abs().mean(dim=0).detach().numpy()[show_indx], c=clrs[10], marker='.', label='vv-CV with Estimated B')
axs[3].fill_between(show_indx + 1, vvpoly_f2_mean_ty2[show_indx] - vvpoly_f2_std_ty2[show_indx], vvpoly_f2_mean_ty2[show_indx] + vvpoly_f2_std_ty2[show_indx], alpha=0.3, facecolor=clrs[10])
# If want to include the legend inside the figure
axs[2].legend(loc="upper right", fontsize=13)
# sns.set_style("ticks") # sns.set_style("whitegrid")
axs[0].set_title("Low-fidelity model", fontsize=18)
axs[1].set_title("High-fidelity model", fontsize=18)
axs[0].set_ylim([-3, 3])
axs[1].set_ylim([-3, 3])
axs[2].set_ylim([0.03, 0.07])
axs[3].set_ylim([0.03, 0.07])
axs[0].plot(test_x_sorted_values, vv_SEk_y_fitted[:, 0][test_x_sorted_indices], color='blue', ls='dotted',label='vv-CV')
axs[0].plot(test_x_sorted_values, vv_1polnk_y_fitted[:, 0][test_x_sorted_indices], color='orange', ls='dotted',label='vv-CV (1st order polyn. k)')
    axs[0].plot(test_x_sorted_values, sv_SEk_LF_y_fitted[test_x_sorted_indices], color='red', ls='dotted',label='CV (squared-exponential k)')
axs[0].step(x_step, y_LF, color='black', label=r'$f(x)$')
axs[1].set_xlabel("x", fontsize=20)
axs[1].set_ylabel("y", fontsize=20)
axs[1].tick_params(labelsize=20)
    axs[1].plot(test_x_sorted_values, vv_SEk_y_fitted[:, 1][test_x_sorted_indices], color='blue', ls='dotted',label='vv-CV (squared-exponential k)')
axs[1].plot(test_x_sorted_values, vv_1polnk_y_fitted[:, 1][test_x_sorted_indices], color='orange', ls='dotted', label='vv-CV (1st order polyn. k)')
    axs[1].plot(test_x_sorted_values, sv_SEk_HF_y_fitted[test_x_sorted_indices], color='red', ls='dotted', label='CV (squared-exponential k)')
axs[1].step(x_step, y_HF, color='black', label=r'$f(x)$')
axs[0].set_xlabel("x", fontsize=20)
axs[0].set_ylabel("y", fontsize=20)
axs[0].tick_params(labelsize=20)
axs[1].legend(fontsize=13)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=15)
plt.show()
fig.savefig('step_function_plot.pdf')
|
[
"numpy.sqrt",
"torch.sqrt",
"seaborn.set_style",
"numpy.arange",
"seaborn.color_palette",
"torch.eye",
"numpy.linspace",
"torch.randn",
"torch.Tensor",
"pickle.load",
"torch.cat",
"matplotlib.pyplot.show",
"torch.manual_seed",
"torch.stack",
"torch.zeros",
"torch.no_grad",
"matplotlib.pyplot.subplots",
"torch.linspace",
"torch.ones"
] |
[((495, 515), 'torch.Tensor', 'torch.Tensor', (['[1, 1]'], {}), '([1, 1])\n', (507, 515), False, 'import torch\n'), ((9837, 9859), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (9850, 9859), True, 'import seaborn as sns\n'), ((9868, 9894), 'torch.cat', 'torch.cat', (['(X1, X2)'], {'dim': '(0)'}), '((X1, X2), dim=0)\n', (9877, 9894), False, 'import torch\n'), ((12788, 12809), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(3)'], {}), '(-3, 3, 3)\n', (12799, 12809), True, 'import numpy as np\n'), ((12856, 12879), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(500)'], {}), '(-3, 3, 500)\n', (12867, 12879), True, 'import numpy as np\n'), ((1162, 1182), 'torch.manual_seed', 'torch.manual_seed', (['(5)'], {}), '(5)\n', (1179, 1182), False, 'import torch\n'), ((1307, 1327), 'torch.manual_seed', 'torch.manual_seed', (['(6)'], {}), '(6)\n', (1324, 1327), False, 'import torch\n'), ((1563, 1582), 'torch.zeros', 'torch.zeros', (['dim', '(1)'], {}), '(dim, 1)\n', (1574, 1582), False, 'import torch\n'), ((1737, 1765), 'torch.stack', 'torch.stack', (['(X1, X2)'], {'dim': '(0)'}), '((X1, X2), dim=0)\n', (1748, 1765), False, 'import torch\n'), ((1793, 1821), 'torch.stack', 'torch.stack', (['(Y1, Y2)'], {'dim': '(0)'}), '((Y1, Y2), dim=0)\n', (1804, 1821), False, 'import torch\n'), ((1854, 1894), 'torch.stack', 'torch.stack', (['(score_X1, score_X2)'], {'dim': '(0)'}), '((score_X1, score_X2), dim=0)\n', (1865, 1894), False, 'import torch\n'), ((2016, 2036), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2033, 2036), False, 'import torch\n'), ((2202, 2222), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2219, 2222), False, 'import torch\n'), ((2396, 2416), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2413, 2416), False, 'import torch\n'), ((2658, 2678), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2675, 2678), False, 'import torch\n'), ((2843, 2863), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2860, 2863), False, 'import torch\n'), ((3036, 3056), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3053, 3056), False, 'import torch\n'), ((3328, 3348), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3345, 3348), False, 'import torch\n'), ((3608, 3628), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3625, 3628), False, 'import torch\n'), ((3828, 3848), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3845, 3848), False, 'import torch\n'), ((3900, 3940), 'torch.Tensor', 'torch.Tensor', (['[[0.1, 0.01], [0.01, 0.1]]'], {}), '([[0.1, 0.01], [0.01, 0.1]])\n', (3912, 3940), False, 'import torch\n'), ((4263, 4283), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4280, 4283), False, 'import torch\n'), ((4551, 4571), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4568, 4571), False, 'import torch\n'), ((4779, 4799), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4796, 4799), False, 'import torch\n'), ((4859, 4899), 'torch.Tensor', 'torch.Tensor', (['[[0.5, 0.01], [0.01, 0.5]]'], {}), '([[0.5, 0.01], [0.01, 0.5]])\n', (4871, 4899), False, 'import torch\n'), ((5256, 5276), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5273, 5276), False, 'import torch\n'), ((5521, 5541), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5538, 5541), False, 'import torch\n'), ((5738, 5758), 'torch.manual_seed', 
'torch.manual_seed', (['(0)'], {}), '(0)\n', (5755, 5758), False, 'import torch\n'), ((6075, 6095), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6092, 6095), False, 'import torch\n'), ((6335, 6355), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6352, 6355), False, 'import torch\n'), ((6623, 6643), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6640, 6643), False, 'import torch\n'), ((6883, 6903), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (6900, 6903), False, 'import torch\n'), ((7202, 7222), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (7219, 7222), False, 'import torch\n'), ((7566, 7586), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (7583, 7586), False, 'import torch\n'), ((7641, 7681), 'torch.Tensor', 'torch.Tensor', (['[[0.1, 0.01], [0.01, 0.1]]'], {}), '([[0.1, 0.01], [0.01, 0.1]])\n', (7653, 7681), False, 'import torch\n'), ((8002, 8022), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (8019, 8022), False, 'import torch\n'), ((8382, 8402), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (8399, 8402), False, 'import torch\n'), ((8463, 8503), 'torch.Tensor', 'torch.Tensor', (['[[0.5, 0.01], [0.01, 0.5]]'], {}), '([[0.5, 0.01], [0.01, 0.5]])\n', (8475, 8503), False, 'import torch\n'), ((8812, 8832), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (8829, 8832), False, 'import torch\n'), ((9156, 9176), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (9173, 9176), False, 'import torch\n'), ((9585, 9599), 'torch.zeros', 'torch.zeros', (['n'], {}), '(n)\n', (9596, 9599), False, 'import torch\n'), ((10796, 10811), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10809, 10811), False, 'import torch\n'), ((11377, 11392), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11390, 11392), False, 'import torch\n'), ((11900, 11915), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11913, 11915), False, 'import torch\n'), ((12419, 12434), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12432, 12434), False, 'import torch\n'), ((12990, 13008), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13001, 13008), False, 'import pickle\n'), ((13030, 13048), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13041, 13048), False, 'import pickle\n'), ((13079, 13097), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13090, 13097), False, 'import pickle\n'), ((13135, 13153), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13146, 13153), False, 'import pickle\n'), ((13190, 13208), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13201, 13208), False, 'import pickle\n'), ((13258, 13276), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13269, 13276), False, 'import pickle\n'), ((13313, 13331), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13324, 13331), False, 'import pickle\n'), ((13381, 13399), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13392, 13399), False, 'import pickle\n'), ((13434, 13452), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13445, 13452), False, 'import pickle\n'), ((13492, 13510), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13503, 13510), False, 'import pickle\n'), ((13558, 13576), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13569, 13576), False, 'import pickle\n'), ((13649, 13667), 'pickle.load', 'pickle.load', (['input'], {}), 
'(input)\n', (13660, 13667), False, 'import pickle\n'), ((13731, 13749), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13742, 13749), False, 'import pickle\n'), ((13800, 13818), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13811, 13818), False, 'import pickle\n'), ((13882, 13900), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13893, 13900), False, 'import pickle\n'), ((13954, 13972), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (13965, 13972), False, 'import pickle\n'), ((14026, 14044), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (14037, 14044), False, 'import pickle\n'), ((14106, 14124), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (14117, 14124), False, 'import pickle\n'), ((14135, 14150), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14148, 14150), False, 'import torch\n'), ((14194, 14240), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'sharex': '(False)', 'sharey': '(False)'}), '(1, 4, sharex=False, sharey=False)\n', (14206, 14240), True, 'from matplotlib import pyplot as plt\n'), ((14270, 14292), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (14283, 14292), True, 'import seaborn as sns\n'), ((14335, 14364), 'seaborn.color_palette', 'sns.color_palette', (['"""husl"""', '(16)'], {}), "('husl', 16)\n", (14352, 14364), True, 'import seaborn as sns\n'), ((14584, 14605), 'numpy.arange', 'np.arange', (['(0)', '(410)', '(20)'], {}), '(0, 410, 20)\n', (14593, 14605), True, 'import numpy as np\n'), ((25185, 25195), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25193, 25195), True, 'from matplotlib import pyplot as plt\n'), ((711, 724), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (721, 724), False, 'import torch\n'), ((738, 773), 'torch.zeros', 'torch.zeros', (['dim'], {'dtype': 'torch.float'}), '(dim, dtype=torch.float)\n', (749, 773), False, 'import torch\n'), ((788, 821), 'torch.eye', 'torch.eye', (['dim'], {'dtype': 'torch.float'}), '(dim, dtype=torch.float)\n', (797, 821), False, 'import torch\n'), ((16201, 16224), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (16208, 16224), True, 'import numpy as np\n'), ((16327, 16350), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (16334, 16350), True, 'import numpy as np\n'), ((16993, 17016), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (17000, 17016), True, 'import numpy as np\n'), ((17136, 17159), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (17143, 17159), True, 'import numpy as np\n'), ((17985, 18008), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (17992, 18008), True, 'import numpy as np\n'), ((18144, 18167), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (18151, 18167), True, 'import numpy as np\n'), ((18937, 18960), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (18944, 18960), True, 'import numpy as np\n'), ((19070, 19093), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (19077, 19093), True, 'import numpy as np\n'), ((19833, 19856), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (19840, 19856), True, 'import numpy as np\n'), ((19977, 20000), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (19984, 20000), True, 'import numpy as np\n'), ((20717, 20740), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), 
'(no_replica_ty2)\n', (20724, 20740), True, 'import numpy as np\n'), ((20878, 20901), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (20885, 20901), True, 'import numpy as np\n'), ((21794, 21817), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (21801, 21817), True, 'import numpy as np\n'), ((21971, 21994), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (21978, 21994), True, 'import numpy as np\n'), ((22862, 22885), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (22869, 22885), True, 'import numpy as np\n'), ((23018, 23041), 'numpy.sqrt', 'np.sqrt', (['no_replica_ty2'], {}), '(no_replica_ty2)\n', (23025, 23041), True, 'import numpy as np\n'), ((946, 978), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float'}), '(1, dtype=torch.float)\n', (956, 978), False, 'import torch\n'), ((1030, 1062), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float'}), '(1, dtype=torch.float)\n', (1040, 1062), False, 'import torch\n'), ((1197, 1215), 'torch.sqrt', 'torch.sqrt', (['factor'], {}), '(factor)\n', (1207, 1215), False, 'import torch\n'), ((1218, 1258), 'torch.randn', 'torch.randn', (['no_points_per_func_ty2', 'dim'], {}), '(no_points_per_func_ty2, dim)\n', (1229, 1258), False, 'import torch\n'), ((1342, 1360), 'torch.sqrt', 'torch.sqrt', (['factor'], {}), '(factor)\n', (1352, 1360), False, 'import torch\n'), ((1363, 1403), 'torch.randn', 'torch.randn', (['no_points_per_func_ty2', 'dim'], {}), '(no_points_per_func_ty2, dim)\n', (1374, 1403), False, 'import torch\n'), ((1490, 1521), 'torch.stack', 'torch.stack', (['(Y1_X2, Y2)'], {'dim': '(1)'}), '((Y1_X2, Y2), dim=1)\n', (1501, 1521), False, 'import torch\n'), ((10180, 10206), 'torch.linspace', 'torch.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (10194, 10206), False, 'import torch\n'), ((15210, 15223), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (15220, 15223), False, 'import torch\n'), ((15361, 15374), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (15371, 15374), False, 'import torch\n')]
|
import re
import itertools
import os
import pandas as pd
import numpy as np
from prettytable import PrettyTable
from tqdm import tqdm
def get_char(seq):
"""split string int sequence of chars returned in pandas.Series"""
chars = list(seq)
return pd.Series(chars)
class SeqProcessConfig(object):
def __init__(self, seq_len, seq_stend, ewindow_stend, offset_val):
self.seq_len = seq_len
# entries are with respect to offset value (i.e. start and end indices)
self.seq_stend = seq_stend
self.ewindow_stend = ewindow_stend
self.offset_val = offset_val
# determine the range (start and end) from the offset provided
self._determine_offset_range()
# map the indices to 0-based indexing
self._translate_to_0based_indexing()
def _determine_offset_range(self):
# printing indices
st = self.offset_val
if st <= 0:
# for example -4,25 (equivalent to 30 elements where 0 is included)
end = self.seq_len - abs(st) - 1
else:
# for example 1,30 (equivalent to 30 elements)
end = self.seq_len + st - 1
self.offset_st = st
self.offset_end = end
def _translate_to_0based_indexing(self):
offset = self.offset_val
# edit window mapping
st, end = self.ewindow_stend
self.ewindow_st = st - offset
self.ewindow_end = end - offset
# sequence mapping
st, end = self.seq_stend
self.seq_st = st - offset
self.seq_end = end - offset
def __str__(self):
tb = PrettyTable()
tb.field_names = ['Sequence processing Config', 'Value']
tb.add_row(['sequence length', self.seq_len])
tb.add_row(['sequence start index (0-based indexing)', self.seq_st])
tb.add_row(['sequence end index (0-based indexing)', self.seq_end])
tb.add_row(['editable window start index (0-based indexing)', self.ewindow_st])
tb.add_row(['editable window end index (0-based indexing)', self.ewindow_end])
tb.add_row(['offset start numbering', self.offset_st])
tb.add_row(['offset end numbering', self.offset_end])
return tb.get_string()
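# Usage sketch (illustrative only; the window values below are hypothetical, not taken
# from any real dataset): SeqProcessConfig translates offset-based coordinates into
# 0-based indices and can be printed to inspect the mapping.
#     seqconfig = SeqProcessConfig(seq_len=20, seq_stend=(1, 20), ewindow_stend=(4, 8), offset_val=1)
#     print(seqconfig)  # PrettyTable listing the 0-based sequence/editable-window indices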
class HaplotypeSeqProcessor(object):
def __init__(self, base_editor, conversion_nucl, seqconfig, max_num_targets=12):
self.base_editor = base_editor
self.conversion_nucl = conversion_nucl
self.seqconfig = seqconfig
self.max_num_targets = max_num_targets
self.describe()
def describe(self):
tb = PrettyTable()
tb.field_names = ['Description', 'Value']
tb.add_row(['Base editor', self.base_editor])
tb.add_row(['Target nucleotide', self.conversion_nucl[0]])
tb.add_row(['Conversion nucleotide', self.conversion_nucl[1]])
tb.add_row(['Maximum number of targets considered', self.max_num_targets])
print(tb)
print(self.seqconfig)
def _determine_target_complem_nucl(self):
tb_nucl, cb_nucl = self.conversion_nucl
return tb_nucl, cb_nucl
def remove_viol_seqs(self, df, inpseq_col):
"""
Args:
df: dataframe
inpseq_col: string, column name of input sequence such as "Inp_seq"
"""
print('--- checking for violating seqs ---')
seq_df = df.copy()
tb_nucl, __ = self.conversion_nucl
seqlen = self.seqconfig.seq_len
viol_seqs = []
cond_letter = ~seq_df[inpseq_col].str.contains(tb_nucl)
        cond_len = seq_df[inpseq_col].str.len() != seqlen
df_clean = seq_df
if cond_len.any() or cond_letter.any():
cond = cond_letter | cond_len
print(seq_df.loc[cond, inpseq_col])
df_clean = seq_df.loc[~cond].copy()
df_clean.reset_index(inplace=True, drop=True)
return df_clean
def _check_duplicates(self, gdf, outcomeseq_colname, pbar, prg_counter):
gdf_clean = gdf.copy()
gdf_clean.drop_duplicates(subset=[outcomeseq_colname], inplace=True, ignore_index=True)
        prg_counter += 1
        pbar.update(1)  # tqdm.update expects an increment per processed group, not a running total
return gdf_clean
def preprocess_df(self, df, inpseq_colnames, outcomeseq_colname):
"""
Args:
df: dataframe
inpseq_colnames: list of column names such as ['seq_id', 'Inp_seq']
outcomeseq_colname: string, column name of observed outcome sequences
"""
print('--- removing duplicates (if found!) ---')
prg_counter=0
dfg = df.groupby(by=inpseq_colnames)
pbar = tqdm(total=dfg.ngroups)
df_clean = dfg.apply(self._check_duplicates, outcomeseq_colname, pbar, prg_counter)
pbar.close()
df_clean.reset_index(inplace=True, drop=True)
return df_clean
def renormalize_outcome_prop(self, df, by_cols, prop_col):
""" renormalize the outcome sequence probability (optional, in case it is not normalized!)
Args:
df:pd.DataFrame, read data frame
by_cols: list, input sequence column name/s such as ['seq_id', 'Inp_seq']
            prop_col: string, outcome proportion (i.e. probability) column name
.. Note:
this method is run after using :func:`preprocess_df`
"""
print('--- renormalizing outcome proportion ---')
a = df.groupby(by=by_cols, as_index=False)[prop_col].sum()
a['denom'] = a[prop_col]
b = df.copy()
b = b.merge(a, on=by_cols, how='left')
validate_df(b)
b['prob'] = b[f'{prop_col}_x']/b['denom']
b[prop_col] = b['prob']
return b
def _generate_combinatorial_conversion(self, tbase_indices, conv_nl):
num_pos = len(tbase_indices)
comb_nucl_lst= []
conv_nl_lst = list(conv_nl)
for __ in range(num_pos):
comb_nucl_lst.append(conv_nl_lst)
return itertools.product(*comb_nucl_lst)
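    # Worked example (illustrative): with conversion_nucl = ('C', 'T') and two identified
    # target positions, the product above enumerates the 2**2 edit patterns
    #     ('C', 'C'), ('C', 'T'), ('T', 'C'), ('T', 'T')
    # i.e. every edited/unedited combination over the target bases.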
def generate_combinatorial_outcome(self, df):
""" Generates combinatorial outcome sequences based on identified canonical bases
Args:
df:pd.DataFrame, processed dataframe using :func:`process_inp_outp_df` function
"""
print('--- generating edit combinations ---')
# print(df.columns)
# print(df.shape)
seqconfig = self.seqconfig
conv_nl = self.conversion_nucl
tb_nucl, cb_nucl = conv_nl
e_st = seqconfig.ewindow_st
e_end = seqconfig.ewindow_end
seqlen = seqconfig.seq_len
max_num_targets=self.max_num_targets
res_df_lst = []
target_cols = ['seq_id', 'Inp_seq', 'Outp_seq']
for row in tqdm(df.iterrows()):
indx, record = row
rec_nucl = record[[f'Inp_L{i}'for i in range(e_st+1,e_end+2)]]
# print('indx:', indx)
# print(rec_nucl)
tbase_indices = np.where(rec_nucl==tb_nucl)[0]
# print('tbase_indices:\n', tbase_indices)
if len(tbase_indices) > max_num_targets:
tbase_indices = tbase_indices[:max_num_targets]
# print('e_st:', e_st)
# print('e_end:', e_end)
# print('tbase_indices:\n', tbase_indices)
comb_nucl_opt= self._generate_combinatorial_conversion(tbase_indices, conv_nl)
comb_nucl_opt = list(comb_nucl_opt)
num_options = len(comb_nucl_opt)
# print(comb_nucl_opt)
comb_nucl_arr = np.repeat(rec_nucl.values.reshape(1,-1),num_options,axis=0)
# print(comb_nucl_arr)
for i_arr, opt in enumerate(comb_nucl_opt):
# print('i_arr:', i_arr)
# print('opt:',opt)
comb_nucl_arr[i_arr, tbase_indices]= opt
# print(comb_nucl_arr)
comb_nucl_df = pd.DataFrame(comb_nucl_arr)
comb_nucl_df.columns = [f'Inp_L{i}'for i in range(e_st+1,e_end+2)]
# print(comb_nucl_df)
pre_ew_col = record[[f'Inp_L{i}'for i in range(1,e_st+1)]]
post_ew_col = record[[f'Inp_L{i}'for i in range(e_end+2,seqlen+1)]]
a = pd.DataFrame(np.repeat(pre_ew_col.values.reshape(1,-1), num_options, axis=0))
a.columns = [f'Inp_L{i}'for i in range(1,e_st+1)]
# print(a)
b = pd.DataFrame(np.repeat(post_ew_col.values.reshape(1,-1), num_options, axis=0))
b.columns = [f'Inp_L{i}'for i in range(e_end+2,seqlen+1)]
# print(b)
# print(record['Inp_seq'])
inpseq_df = pd.DataFrame([record['Inp_seq']]*num_options)
inpseq_df.columns = ['Inp_seq']
seqid_df = pd.DataFrame([record['seq_id']]*num_options)
seqid_df.columns = ['seq_id']
res_df = pd.concat([seqid_df,inpseq_df, a, comb_nucl_df, b], axis=1)
# print()
# print(res_df)
res_df['Outp_seq'] = res_df[[f'Inp_L{i}'for i in range(1,seqlen+1)]].astype(str).sum(axis=1)
# print(res_df)
res_df_lst.append(res_df[target_cols])
# print('-'*15)
comb_final_df = pd.concat(res_df_lst, axis=0)
# print('comb_final_df:\n', comb_final_df.columns)
return comb_final_df
def process_inp_outp_df(self, df, seqid_col, t_inp_col, t_outp_col, outcome_prop_col):
"""
df:pd.DataFrame, read data frame
t_inp_col: string, input sequence column name
t_outp_col: string, output sequence column name
None, when performing inference
            outcome_prop_col: string, outcome proportion (i.e. probability of outcome sequence) column name
None, when performing inference
"""
# print()
# print('__ process_inp_outp __')
# print('df.columns:', df.columns)
# print()
max_num_targets = self.max_num_targets
pbar = tqdm(total=100)
seq_len = self.seqconfig.seq_len
tb_nucl, cb_nucl = self._determine_target_complem_nucl()
inp_df = self._process_df(df, seqid_col, t_inp_col, tb_nucl, 'Inp')
if t_outp_col is not None:
pbar.update(25)
outp_df = self._process_df(df, seqid_col, t_outp_col, cb_nucl, 'Outp')
pbar.update(50)
conv_mat = inp_df[[f'Inp_M{i}' for i in range(1,seq_len+1)]].values & \
outp_df[[f'Outp_M{i}' for i in range(1,seq_len+1)]].values
conv_df = pd.DataFrame(conv_mat)
conv_df.columns = [f'conv{tb_nucl}{cb_nucl}_{i}' for i in range(1,seq_len+1)]
pbar.update(75)
if outcome_prop_col is not None:
proc_df = pd.concat([inp_df, outp_df, conv_df, pd.DataFrame(df[outcome_prop_col])], axis=1)
else:
proc_df = pd.concat([inp_df, outp_df, conv_df], axis=1)
else:
pbar.update(50)
proc_df = inp_df
pbar.update(75)
# remove double seq_id columns
proc_df = proc_df.loc[:,~proc_df.columns.duplicated()]
pbar.update(100)
pbar.close()
# print('proc_df.columns:', proc_df.columns)
validate_df(proc_df)
# print()
return proc_df
def _get_char(self,seq):
"""split string int sequence of chars returned in pandas.Series"""
chars = list(seq)
return pd.Series(chars)
def _process_df(self, df, seqid_col, tcol, target_base, suffix):
"""cleans a data frame representing sequences and their edit info obtained from crispr experiment
Args:
df: pandas.DataFrame
tcol: string,
target_base: string,
suffix: string,
Note:
assumed columns in the dataframe are:
"""
## process outcome sequences
# print('__ process_df __')
# print(df.columns)
seqid_df = pd.DataFrame(df[seqid_col].copy())
seqid_df.columns = ['seq_id']
df = pd.DataFrame(df[tcol].copy())
seq_colname = f'{suffix}_seq'
df.columns = [seq_colname]
# harmonize sequence string representation to capitalized form
df[seq_colname] = df[seq_colname].str.upper()
baseseq_df = df[seq_colname].apply(self._get_char)
num_nucl = len(baseseq_df.columns)+1
baseseq_df.columns = [f'{suffix}_B{i}' for i in range(1, num_nucl)]
base_mask = (baseseq_df == target_base) * 1
base_mask.columns = [f'{suffix}_M{i}' for i in range(1, num_nucl)]
baseseq_letters_df = baseseq_df.copy()
baseseq_letters_df.columns = [f'{suffix}_L{i}' for i in range(1, num_nucl)]
# replace base letters with numbers
baseseq_df.replace(['A', 'C', 'T', 'G'], [0,1,2,3], inplace=True)
base_df = pd.concat([seqid_df,
base_mask,
df,
baseseq_letters_df,
baseseq_df], axis=1)
base_df.reset_index(inplace=True, drop=True)
return base_df
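# Usage sketch (illustrative; 'BE4', ('C', 'T') and the 'proportion' column name are
# assumptions for the example, not values mandated by this module):
#     seqconfig = SeqProcessConfig(seq_len=20, seq_stend=(1, 20), ewindow_stend=(4, 8), offset_val=1)
#     processor = HaplotypeSeqProcessor('BE4', ('C', 'T'), seqconfig)
#     df = processor.remove_viol_seqs(raw_df, 'Inp_seq')
#     df = processor.preprocess_df(df, ['seq_id', 'Inp_seq'], 'Outp_seq')
#     proc_df = processor.process_inp_outp_df(df, 'seq_id', 'Inp_seq', 'Outp_seq', 'proportion')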
def validate_df(df):
    print('number of columns with NA:', df.isna().any().sum())
class VizInpOutp_Haplotype(object):
html_colors = {'blue':' #aed6f1',
'red':' #f5b7b1',
'green':' #a3e4d7',
'yellow':' #f9e79f',
'violet':'#d7bde2'}
codes = {'A':'@', 'C':'!', 'T':'#', 'G':'_', 'conv':'~', 'prob':'%'}
nucl_colrmap = {'A':'red',
'C':'yellow',
'T':'blue',
'G':'green',
'prob':'violet'}
def __init__(self):
pass
@classmethod
def viz_align_haplotype(clss, df, seqid, outcome_colname, seqconfig, conv_nl, predscore_thr=0., return_type='html'):
"""
Args:
df: processed dataframe using HaplotypeSeqProcessor.process_inp_outp_df
seqid: string, sequence id
outcome_colname: string or None, the ground truth outcome proportion
seqconfig: instance of SeqProcessConfig class
conv_nl: tuple of (target nucleotide, transition nucleotide)
predscore_thr: float, probability threshold
return_type: string, default `html`
"""
seq_len = seqconfig.seq_len
seq_st, seq_end = seqconfig.seq_st, seqconfig.seq_end
ewindow_st, ewindow_end = seqconfig.ewindow_st, seqconfig.ewindow_end
offset_st, offset_end = seqconfig.offset_st, seqconfig.offset_end
tb_nucl, cb_nucl = conv_nl
codes = clss.codes
tb = PrettyTable()
tb.field_names = ['Desc.'] + [f'{i}' for i in range(1, seq_len+1)]
cond = df['seq_id'] == seqid
cond_thr = df['pred_score'] >= predscore_thr
df = df.loc[(cond) & (cond_thr)].copy()
# sort df by outcome probability
if outcome_colname is not None:
sortcol = outcome_colname
else:
sortcol = 'pred_score'
df.sort_values(by=[sortcol], ascending=False, inplace=True)
# get the input sequence
inp_nucl = df.iloc[0][[f'Inp_L{i}' for i in range(1,seq_len+1)]].values
inp_str_lst = ['Input sequence'] + [f'{codes[nucl]}{nucl}' for nucl in inp_nucl]
tb.add_row(inp_str_lst)
n_rows = df.shape[0]
# generate outcome (haplotype) rows
for rcounter in range(n_rows):
row = df.iloc[rcounter]
outp_nucl = row[[f'Outp_L{i}' for i in range(1,seq_len+1)]].values
if outcome_colname is not None:
outp_str_lst = ['{}Output sequence\n Prob.={:.4f}'.format(codes['prob'], row[outcome_colname])]
else:
outp_str_lst = ['{}Output sequence'.format(codes['prob'])]
cl_lst = []
for pos, nucl in enumerate(outp_nucl):
if row[f'conv{tb_nucl}{cb_nucl}_{pos+1}']:
cl_lst += [f"{codes['conv']}{nucl}"]
else:
cl_lst += [f'{nucl}']
outp_str_lst += cl_lst
tb.add_row(outp_str_lst)
pos_str_lst = ['Position numbering']+[str(elm) for elm in range(offset_st, offset_end+1)]
tb.add_row(pos_str_lst)
ewindow_str_lst = ['Editable window (*)'] + \
[' ' for elm in range(0, ewindow_st)]+ \
['*' for elm in range(ewindow_st, ewindow_end+1)]+ \
[' ' for elm in range(ewindow_end+1, seq_len)]
tb.add_row(ewindow_str_lst)
seqwindow_str_lst = ['Sequence window (+)'] + \
[' ' for elm in range(0, seq_st)]+ \
['+' for elm in range(seq_st, seq_end+1)]+ \
[' ' for elm in range(seq_end+1, seq_len)]
tb.add_row(seqwindow_str_lst)
if return_type == 'html':
return clss._format_html_table(tb.get_html_string(), conv_nl)
else: # default string
return tb.get_string()
@classmethod
def _format_html_table(clss, html_str, conv_nl):
tb_nucl, cb_nucl = conv_nl
html_colors = clss.html_colors
codes = clss.codes
nucl_colrmap = clss.nucl_colrmap
for nucl in codes:
if nucl == 'conv':
ctext = codes[nucl]
color = html_colors[nucl_colrmap[cb_nucl]]
else:
ctext = codes[nucl]
color = html_colors[nucl_colrmap[nucl]]
html_str = re.sub(f'<td>{ctext}', '<td bgcolor="{}">'.format(color), html_str)
return html_str
class HaplotypeVizFile():
def __init__(self, resc_pth):
# resc_pth: viz resources folder path
# it contains 'header.txt', 'jupcellstyle.css', 'begin.txt', and 'end.txt'
self.resc_pth = resc_pth
def create(self, tablehtml, dest_pth, fname):
resc_pth = self.resc_pth
ls = []
for ftname in ('header.txt', 'jupcellstyle.css', 'begin.txt'):
with open(os.path.join(resc_pth, ftname), mode='r') as f:
ls.extend(f.readlines())
ls.append(tablehtml)
with open(os.path.join(resc_pth, 'end.txt'), mode='r') as f:
ls.extend(f.readlines())
content = "".join(ls)
with open(os.path.join(dest_pth, f'{fname}.html'), mode='w') as f:
f.write(content)
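# Usage sketch (illustrative; the paths are hypothetical and proc_df is assumed to carry
# a 'pred_score' column for the threshold filter):
#     html = VizInpOutp_Haplotype.viz_align_haplotype(proc_df, 'seq_0', 'proportion',
#                                                     seqconfig, ('C', 'T'))
#     HaplotypeVizFile('viz_resources').create(html, 'output_dir', 'seq_0_alignment')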
|
[
"pandas.Series",
"prettytable.PrettyTable",
"numpy.where",
"itertools.product",
"tqdm.tqdm",
"os.path.join",
"pandas.DataFrame",
"pandas.concat"
] |
[((259, 275), 'pandas.Series', 'pd.Series', (['chars'], {}), '(chars)\n', (268, 275), True, 'import pandas as pd\n'), ((1640, 1653), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (1651, 1653), False, 'from prettytable import PrettyTable\n'), ((2618, 2631), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (2629, 2631), False, 'from prettytable import PrettyTable\n'), ((4672, 4695), 'tqdm.tqdm', 'tqdm', ([], {'total': 'dfg.ngroups'}), '(total=dfg.ngroups)\n', (4676, 4695), False, 'from tqdm import tqdm\n'), ((5988, 6021), 'itertools.product', 'itertools.product', (['*comb_nucl_lst'], {}), '(*comb_nucl_lst)\n', (6005, 6021), False, 'import itertools\n'), ((9330, 9359), 'pandas.concat', 'pd.concat', (['res_df_lst'], {'axis': '(0)'}), '(res_df_lst, axis=0)\n', (9339, 9359), True, 'import pandas as pd\n'), ((10125, 10140), 'tqdm.tqdm', 'tqdm', ([], {'total': '(100)'}), '(total=100)\n', (10129, 10140), False, 'from tqdm import tqdm\n'), ((11615, 11631), 'pandas.Series', 'pd.Series', (['chars'], {}), '(chars)\n', (11624, 11631), True, 'import pandas as pd\n'), ((13058, 13134), 'pandas.concat', 'pd.concat', (['[seqid_df, base_mask, df, baseseq_letters_df, baseseq_df]'], {'axis': '(1)'}), '([seqid_df, base_mask, df, baseseq_letters_df, baseseq_df], axis=1)\n', (13067, 13134), True, 'import pandas as pd\n'), ((14883, 14896), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (14894, 14896), False, 'from prettytable import PrettyTable\n'), ((7950, 7977), 'pandas.DataFrame', 'pd.DataFrame', (['comb_nucl_arr'], {}), '(comb_nucl_arr)\n', (7962, 7977), True, 'import pandas as pd\n'), ((8724, 8771), 'pandas.DataFrame', 'pd.DataFrame', (["([record['Inp_seq']] * num_options)"], {}), "([record['Inp_seq']] * num_options)\n", (8736, 8771), True, 'import pandas as pd\n'), ((8850, 8896), 'pandas.DataFrame', 'pd.DataFrame', (["([record['seq_id']] * num_options)"], {}), "([record['seq_id']] * num_options)\n", (8862, 8896), True, 'import pandas as pd\n'), ((8971, 9031), 'pandas.concat', 'pd.concat', (['[seqid_df, inpseq_df, a, comb_nucl_df, b]'], {'axis': '(1)'}), '([seqid_df, inpseq_df, a, comb_nucl_df, b], axis=1)\n', (8980, 9031), True, 'import pandas as pd\n'), ((10707, 10729), 'pandas.DataFrame', 'pd.DataFrame', (['conv_mat'], {}), '(conv_mat)\n', (10719, 10729), True, 'import pandas as pd\n'), ((6994, 7023), 'numpy.where', 'np.where', (['(rec_nucl == tb_nucl)'], {}), '(rec_nucl == tb_nucl)\n', (7002, 7023), True, 'import numpy as np\n'), ((11045, 11090), 'pandas.concat', 'pd.concat', (['[inp_df, outp_df, conv_df]'], {'axis': '(1)'}), '([inp_df, outp_df, conv_df], axis=1)\n', (11054, 11090), True, 'import pandas as pd\n'), ((18427, 18460), 'os.path.join', 'os.path.join', (['resc_pth', '"""end.txt"""'], {}), "(resc_pth, 'end.txt')\n", (18439, 18460), False, 'import os\n'), ((18563, 18602), 'os.path.join', 'os.path.join', (['dest_pth', 'f"""{fname}.html"""'], {}), "(dest_pth, f'{fname}.html')\n", (18575, 18602), False, 'import os\n'), ((18291, 18321), 'os.path.join', 'os.path.join', (['resc_pth', 'ftname'], {}), '(resc_pth, ftname)\n', (18303, 18321), False, 'import os\n'), ((10956, 10990), 'pandas.DataFrame', 'pd.DataFrame', (['df[outcome_prop_col]'], {}), '(df[outcome_prop_col])\n', (10968, 10990), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Using CNN to create descriptors and neural layer to predict
object recognition in images.
By: <NAME>, <NAME>, and <NAME>.
MLDM Master's Year 2
Fall Semester 2017
"""
import os
###############################################################################
#Set Params
Classes = os.listdir('D:/GD/MLDM/Computer_Vision_Project/cnn5/data/training')
model = 'D:/GD/MLDM/Computer_Vision_Project/cnn5/results/cvp_cnn_sigmoid_10_10.model'
test_dir = 'D:/GD/MLDM/Computer_Vision_Project/Data/test_VOC2007/JPEGImages/'
test_ann_dir = "D:/GD/MLDM/Computer_Vision_Project/Data/test_VOC2007/Annotations2/"
results_file = 'D:/GD/MLDM/Computer_Vision_Project/cnn5/results/cvp_cnn_sigmoid_10_10_results.csv'
specific_img = None #[]
###############################################################################
#%%
import sys
import argparse
import numpy as np
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
import cv2
import pandas as pd
import xml.etree.ElementTree as ET
from keras.preprocessing import image
from keras.models import load_model
from keras.applications.inception_v3 import preprocess_input
#%%
def predict(model, img, target_size):
"""Run model prediction on image
Args:
model: keras model
img: PIL format image
target_size: (w,h) tuple
Returns:
    array of class probabilities for the image (preds[0])
"""
if img.size != target_size:
img = img.resize(target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
return preds[0]
#return x
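# Usage sketch (illustrative; the file name is hypothetical):
#     img = Image.open('some_image.jpg')
#     probs = predict(model, img, target_size=(299, 299))  # one probability per class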
#%%
def plot_preds(image, preds):
"""Displays image and the top-n predicted probabilities in a bar graph
Args:
image: PIL image
preds: list of predicted labels and their probabilities
"""
plt.imshow(image)
plt.axis('off')
plt.figure()
labels = ("cat", "dog")
plt.barh([0, 1], preds, alpha=0.5)
plt.yticks([0, 1], labels)
plt.xlabel('Probability')
plt.xlim(0,1.01)
plt.tight_layout()
plt.show()
#%%
#Percent of total prediction by class
def pred_percent(pred_class):
for col in pred_class.columns:
col_sum = pred_class.loc[:,col].sum()
for row in pred_class.index:
pred_val = pred_class.loc[row,col].astype('float64')
pred_class.loc[row,col] = pred_val/col_sum
return pred_class
#%%
#Use threshold to create binary prediction matrix
def binary_pred(pred_class, threshold):
#Use threshold to create binary classifications
for col in pred_class.columns:
for row in pred_class.index:
if pred_class.loc[row,col] >= threshold:
#if pred_class.loc[row,col] != 0:
pred_class.loc[row,col] = 1
else:
pred_class.loc[row,col] = 0
pred_class = pred_class.astype('int')
return pred_class
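# Worked example (illustrative): with threshold=0.26, a column of per-class shares such
# as [0.40, 0.30, 0.20, 0.10] becomes the binary label vector [1, 1, 0, 0]; only classes
# holding at least 26% of the prediction mass for that image are kept.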
#%%
def make_prediction(test_dir,test_ann_dir,target_size,model,specific_img=None):
#Get all test images
test_imgs = os.listdir(test_dir)
test_anns = os.listdir(test_ann_dir)
pred_class = pd.DataFrame(index=Classes)
true_class = pd.DataFrame(index=Classes)
if specific_img:
test_imgs = [x for x in test_imgs if x in specific_img]
#Iterate and get prediction and correct class tables
print('Predicting')
preds = []
for img_name in test_imgs:
img_name = img_name[:-4]
#Ensure we have correct label values
if (img_name+'.xml') in test_anns:
#Load image
img = Image.open(test_dir+img_name+'.jpg')
#Predict labels
preds += [predict(model, img, target_size)]
print(img_name)
#return preds
print('Testing')
for j in range(len(preds)):
pred = preds[j]
        # NOTE: this assumes every image in test_imgs has a matching .xml annotation;
        # otherwise preds and test_imgs fall out of alignment
        img_name = test_imgs[j]
img_name = img_name[:-4]
print(img_name)
#Percent of total prediction by each object type
percent_pred = []
for i in pred:
percent_pred = percent_pred + [(i/np.sum(pred))]
#combine percents with labels
percent_pred = pd.DataFrame(percent_pred, index=Classes, columns=[img_name])
pred_class = pred_class.join(percent_pred)
#Get percent of prediction for each class
pred_class = pred_percent(pred_class)
#Use threshold to get binary classification
pred_class = binary_pred(pred_class, threshold=.26)
print('Compiling correct labels')
for img_name in test_imgs:
img_name = img_name[:-4]
#Get correct labels
tree = ET.parse(test_ann_dir + img_name + '.xml')
root = tree.getroot()
class_names = []
for child in root:
if child.tag == "object":
obj_name = child.find("name").text
if obj_name not in class_names:
class_names += [obj_name]
#Create one hot encoding
one_hot = pd.DataFrame(np.repeat(0,20),index=Classes,columns=[img_name])
for class_name in class_names:
one_hot.loc[class_name,img_name] = 1
true_class = true_class.join(one_hot)
'''
#Print prediction vs actual
for x in true_class.columns:
print('#######################################')
print('Image: ' + str(x))
print('***************************************')
print('Predicted Labels:')
for y in true_class.index:
print(str(y) + ': ' + str(pred_class.loc[y,x]))
print('***************************************')
print('True Labels:')
for y in true_class.index:
print(str(y) + ': ' + str(true_class.loc[y,x]))
print('***************************************')
'''
results = pd.DataFrame(columns=['tp','fp','tn','fn','acc','prec','rec'])
#Compare predictions vs true labels
tp = 0
fp = 0
tn = 0
fn = 0
for y in pred_class.index:
temp_tp = 0
temp_fp = 0
temp_tn = 0
temp_fn = 0
for x in pred_class.columns:
true_val = true_class.loc[y,x]
pred_val = pred_class.loc[y,x]
if ((true_val==1) & (pred_val==1)):
tp += 1
temp_tp += 1
elif ((true_val==0) & (pred_val==1)):
fp += 1
temp_fp += 1
elif ((true_val==1) & (pred_val==0)):
fn += 1
temp_fn += 1
elif ((true_val==0) & (pred_val==0)):
tn += 1
temp_tn += 1
results.loc[y,'tp'] = temp_tp
results.loc[y,'fp'] = temp_fp
results.loc[y,'tn'] = temp_tn
results.loc[y,'fn'] = temp_fn
results.loc[y,'acc'] = ((temp_tp+temp_tn)/(temp_tp+temp_fp+temp_tn+temp_fn))
if (temp_tp+temp_fp) > 0:
results.loc[y,'prec'] = (temp_tp/(temp_tp+temp_fp))
if (temp_tp+temp_fn) > 0:
results.loc[y,'rec'] = (temp_tp/(temp_tp+temp_fn))
#Results
print('True Positives: ' + str(tp))
print('False Positives: ' + str(fp))
print('True Negatives: ' + str(tn))
print('False Negatives: ' + str(fn))
#Accuracy, precision, recall
print('Accuracy: ' + str((tp+tn)/(tp+fp+tn+fn)))
print('Precision: ' + str(tp/(tp+fp)))
print('Recall: ' + str(tp/(tp+fn)))
print('#######################################')
#results = pd.DataFrame(columns=['tp','fp','tn','fn','acc','prec','rec'])
results.loc['Total','tp'] = tp
results.loc['Total','fp'] = fp
results.loc['Total','tn'] = tn
results.loc['Total','fn'] = fn
results.loc['Total','acc'] = ((tp+tn)/(tp+fp+tn+fn))
results.loc['Total','prec'] = (tp/(tp+fp))
results.loc['Total','rec'] = (tp/(tp+fn))
results.to_csv(results_file)
return pred_class, true_class
#%%
#Run Prediction
target_size = (299, 299) #fixed size for InceptionV3 architecture
print('loading model')
model = load_model(model)
print('model loaded')
print('Starting prediction model')
pred_class, true_class = make_prediction(test_dir,test_ann_dir,target_size,model,specific_img)
|
[
"keras.preprocessing.image.img_to_array",
"keras.applications.inception_v3.preprocess_input",
"matplotlib.pyplot.imshow",
"os.listdir",
"xml.etree.ElementTree.parse",
"numpy.repeat",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"PIL.Image.open",
"keras.models.load_model",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.expand_dims",
"matplotlib.pyplot.tight_layout"
] |
[((331, 398), 'os.listdir', 'os.listdir', (['"""D:/GD/MLDM/Computer_Vision_Project/cnn5/data/training"""'], {}), "('D:/GD/MLDM/Computer_Vision_Project/cnn5/data/training')\n", (341, 398), False, 'import os\n'), ((8223, 8240), 'keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (8233, 8240), False, 'from keras.models import load_model\n'), ((1505, 1528), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1523, 1528), False, 'from keras.preprocessing import image\n'), ((1535, 1560), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1549, 1560), True, 'import numpy as np\n'), ((1567, 1586), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1583, 1586), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((1851, 1868), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1861, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1886), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1879, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1902), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1900, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1965), 'matplotlib.pyplot.barh', 'plt.barh', (['[0, 1]', 'preds'], {'alpha': '(0.5)'}), '([0, 1], preds, alpha=0.5)\n', (1939, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1968, 1994), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1]', 'labels'], {}), '([0, 1], labels)\n', (1978, 1994), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2022), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probability"""'], {}), "('Probability')\n", (2007, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2042), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.01)'], {}), '(0, 1.01)\n', (2033, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2062), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2060, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2065, 2075), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2073, 2075), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3047), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (3037, 3047), False, 'import os\n'), ((3064, 3088), 'os.listdir', 'os.listdir', (['test_ann_dir'], {}), '(test_ann_dir)\n', (3074, 3088), False, 'import os\n'), ((3106, 3133), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Classes'}), '(index=Classes)\n', (3118, 3133), True, 'import pandas as pd\n'), ((3151, 3178), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Classes'}), '(index=Classes)\n', (3163, 3178), True, 'import pandas as pd\n'), ((6003, 6071), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['tp', 'fp', 'tn', 'fn', 'acc', 'prec', 'rec']"}), "(columns=['tp', 'fp', 'tn', 'fn', 'acc', 'prec', 'rec'])\n", (6015, 6071), True, 'import pandas as pd\n'), ((4209, 4270), 'pandas.DataFrame', 'pd.DataFrame', (['percent_pred'], {'index': 'Classes', 'columns': '[img_name]'}), '(percent_pred, index=Classes, columns=[img_name])\n', (4221, 4270), True, 'import pandas as pd\n'), ((4687, 4729), 'xml.etree.ElementTree.parse', 'ET.parse', (["(test_ann_dir + img_name + '.xml')"], {}), "(test_ann_dir + img_name + '.xml')\n", (4695, 4729), True, 'import xml.etree.ElementTree as ET\n'), ((3577, 3617), 'PIL.Image.open', 'Image.open', (["(test_dir + img_name + '.jpg')"], {}), "(test_dir + img_name + '.jpg')\n", (3587, 
3617), False, 'from PIL import Image\n'), ((5095, 5111), 'numpy.repeat', 'np.repeat', (['(0)', '(20)'], {}), '(0, 20)\n', (5104, 5111), True, 'import numpy as np\n'), ((4110, 4122), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (4116, 4122), True, 'import numpy as np\n')]
|
from typing import List,Dict
import torch
from torch import nn
import numpy as np
from torch.nn import functional as F
from functools import partial
from detectron2.config import configurable
from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate, DeformConv
from detectron2.structures import Instances, heatmaps_to_keypoints
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
#from fvcore.nn import sigmoid_focal_loss_jit
REPPOINT_HEAD_REGISTRY = Registry("REPPOINT_HEAD")
REPPOINT_HEAD_REGISTRY.__doc__ = ""
# copy from mmcv
def normal_init(module, mean=0.0, std=1.0, bias=0.0):
if isinstance(module,nn.GroupNorm):
return
if hasattr(module, 'weight') and module.weight is not None:
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def bias_init_with_prob(prior_prob):
"""initialize conv/fc bias value according to giving probability."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
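# Worked example: bias_init_with_prob(0.01) returns -log((1 - 0.01) / 0.01) = -log(99),
# roughly -4.595, so a sigmoid output initialized with this bias starts near the 1% prior.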
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments
Note:
        This function applies ``func`` to multiple inputs and
        maps the multiple outputs of ``func`` into different
        lists. Each list contains the same type of output corresponding
        to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
        tuple(list): A tuple containing multiple lists; each list contains
            one kind of result returned by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
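# Minimal example (illustrative):
#     multi_apply(divmod, [7, 9], [2, 4])  ->  ([3, 2], [1, 1])
# i.e. the per-input result tuples are transposed into one list per output.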
def build_reppoint_heads(cfg):
"""
    Build a RepPoints head from `cfg.MODEL.REPPOINT_HEAD.NAME`.
"""
name = cfg.MODEL.REPPOINT_HEAD.NAME
return REPPOINT_HEAD_REGISTRY.get(name)(cfg)
@REPPOINT_HEAD_REGISTRY.register()
class ReppointHead(nn.Module):
"""
    Implement the RepPoints head: for each feature-map location it predicts an
    initial point set, a refined point set, and classification scores.
"""
@configurable
def __init__(self,*,
num_classes,
in_channels,
point_feat_channels=256,
feat_channels=256,
stacked_convs=4,
num_points=9,
gradient_mul=0.1,
use_grid_points=False,
center_init=True,
**kwargs):
# TODO: losses will set in loss.py
'''
Args:
            num_classes: num of predicted classes
            in_channels: num of input feature channels
            stacked_convs: num of convs used for feature extraction
            feat_channels: num of conv feature channels
            point_feat_channels: num of dims of the point features
            num_points: how many points are used to represent an object
gradient_mul:
point_strides:
point_base_scale:
use_grid_points:
center_init:
transform_method:
moment_mul:
**kwargs:
'''
super(ReppointHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = self.num_classes
self.in_channels = in_channels
self.num_points = num_points
self.point_feat_channels = point_feat_channels
self.stacked_convs = stacked_convs
self.feat_channels = feat_channels
self.use_grid_points = use_grid_points
self.center_init = center_init
# we use deformable conv to extract points features
self.dcn_kernel = int(np.sqrt(num_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
assert self.dcn_kernel * self.dcn_kernel == num_points, \
'The points number should be a square number.'
assert self.dcn_kernel % 2 == 1, \
'The points number should be an odd square number.'
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
# [-1. 0. 1.]
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        # [-1. -1. -1.  0.  0.  0.  1.  1.  1.]
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
#[-1. 0. 1. -1. 0. 1. -1. 0. 1.]
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
        # [-1. -1. -1.  0. -1.  1.  0. -1.  0.  0.  0.  1.  1. -1.  1.  0.  1.  1.]
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
# layers
self.relu = nn.ReLU(inplace=True)
self.cls_convs = []
self.reg_convs = []
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
nn.Conv2d(
chn,
self.feat_channels,
3,
stride=1,
padding=1))
self.cls_convs.append(nn.GroupNorm(32,self.feat_channels))
self.cls_convs.append(nn.ReLU(inplace=True))
self.reg_convs.append(
nn.Conv2d(
chn,
self.feat_channels,
3,
stride=1,
padding=1))
self.reg_convs.append(nn.GroupNorm(32,self.feat_channels))
self.reg_convs.append(nn.ReLU(inplace=True))
self.cls_convs = nn.Sequential(*self.cls_convs)
self.reg_convs = nn.Sequential(*self.reg_convs)
pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
self.reppoints_cls_conv = DeformConv(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1, self.dcn_pad)
self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
self.cls_out_channels, 1, 1, 0)
self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
self.point_feat_channels, 3,
1, 1)
self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
self.reppoints_pts_refine_conv = DeformConv(self.feat_channels,
self.point_feat_channels,
self.dcn_kernel, 1,
self.dcn_pad)
self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
pts_out_dim, 1, 1, 0)
# init weight
for m in self.cls_convs:
normal_init(m, std=0.01)
for m in self.reg_convs:
normal_init(m, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.reppoints_cls_conv, std=0.01)
normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
normal_init(self.reppoints_pts_init_conv, std=0.01)
normal_init(self.reppoints_pts_init_out, std=0.01)
normal_init(self.reppoints_pts_refine_conv, std=0.01)
normal_init(self.reppoints_pts_refine_out, std=0.01)
self.gradient_mul = gradient_mul
self.cls_out_channels = self.num_classes
@classmethod
def from_config(cls, cfg):
ret = {
"num_classes":cfg.MODEL.REPPOINT_HEAD.NUM_CLASS,
"in_channels":cfg.MODEL.REPPOINT_HEAD.IN_CHANNEL,
"point_feat_channels" : cfg.MODEL.REPPOINT_HEAD.POINT_FEATURE_CHANNEL,
"feat_channels": cfg.MODEL.REPPOINT_HEAD.FEATURE_CHANNEL,
"stacked_convs": cfg.MODEL.REPPOINT_HEAD.STACKED_CONVS,
"num_points": cfg.MODEL.REPPOINT_HEAD.NUM_POINTS,
"gradient_mul": cfg.MODEL.REPPOINT_HEAD.GRADIENT_MUL,
"use_grid_points": cfg.MODEL.REPPOINT_HEAD.USE_GRID_POINTS,
"center_init": cfg.MODEL.REPPOINT_HEAD.CENTER_INIT
}
return ret
def forward_single(self, x):
""" Forward feature map of a single FPN level."""
dcn_base_offset = self.dcn_base_offset.type_as(x)
# If we use center_init, the initial reppoints is from center points.
# If we use bounding bbox representation, the initial reppoints is
# from regular grid placed on a pre-defined bbox.
        if self.use_grid_points or not self.center_init:
            # NOTE: self.point_base_scale is not set in __init__/from_config above;
            # it has to be supplied via the config before this branch can run.
            scale = self.point_base_scale / 2
points_init = dcn_base_offset / dcn_base_offset.max() * scale
bbox_init = x.new_tensor([-scale, -scale, scale,
scale]).view(1, 4, 1, 1)
else:
points_init = 0
cls_feat = x
pts_feat = x
cls_feat = self.cls_convs(cls_feat)
pts_feat = self.reg_convs(pts_feat)
# initialize reppoints
pts_out_init = self.reppoints_pts_init_out(
self.relu(self.reppoints_pts_init_conv(pts_feat)))
pts_out_init = pts_out_init + points_init
# refine and classify reppoints
# TODO: why use grad_mul?
pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
) + self.gradient_mul * pts_out_init
dcn_offset = pts_out_init_grad_mul - dcn_base_offset
cls_out = self.reppoints_cls_out(
self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
pts_out_refine = self.reppoints_pts_refine_out(
self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
pts_out_refine = pts_out_refine + pts_out_init.detach()
#cls_out = self.softmax(cls_out)
#print(pts_out_refine.size())
#print(pts_out_init.size())
return cls_out, pts_out_init, pts_out_refine
def forward(self,x):
return multi_apply(self.forward_single,x)
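# Shape sketch (illustrative; exact channel counts come from the config): for one FPN
# level x of shape (N, in_channels, H, W), forward_single returns
#     cls_out        -> (N, num_classes, H, W)
#     pts_out_init   -> (N, 2 * num_points, H, W)   (4 channels when use_grid_points=True)
#     pts_out_refine -> same shape as pts_out_init
# and forward(list_of_levels) returns one list per output via multi_apply.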
|
[
"numpy.tile",
"torch.nn.ReLU",
"torch.nn.GroupNorm",
"numpy.repeat",
"numpy.sqrt",
"torch.nn.init.constant_",
"numpy.arange",
"torch.nn.Sequential",
"detectron2.layers.DeformConv",
"numpy.log",
"torch.nn.Conv2d",
"numpy.stack",
"torch.tensor",
"detectron2.utils.registry.Registry",
"functools.partial",
"torch.nn.init.normal_"
] |
[((531, 556), 'detectron2.utils.registry.Registry', 'Registry', (['"""REPPOINT_HEAD"""'], {}), "('REPPOINT_HEAD')\n", (539, 556), False, 'from detectron2.utils.registry import Registry\n'), ((803, 844), 'torch.nn.init.normal_', 'nn.init.normal_', (['module.weight', 'mean', 'std'], {}), '(module.weight, mean, std)\n', (818, 844), False, 'from torch import nn\n'), ((915, 951), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', 'bias'], {}), '(module.bias, bias)\n', (932, 951), False, 'from torch import nn\n'), ((1774, 1797), 'functools.partial', 'partial', (['func'], {}), '(func, **kwargs)\n', (1781, 1797), False, 'from functools import partial\n'), ((4386, 4422), 'numpy.repeat', 'np.repeat', (['dcn_base', 'self.dcn_kernel'], {}), '(dcn_base, self.dcn_kernel)\n', (4395, 4422), True, 'import numpy as np\n'), ((4495, 4529), 'numpy.tile', 'np.tile', (['dcn_base', 'self.dcn_kernel'], {}), '(dcn_base, self.dcn_kernel)\n', (4502, 4529), True, 'import numpy as np\n'), ((4886, 4907), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4893, 4907), False, 'from torch import nn\n'), ((5809, 5839), 'torch.nn.Sequential', 'nn.Sequential', (['*self.cls_convs'], {}), '(*self.cls_convs)\n', (5822, 5839), False, 'from torch import nn\n'), ((5866, 5896), 'torch.nn.Sequential', 'nn.Sequential', (['*self.reg_convs'], {}), '(*self.reg_convs)\n', (5879, 5896), False, 'from torch import nn\n'), ((6006, 6100), 'detectron2.layers.DeformConv', 'DeformConv', (['self.feat_channels', 'self.point_feat_channels', 'self.dcn_kernel', '(1)', 'self.dcn_pad'], {}), '(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1,\n self.dcn_pad)\n', (6016, 6100), False, 'from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate, DeformConv\n'), ((6223, 6290), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.point_feat_channels', 'self.cls_out_channels', '(1)', '(1)', '(0)'], {}), '(self.point_feat_channels, self.cls_out_channels, 1, 1, 0)\n', (6232, 6290), False, 'from torch import nn\n'), ((6375, 6439), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.point_feat_channels', '(3)', '(1)', '(1)'], {}), '(self.feat_channels, self.point_feat_channels, 3, 1, 1)\n', (6384, 6439), False, 'from torch import nn\n'), ((6579, 6636), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.point_feat_channels', 'pts_out_dim', '(1)', '(1)', '(0)'], {}), '(self.point_feat_channels, pts_out_dim, 1, 1, 0)\n', (6588, 6636), False, 'from torch import nn\n'), ((6728, 6822), 'detectron2.layers.DeformConv', 'DeformConv', (['self.feat_channels', 'self.point_feat_channels', 'self.dcn_kernel', '(1)', 'self.dcn_pad'], {}), '(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1,\n self.dcn_pad)\n', (6738, 6822), False, 'from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate, DeformConv\n'), ((7019, 7076), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.point_feat_channels', 'pts_out_dim', '(1)', '(1)', '(0)'], {}), '(self.point_feat_channels, pts_out_dim, 1, 1, 0)\n', (7028, 7076), False, 'from torch import nn\n'), ((1092, 1129), 'numpy.log', 'np.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (1098, 1129), True, 'import numpy as np\n'), ((3915, 3934), 'numpy.sqrt', 'np.sqrt', (['num_points'], {}), '(num_points)\n', (3922, 3934), True, 'import numpy as np\n'), ((4247, 4289), 'numpy.arange', 'np.arange', (['(-self.dcn_pad)', '(self.dcn_pad + 1)'], {}), '(-self.dcn_pad, self.dcn_pad + 1)\n', (4256, 4289), True, 'import numpy as np\n'), ((4605, 4647), 
'numpy.stack', 'np.stack', (['[dcn_base_y, dcn_base_x]'], {'axis': '(1)'}), '([dcn_base_y, dcn_base_x], axis=1)\n', (4613, 4647), True, 'import numpy as np\n'), ((4797, 4826), 'torch.tensor', 'torch.tensor', (['dcn_base_offset'], {}), '(dcn_base_offset)\n', (4809, 4826), False, 'import torch\n'), ((5134, 5192), 'torch.nn.Conv2d', 'nn.Conv2d', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(chn, self.feat_channels, 3, stride=1, padding=1)\n', (5143, 5192), False, 'from torch import nn\n'), ((5335, 5371), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(32)', 'self.feat_channels'], {}), '(32, self.feat_channels)\n', (5347, 5371), False, 'from torch import nn\n'), ((5407, 5428), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5414, 5428), False, 'from torch import nn\n'), ((5485, 5543), 'torch.nn.Conv2d', 'nn.Conv2d', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(chn, self.feat_channels, 3, stride=1, padding=1)\n', (5494, 5543), False, 'from torch import nn\n'), ((5686, 5722), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(32)', 'self.feat_channels'], {}), '(32, self.feat_channels)\n', (5698, 5722), False, 'from torch import nn\n'), ((5758, 5779), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5765, 5779), False, 'from torch import nn\n')]
|
"""
The :mod:`pyfan.graph.example.scatterline3` generates a graprh with three lines.
This is the functionalized version of `plot_randgrid Example <https://pyfan.readthedocs.io/en/latest/auto_examples/plot_randgrid.html#sphx-glr-auto-examples-plot-randgrid-py>`_.
Includes method :func:`gph_scatter_line_rand`.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyfan.gen.rand.randgrid as pyfan_gen_rand
import pyfan.aws.general.path as pyfan_path
import pyfan.util.timer.timer as pyfan_timer
import argparse
import sys
import os
# Parse Inputs to be used commandline
parser = argparse.ArgumentParser()
parser.add_argument('-A', dest="st_s3_bucket", help="s3 bucket to store output images", default='fans3testbucket')
parser.add_argument('-B', dest="it_seed", help="random seed", type=int, default=666)
args = parser.parse_args()
def gph_scatter_line_rand(fl_mu=0, fl_sd=1,
it_draws=25, it_seed=123,
fl_lower_sd=-2, fl_higher_sd=2,
bl_show_fig=True, bl_save_fig=False,
st_s3_bucket='fans3testbucket'):
"""A randomly generated graph with scatter plot and lines.
Parameters
----------
    fl_mu, fl_sd : `float`, optional
        The mean and standard deviation of the normal process used for the draws
    it_draws: `integer`, optional
        Number of draws
    it_seed: `integer`, optional
        Random seed set externally. Default is 123.
fl_lower_sd, fl_higher_sd : `float`, optional
Impose lower and upper bounds (in sd units) on shock draws. The normal
distribution does not have lower or upper bounds.
bl_show_fig: `bool`, optional
Show graph in documentation if needed. When storing graph to disc and uploading
to s3, do not need to show.
Returns
-------
pandas.DataFrame of shape (`it_draws`, 4)
A pandas dataframe with `it_draws` number of rows and four columns. First
for x values, the next three for three types of randomly generated variables
that are been plotted out.
Examples
--------
>>> fl_mu = 0
>>> fl_sd = 1
>>> it_draws = 20
>>> it_seed = 456
>>> fl_lower_sd = -1
>>> fl_higher_sd = 0.8
    >>> gph_scatter_line_rand(fl_mu, fl_sd,
... it_draws, it_seed,
... fl_lower_sd, fl_higher_sd)
x shk_t0 shk_t1 shk_t2
1 1.0 -0.668129 -2.000000 -2.000000
2 2.0 -0.498210 -1.533950 -1.130231
3 3.0 0.618576 -1.268601 -1.111846
4 4.0 0.568692 -1.071098 -0.971485
5 5.0 1.350509 -0.908400 -0.668129
6 6.0 1.629589 -0.766786 -0.498210
7 7.0 0.301966 -0.639112 -0.384060
8 8.0 0.449483 -0.521108 -0.345811
9 9.0 -0.345811 -0.409963 -0.325130
10 10.0 -0.315231 -0.303676 -0.315231
11 11.0 -2.000000 -0.200721 -0.106208
12 12.0 -1.130231 -0.099856 -0.088752
13 13.0 -1.111846 0.000000 0.237851
14 14.0 0.237851 0.099856 0.301966
15 15.0 -0.325130 0.200721 0.449483
16 16.0 1.944702 0.303676 0.568692
17 17.0 1.915676 0.409963 0.618576
18 18.0 0.920348 0.521108 0.920348
19 19.0 0.936398 0.639112 0.936398
20 20.0 1.157552 0.766786 1.139873
21 21.0 -0.106208 0.908400 1.157552
22 22.0 -0.088752 1.071098 1.350509
23 23.0 -0.971485 1.268601 1.629589
24 24.0 -0.384060 1.533950 1.915676
25 25.0 1.139873 2.000000 1.944702
"""
# Type 0 Shock draw
it_draw_type = 0
ar_shock_t0 = \
pyfan_gen_rand.ar_draw_random_normal(fl_mu, fl_sd, it_draws,
it_seed, it_draw_type,
fl_lower_sd, fl_higher_sd)
# Type 1 Shock draw
it_draw_type = 1
ar_shock_t1 = \
pyfan_gen_rand.ar_draw_random_normal(fl_mu, fl_sd, it_draws,
it_seed, it_draw_type,
fl_lower_sd, fl_higher_sd)
# Type 2 Shock draw
it_draw_type = 2
ar_shock_t2 = \
pyfan_gen_rand.ar_draw_random_normal(fl_mu, fl_sd, it_draws,
it_seed, it_draw_type,
fl_lower_sd, fl_higher_sd)
# Draw Shocks Jointly
fig, ax = plt.subplots()
# Graph
ar_it_x_grid = np.arange(1, it_draws + 1)
ax.plot(ar_it_x_grid, ar_shock_t0,
color='blue', linestyle='dashed', marker='x',
label='Type 0: Bounded Shock Draws')
ax.scatter(ar_it_x_grid, ar_shock_t1,
color='red',
label='Type 1: Quantile Points')
ax.plot(ar_it_x_grid, ar_shock_t2,
color='black', marker='d',
            label='Type 2: Sorted Bounded Shock Draws')
# Labeling
ax.legend(loc='upper left')
plt.ylabel('Shock Values')
plt.xlabel('Shock Draw Points')
plt.title('Shock, Sorted and Bounded Shocks, Quantile Points')
plt.grid()
if bl_show_fig:
plt.show()
if bl_save_fig:
sna_image_name = 'f_' + pyfan_timer.getDateTime(8) +'_s' + str(it_seed)
srt_s3_bucket_folder = 'pyfan_gph_scatter_line_rand'
pyfan_path.save_img(plt, sna_image_name,
dpi=300, papertype='a4',
orientation='horizontal',
bl_upload_s3=True, st_s3_bucket=st_s3_bucket,
srt_s3_bucket_folder=srt_s3_bucket_folder)
# %%
# Collect the shock draws into a DataFrame
# ------------------------
mt_shocks = np.column_stack([ar_it_x_grid, ar_shock_t0, ar_shock_t1, ar_shock_t2])
df_shocks = pd.DataFrame(data=mt_shocks,
index=range(1, mt_shocks.shape[0] + 1),
columns=['x', 'shk_t0', 'shk_t1', 'shk_t2'])
return df_shocks
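# Example call (illustrative; mirrors the defaults used below): generate and upload the
# figure without displaying it, and get back the DataFrame of draws:
#     df_shocks = gph_scatter_line_rand(fl_mu=0, fl_sd=1, it_draws=25, it_seed=123,
#                                       fl_lower_sd=-2, fl_higher_sd=2,
#                                       bl_show_fig=False, bl_save_fig=True,
#                                       st_s3_bucket='fans3testbucket')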
if __name__ == "__main__":
# Run on command line, might need to install latest file locally first
# conda activate base
# cd "C:/Users/fan/pyfan/"
# python setup.py install --user
# python C:/Users/fan/pyfan/pyfan/graph/exa/scatterline3.py -A fans3testbucket -B 1
# python /pyfan/pyfan/graph/exa/scatterline3.py -A fans3testbucket -B 1
# This is an AWS Batch run with Job Array Index for Parallel processing
# With this, only one job needs to be specified
if "AWS_BATCH_JOB_ARRAY_INDEX" in os.environ:
print('AWS_BATCH_JOB_ARRAY_INDEX')
it_seed_arg = os.environ['AWS_BATCH_JOB_ARRAY_INDEX']
it_seed_arg = int(it_seed_arg)
else:
it_seed_arg = args.it_seed
print(it_seed_arg)
gph_scatter_line_rand(fl_mu=0, fl_sd=1,
it_draws=25, it_seed=it_seed_arg,
fl_lower_sd=-2, fl_higher_sd=2,
bl_show_fig=False, bl_save_fig=True,
st_s3_bucket=args.st_s3_bucket)
|
[
"matplotlib.pyplot.grid",
"pyfan.util.timer.timer.getDateTime",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"pyfan.aws.general.path.save_img",
"matplotlib.pyplot.xlabel",
"numpy.column_stack",
"pyfan.gen.rand.randgrid.ar_draw_random_normal",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((611, 636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (634, 636), False, 'import argparse\n'), ((3535, 3649), 'pyfan.gen.rand.randgrid.ar_draw_random_normal', 'pyfan_gen_rand.ar_draw_random_normal', (['fl_mu', 'fl_sd', 'it_draws', 'it_seed', 'it_draw_type', 'fl_lower_sd', 'fl_higher_sd'], {}), '(fl_mu, fl_sd, it_draws, it_seed,\n it_draw_type, fl_lower_sd, fl_higher_sd)\n', (3571, 3649), True, 'import pyfan.gen.rand.randgrid as pyfan_gen_rand\n'), ((3810, 3924), 'pyfan.gen.rand.randgrid.ar_draw_random_normal', 'pyfan_gen_rand.ar_draw_random_normal', (['fl_mu', 'fl_sd', 'it_draws', 'it_seed', 'it_draw_type', 'fl_lower_sd', 'fl_higher_sd'], {}), '(fl_mu, fl_sd, it_draws, it_seed,\n it_draw_type, fl_lower_sd, fl_higher_sd)\n', (3846, 3924), True, 'import pyfan.gen.rand.randgrid as pyfan_gen_rand\n'), ((4085, 4199), 'pyfan.gen.rand.randgrid.ar_draw_random_normal', 'pyfan_gen_rand.ar_draw_random_normal', (['fl_mu', 'fl_sd', 'it_draws', 'it_seed', 'it_draw_type', 'fl_lower_sd', 'fl_higher_sd'], {}), '(fl_mu, fl_sd, it_draws, it_seed,\n it_draw_type, fl_lower_sd, fl_higher_sd)\n', (4121, 4199), True, 'import pyfan.gen.rand.randgrid as pyfan_gen_rand\n'), ((4327, 4341), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4339, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4373, 4399), 'numpy.arange', 'np.arange', (['(1)', '(it_draws + 1)'], {}), '(1, it_draws + 1)\n', (4382, 4399), True, 'import numpy as np\n'), ((4849, 4875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Shock Values"""'], {}), "('Shock Values')\n", (4859, 4875), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Shock Draw Points"""'], {}), "('Shock Draw Points')\n", (4890, 4911), True, 'import matplotlib.pyplot as plt\n'), ((4916, 4978), 'matplotlib.pyplot.title', 'plt.title', (['"""Shock, Sorted and Bounded Shocks, Quantile Points"""'], {}), "('Shock, Sorted and Bounded Shocks, Quantile Points')\n", (4925, 4978), True, 'import matplotlib.pyplot as plt\n'), ((4983, 4993), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4991, 4993), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5650), 'numpy.column_stack', 'np.column_stack', (['[ar_it_x_grid, ar_shock_t0, ar_shock_t1, ar_shock_t2]'], {}), '([ar_it_x_grid, ar_shock_t0, ar_shock_t1, ar_shock_t2])\n', (5595, 5650), True, 'import numpy as np\n'), ((5022, 5032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5030, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5203, 5391), 'pyfan.aws.general.path.save_img', 'pyfan_path.save_img', (['plt', 'sna_image_name'], {'dpi': '(300)', 'papertype': '"""a4"""', 'orientation': '"""horizontal"""', 'bl_upload_s3': '(True)', 'st_s3_bucket': 'st_s3_bucket', 'srt_s3_bucket_folder': 'srt_s3_bucket_folder'}), "(plt, sna_image_name, dpi=300, papertype='a4',\n orientation='horizontal', bl_upload_s3=True, st_s3_bucket=st_s3_bucket,\n srt_s3_bucket_folder=srt_s3_bucket_folder)\n", (5222, 5391), True, 'import pyfan.aws.general.path as pyfan_path\n'), ((5085, 5111), 'pyfan.util.timer.timer.getDateTime', 'pyfan_timer.getDateTime', (['(8)'], {}), '(8)\n', (5108, 5111), True, 'import pyfan.util.timer.timer as pyfan_timer\n')]
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import torch
import random
import sys
import os
class UDNEnv(gym.Env):
metadata = {}
def __init__(self):
self.BSposition = np.loadtxt('BSposition.csv', delimiter=',')
self.BSnum = len(self.BSposition[0])
self.InterferenceBSposition = np.loadtxt('InterferenceBSposition.csv', delimiter=',')
self.InterferenceBSnum = len(self.InterferenceBSposition[0])
self.Area = 10 ** 2
self.usernum = 32
self.BSstate = np.ones(self.BSnum, dtype = bool)
self.InterferenceBSstate = np.random.randint(2, size = self.InterferenceBSnum)
self.user_Xposition = np.random.uniform(0,self.Area,self.usernum)
self.user_Yposition = np.random.uniform(0,self.Area,self.usernum)
self.action_space = spaces.Discrete(2**self.BSnum)
self.movedistance = None
self.state = np.r_[self.user_Xposition,self.user_Yposition,self.Hexchange(self.InterferenceBSstate)]
self.bandwidth = 10**7
self.threshold = 120 * 10 ** 6 #bit/s
def step(self, action):
        # apply the action, then compute the reward and return (state, reward, done, info)
self.take_action(action)
Datarate_weightvalue = 1
Energyconsumption_weightvalue = 2
signal = self.BS_User_S() * 2
Interference = self.Interference_User_I()
#SIR = signal / Interference
SIR = signal - Interference
#Datarate = self.bandwidth * np.log2(1+SIR)
Datarate = self.bandwidth * np.log2(1+10**(SIR/10))
#coverage_prob = np.sum(Datarate > self.threshold) / self.usernum
#print(coverage_prob)
Energyconsumption = np.sum(self.BSstate.astype(float))
if Energyconsumption == 0:
reward = -100
is_done = True
else:
reward = Datarate_weightvalue * np.mean(Datarate) / (10 ** 6) - (Energyconsumption_weightvalue * Energyconsumption)
#reward = 1.0
is_done =False
#if coverage_prob < 0.7:
#reward = -10
#is_done = True
#else:
#is_done = False
#reward = Datarate_weightvalue * np.sum(Datarate) / (10 ** 6) - (Energyconsumption_weightvalue * Energyconsumption)
#is_done = False
info = self.BSstate.astype(float)
self.InterferenceBSstate = np.random.randint(2, size = self.InterferenceBSnum)
self.state[2 * self.usernum] = self.Hexchange(self.InterferenceBSstate)
return self.state, reward, is_done, info#for visualizing
def reset(self):
self.BSstate = np.ones(self.BSnum,dtype = bool)
self.user_Xposition = np.random.uniform(0,self.Area,self.usernum)
self.user_Yposition = np.random.uniform(0,self.Area,self.usernum)
self.InterferenceBSstate = np.random.randint(2, size = self.InterferenceBSnum)
self.state = np.r_[self.user_Xposition,self.user_Yposition,self.Hexchange(self.InterferenceBSstate)]
return self.state
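    # Usage sketch (illustrative): the class follows the standard gym API, so a
    # random-policy rollout looks like:
    #     env = UDNEnv()   # requires BSposition.csv and InterferenceBSposition.csv on disk
    #     state = env.reset()
    #     state, reward, done, info = env.step(env.action_space.sample())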
def take_action(self, action):
#do action for change state
self.BSstate = self.Binarychange(action,self.BSnum)
self.movedistance = self.usermovedistance()
for j in range(2*self.usernum):
self.state[j] = self.state[j] + self.movedistance[j]
if self.state[j] > self.Area:
self.state[j] = self.state[j] - self.Area
if self.state[j] < 0:
self.state[j] = self.state[j] + self.Area
def Binarychange(self,num,tnum):
        # integer action index to binary on/off vector (index 0 is the least-significant bit)
hnum = num
bmatrix = np.zeros(tnum)
index = 0
while True:
if index == tnum:
break
else:
bmatrix[index] = hnum % 2
hnum = hnum // 2
index += 1
bmatrix = bmatrix.astype(bool)
return bmatrix
def Hexchange(self,mat):
        # binary on/off vector to integer index (index 0 is the most-significant bit)
size = len(mat)
hxnum = 0
for i in range(size):
hxnum += mat[i] * 2 ** (size - i - 1)
return hxnum
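    # Worked example (illustrative): Binarychange(6, 4) -> [False, True, True, False]
    # (index 0 is the least-significant bit), whereas Hexchange([1, 0, 0, 0]) -> 8
    # (index 0 is the most-significant bit); note that the two helpers use opposite
    # bit orders, so they are not inverses of each other.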
def usermovedistance(self):
#human walking speed 1.3m/s = 4.68km/h
theta = np.random.uniform(0,2*np.pi,self.usernum) #random angle for each user
d = np.random.uniform(0,1.3,self.usernum)#random distance for each user
sin = np.sin(theta)
cos = np.cos(theta)
x_dis = d*cos
y_dis = d*sin
state_dis = np.r_[x_dis,y_dis] #form for state
return state_dis
def BS_User_S(self):
        # calculate the signal power (including path loss) for each user
        # return a length-usernum array with the signal power of each user
BS_User_position = np.zeros((2,self.usernum,self.BSnum))
BS_User_distance = np.zeros((self.usernum,self.BSnum),dtype = float)
user_signal_power = np.zeros(self.usernum,dtype = float)
# axis x = 0, axis y = 1
for i in range(self.usernum):
for j in range(self.BSnum):
BS_User_position[0][i][j] = self.state[i] - self.BSposition[0][j]
BS_User_position[1][i][j] = self.state[self.usernum + i] - self.BSposition[1][j]
BS_User_distance = np.linalg.norm(BS_User_position, ord = 2, axis = 0)
for i in range(self.BSnum):
if self.BSstate[i]:
pass
else:
BS_User_distance[:,i] = np.inf
#BS_User_distance = BS_User_distance[:,self.BSstate]
assosiation_matrix = self.assosiation(BS_User_distance)
#user_signal_power = np.power(BS_User_distance[assosiation_matrix],-2)
user_signal_power = 10 * 4 * np.log10(BS_User_distance[assosiation_matrix]) + 20 * np.log10(3.5 * 10 ** 9) - 147.55
return user_signal_power
def Interference_User_I(self):
        # calculate the interference power (including path loss) for each user
        # return a length-usernum array with the interference power of each user
InterferenceBS_User_position = np.zeros((2,self.usernum,self.InterferenceBSnum))
InterferenceBS_User_distance = np.zeros((self.usernum,self.BSnum), dtype = float)
InterferenceBSstate_bool = self.InterferenceBSstate.astype(bool)
user_interference_power = np.zeros(self.usernum,dtype = float)
user_interference_path_loss = np.zeros(self.usernum,dtype = float)
#axis x = 0, axis y = 1
for i in range(self.usernum):
for j in range(self.InterferenceBSnum):
InterferenceBS_User_position[0][i][j] = self.state[i] - self.InterferenceBSposition[0][j]
InterferenceBS_User_position[1][i][j] = self.state[self.usernum + i] - self.InterferenceBSposition[1][j]
Interference_User_distance = np.linalg.norm(InterferenceBS_User_position, ord = 2, axis = 0)
if np.sum(self.InterferenceBSstate) == 0:
#user_interference_path_loss = np.power(np.mean(Interference_User_distance,axis = 1),-2)
user_interference_path_loss = 10 * 4 * np.log10(np.mean(Interference_User_distance,axis = 1)) + 20 * np.log10(3.5 * 10 ** 9) - 147.55
else:
Interference_User_distance = Interference_User_distance[:,InterferenceBSstate_bool]
inter_bandwidth_num = self.InterferenceBSposition[2,InterferenceBSstate_bool]
for i in range(self.usernum):
for j in range(len(inter_bandwidth_num)):
if inter_bandwidth_num[j] == self.user_BS_shortest[i]:
user_interference_power[i] = user_interference_power[i] + Interference_User_distance[i,j]
for i in range(self.usernum):
if user_interference_power[i] == 0:
user_interference_power[i] = np.mean(Interference_User_distance[i])
#user_interference_path_loss = np.power(user_interference_power,-2)
user_interference_path_loss = 10 * 4 * np.log10(user_interference_power) + 20 * np.log10(3.5 * 10 ** 9) - 147.55
return user_interference_path_loss
def assosiation(self, distance):
        # compute the user-BS association following the shortest-distance association rule
        # return a usernum-by-BSnum boolean matrix
BS_user_assosiation = np.zeros((self.usernum,self.BSnum),dtype = bool)
#BS_user_assosiation = BS_user_assosiation[:,self.BSstate]
self.user_BS_shortest = np.argmin(distance,axis = 1)
for i in range(self.usernum):
BS_user_assosiation[i][self.user_BS_shortest[i]] = True
#print(BS_user_assosiation)
return BS_user_assosiation
'''
if __name__ == "__main__":
env = UDNEnv()
env.reset()
action = 255
_, R, _, I = env.step(action)
print(R)
print(I)
'''
|
[
"numpy.mean",
"numpy.log10",
"numpy.ones",
"numpy.log2",
"numpy.linalg.norm",
"gym.spaces.Discrete",
"numpy.sum",
"numpy.random.randint",
"numpy.zeros",
"numpy.cos",
"numpy.argmin",
"numpy.random.uniform",
"numpy.sin",
"numpy.loadtxt"
] |
[((242, 285), 'numpy.loadtxt', 'np.loadtxt', (['"""BSposition.csv"""'], {'delimiter': '""","""'}), "('BSposition.csv', delimiter=',')\n", (252, 285), True, 'import numpy as np\n'), ((369, 424), 'numpy.loadtxt', 'np.loadtxt', (['"""InterferenceBSposition.csv"""'], {'delimiter': '""","""'}), "('InterferenceBSposition.csv', delimiter=',')\n", (379, 424), True, 'import numpy as np\n'), ((571, 602), 'numpy.ones', 'np.ones', (['self.BSnum'], {'dtype': 'bool'}), '(self.BSnum, dtype=bool)\n', (578, 602), True, 'import numpy as np\n'), ((640, 689), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.InterferenceBSnum'}), '(2, size=self.InterferenceBSnum)\n', (657, 689), True, 'import numpy as np\n'), ((722, 767), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.Area', 'self.usernum'], {}), '(0, self.Area, self.usernum)\n', (739, 767), True, 'import numpy as np\n'), ((796, 841), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.Area', 'self.usernum'], {}), '(0, self.Area, self.usernum)\n', (813, 841), True, 'import numpy as np\n'), ((868, 900), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2 ** self.BSnum)'], {}), '(2 ** self.BSnum)\n', (883, 900), False, 'from gym import error, spaces, utils\n'), ((2421, 2470), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.InterferenceBSnum'}), '(2, size=self.InterferenceBSnum)\n', (2438, 2470), True, 'import numpy as np\n'), ((2667, 2698), 'numpy.ones', 'np.ones', (['self.BSnum'], {'dtype': 'bool'}), '(self.BSnum, dtype=bool)\n', (2674, 2698), True, 'import numpy as np\n'), ((2730, 2775), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.Area', 'self.usernum'], {}), '(0, self.Area, self.usernum)\n', (2747, 2775), True, 'import numpy as np\n'), ((2804, 2849), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.Area', 'self.usernum'], {}), '(0, self.Area, self.usernum)\n', (2821, 2849), True, 'import numpy as np\n'), ((2883, 2932), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.InterferenceBSnum'}), '(2, size=self.InterferenceBSnum)\n', (2900, 2932), True, 'import numpy as np\n'), ((3711, 3725), 'numpy.zeros', 'np.zeros', (['tnum'], {}), '(tnum)\n', (3719, 3725), True, 'import numpy as np\n'), ((4308, 4353), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'self.usernum'], {}), '(0, 2 * np.pi, self.usernum)\n', (4325, 4353), True, 'import numpy as np\n'), ((4390, 4429), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.3)', 'self.usernum'], {}), '(0, 1.3, self.usernum)\n', (4407, 4429), True, 'import numpy as np\n'), ((4472, 4485), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4478, 4485), True, 'import numpy as np\n'), ((4500, 4513), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4506, 4513), True, 'import numpy as np\n'), ((4830, 4869), 'numpy.zeros', 'np.zeros', (['(2, self.usernum, self.BSnum)'], {}), '((2, self.usernum, self.BSnum))\n', (4838, 4869), True, 'import numpy as np\n'), ((4895, 4944), 'numpy.zeros', 'np.zeros', (['(self.usernum, self.BSnum)'], {'dtype': 'float'}), '((self.usernum, self.BSnum), dtype=float)\n', (4903, 4944), True, 'import numpy as np\n'), ((4973, 5008), 'numpy.zeros', 'np.zeros', (['self.usernum'], {'dtype': 'float'}), '(self.usernum, dtype=float)\n', (4981, 5008), True, 'import numpy as np\n'), ((5327, 5374), 'numpy.linalg.norm', 'np.linalg.norm', (['BS_User_position'], {'ord': '(2)', 'axis': '(0)'}), '(BS_User_position, ord=2, axis=0)\n', (5341, 5374), True, 'import numpy as np\n'), 
((6120, 6171), 'numpy.zeros', 'np.zeros', (['(2, self.usernum, self.InterferenceBSnum)'], {}), '((2, self.usernum, self.InterferenceBSnum))\n', (6128, 6171), True, 'import numpy as np\n'), ((6209, 6258), 'numpy.zeros', 'np.zeros', (['(self.usernum, self.BSnum)'], {'dtype': 'float'}), '((self.usernum, self.BSnum), dtype=float)\n', (6217, 6258), True, 'import numpy as np\n'), ((6367, 6402), 'numpy.zeros', 'np.zeros', (['self.usernum'], {'dtype': 'float'}), '(self.usernum, dtype=float)\n', (6375, 6402), True, 'import numpy as np\n'), ((6442, 6477), 'numpy.zeros', 'np.zeros', (['self.usernum'], {'dtype': 'float'}), '(self.usernum, dtype=float)\n', (6450, 6477), True, 'import numpy as np\n'), ((6866, 6925), 'numpy.linalg.norm', 'np.linalg.norm', (['InterferenceBS_User_position'], {'ord': '(2)', 'axis': '(0)'}), '(InterferenceBS_User_position, ord=2, axis=0)\n', (6880, 6925), True, 'import numpy as np\n'), ((8360, 8408), 'numpy.zeros', 'np.zeros', (['(self.usernum, self.BSnum)'], {'dtype': 'bool'}), '((self.usernum, self.BSnum), dtype=bool)\n', (8368, 8408), True, 'import numpy as np\n'), ((8508, 8535), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (8517, 8535), True, 'import numpy as np\n'), ((1589, 1618), 'numpy.log2', 'np.log2', (['(1 + 10 ** (SIR / 10))'], {}), '(1 + 10 ** (SIR / 10))\n', (1596, 1618), True, 'import numpy as np\n'), ((6942, 6974), 'numpy.sum', 'np.sum', (['self.InterferenceBSstate'], {}), '(self.InterferenceBSstate)\n', (6948, 6974), True, 'import numpy as np\n'), ((5774, 5820), 'numpy.log10', 'np.log10', (['BS_User_distance[assosiation_matrix]'], {}), '(BS_User_distance[assosiation_matrix])\n', (5782, 5820), True, 'import numpy as np\n'), ((5828, 5851), 'numpy.log10', 'np.log10', (['(3.5 * 10 ** 9)'], {}), '(3.5 * 10 ** 9)\n', (5836, 5851), True, 'import numpy as np\n'), ((7863, 7901), 'numpy.mean', 'np.mean', (['Interference_User_distance[i]'], {}), '(Interference_User_distance[i])\n', (7870, 7901), True, 'import numpy as np\n'), ((1927, 1944), 'numpy.mean', 'np.mean', (['Datarate'], {}), '(Datarate)\n', (1934, 1944), True, 'import numpy as np\n'), ((7196, 7219), 'numpy.log10', 'np.log10', (['(3.5 * 10 ** 9)'], {}), '(3.5 * 10 ** 9)\n', (7204, 7219), True, 'import numpy as np\n'), ((8035, 8068), 'numpy.log10', 'np.log10', (['user_interference_power'], {}), '(user_interference_power)\n', (8043, 8068), True, 'import numpy as np\n'), ((8076, 8099), 'numpy.log10', 'np.log10', (['(3.5 * 10 ** 9)'], {}), '(3.5 * 10 ** 9)\n', (8084, 8099), True, 'import numpy as np\n'), ((7143, 7186), 'numpy.mean', 'np.mean', (['Interference_User_distance'], {'axis': '(1)'}), '(Interference_User_distance, axis=1)\n', (7150, 7186), True, 'import numpy as np\n')]
|
import numpy as np
import sys
import qoi as qoi
import parallel as par
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ !
# ~~~~ Selection without replacement
# ~~~~ Sample K numbers from an array 0...N-1 and output them
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ !
def ransam(N,K):
if N<K:
print("ERROR in ransam: N = " + str(N) + " < K = "+str(K) )
sys.exit()
return np.random.choice(list(range(N)), size=K, replace=False)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ !
# ~~~~ Selection with replacement
# ~~~~ Sample K numbers from an array 0...N-1 and output them
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ !
def ransam_rep(N,K):
return np.random.choice(list(range(N)), size=K, replace=True)
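# Usage sketch (added for illustration only; not part of the original run path):
# draw 3 indices out of 0..9 without and with replacement.
#   idx_unique   = ransam(10, 3)       # e.g. array([7, 0, 4]); no repeats
#   idx_repeated = ransam_rep(10, 3)   # e.g. array([2, 2, 9]); repeats allowed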
def simSetUp(inpt,Sim):
NSim = Sim['NSim']
nmax = Sim['nmax']
# MPI parallelization
Sim['NRep'] = int(inpt['NRep'])
nRep_, startRep_ = par.partitionSim(Sim['NRep'])
Sim['nRep_'] = nRep_
Sim['startRep_'] = startRep_
Sim['Target level'] = float(inpt['Target level'])
Sim['Nselection'] = int(inpt['Nselection'])
Sim['Cweight'] = float(inpt['Cweight'])
Sim['Epsilon clone'] = float(inpt['Epsilon clone'])
Sim['Min target level'] = float(inpt['Min target level'])
Sim['Max target level'] = float(inpt['Max target level'])
Sim['Number of thresholds'] = int(inpt['Number of thresholds'])
Sim['NselectionThreshold'] = int(round((nmax+1)/Sim['Nselection'])) # Number of steps between cloning
Sim['NselectionTotal'] = int((nmax+1)/Sim['NselectionThreshold']) # Actual number of cloning
minLevel = Sim['Min target level']
maxLevel = Sim['Max target level']
nLevels = Sim['Number of thresholds']
Sim['Levels'] = np.linspace(minLevel,maxLevel,nLevels)
# Post proc
Sim['Plot ISP CDF'] = (inpt['Plot ISP CDF']=='True')
if Sim['Plot ISP CDF']:
Sim['True CDF file'] = inpt['True CDF file']
Sim['Plot kill history'] = (inpt['Plot kill history']=='True')
par.printRoot('Asked for ' + str(Sim['Nselection']) + ' cloning steps')
par.printRoot('Will perform ' + str(Sim['NselectionTotal']) + ' cloning steps')
par.printRoot('Number of step between cloning ' + str(Sim['NselectionThreshold']))
# Make sure the cloning step properly divide the simulation
if nmax+1-Sim['NselectionThreshold']*Sim['NselectionTotal'] > Sim['NselectionThreshold']:
par.printRoot('ERROR: Problem in setup of number of cloning steps')
sys.exit()
if nmax+1-Sim['NselectionThreshold']*Sim['NselectionTotal'] < 5:
        par.printRoot('WARNING: last cloning will be done with ' + str(nmax+1-Sim['NselectionThreshold']*Sim['NselectionTotal']) + ' steps')
# Monitor
Sim['numberKills'] = np.zeros((Sim['NselectionTotal'],Sim['nRep_']))
Sim['probabilities'] = np.zeros((Sim['Number of thresholds'],Sim['nRep_']))
Sim['Elastclone'] = np.zeros(NSim)
Sim['W'] = np.zeros((nmax+1,NSim))
Sim['Wbar'] = np.zeros((nmax+1,NSim))
Sim['Z'] = np.zeros(nmax+1)
Sim['numberClones'] = np.zeros(NSim,dtype=int)
Sim['nCloneAvail'] = 0
# Useful markers
Sim['Ni'] = 0 # Timestep counter to know when to clone
Sim['NselectionLoc'] = 0 # how many times have we cloned
Sim['timestepLastClone'] = 0 # when was the last cloning
# For probabilities computation
Sim['F_prob'] = np.zeros(NSim)
Sim['Rw'] = np.zeros(Sim['NselectionTotal']+1)
# Rare path
try:
Sim['UseRarePath'] = (inpt['UseRarePath']=='True')
except KeyError:
Sim['UseRarePath'] = False
if Sim['UseRarePath']:
Sim['RarePathFile'] = inpt['RarePathFile']
Sim['scaleFlucC'] = float(inpt['scaleFlucC'])
Sim['meanPath'] = np.load(Sim['RarePathFile'])['meanPath']
Sim['varPath'] = (np.load(Sim['RarePathFile'])['stdPath'])**2
Sim['rarePath'] = np.load(Sim['RarePathFile'])['rarePath']
for i in range(len(Sim['varPath'])):
if Sim['varPath'][i]<1e-6:
Sim['varPath'][i] = np.amax(Sim['varPath'])
def reset(Sim):
NSim = Sim['NSim']
nmax = Sim['nmax']
Sim['Elastclone'] = np.zeros(NSim)
Sim['W'] = np.zeros((nmax+1,NSim))
Sim['Wbar'] = np.zeros((nmax+1,NSim))
Sim['Z'] = np.zeros(nmax+1)
Sim['numberClones'] = np.zeros(NSim,dtype=int)
Sim['nCloneAvail'] = 0
# Useful markers
Sim['Ni'] = 0 # Timestep counter to know when to clone
Sim['NselectionLoc'] = 0 # how many times have we cloned
Sim['timestepLastClone'] = 0 # when was the last cloning
# For probabilities computation
Sim['F_prob'] = np.zeros(NSim)
Sim['Rw'] = np.zeros(Sim['NselectionTotal']+1)
def computeRareFlucC(Sim,itime):
return Sim['scaleFlucC']*(Sim['rarePath'][itime] - Sim['meanPath'][itime])/Sim['varPath'][itime]
def computeWeights(Sim,itime):
qoi = Sim['qoiTot'][itime,0,:]
# Weights
if not Sim['UseRarePath']:
Sim['W'][itime,:] = np.exp(Sim['Cweight']*(qoi-Sim['Elastclone']))
else:
C = computeRareFlucC(Sim,itime)
ClastClone = computeRareFlucC(Sim,Sim['timestepLastClone'])
Sim['W'][itime,:] = np.exp(C*qoi-ClastClone*Sim['Elastclone'])
# Advance markers
Sim['NselectionLoc'] += 1
#print("cloning # " + str(Sim['NselectionLoc']))
# Reinitialize timestep marker
Sim['Ni'] = 0
# Compute Z
Sim['Z'][itime] = np.mean(Sim['W'][itime,:])
# Initialize parameters for cloning
rnd = np.random.rand(Sim['NSim'])#random numbers between 0 and 1
Sim['Wbar'][itime,:] = Sim['W'][itime,:]/Sim['Z'][itime]
Sim['numberClones'] = np.maximum(np.floor(Sim['Wbar'][itime,:]+rnd),0)
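    # Added note: floor(Wbar + U) with U ~ Uniform(0, 1) is a stochastic rounding of
    # the normalized weight Wbar, so E[numberClones] = Wbar for each walker; clone()
    # below then rebalances the total count back to exactly NSim.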
def clone(Sim,itime,irep):
# ~~~~ Get the difference between how many clones are created and how many total simulations should be there
numberDiff = int(np.sum(Sim['numberClones']) - Sim['NSim'])
# How many trajectories have numberClones>0
Iavail = np.argwhere(Sim['numberClones']>0)
numberAvail = len(Iavail)
# ~~~~ Balance the number of sim
# If the number of sim is too high, remove some of them randomly
if numberDiff>0:
# Select simulations to kill
toKill = ransam(numberAvail,numberDiff)
# Kill
Sim['numberClones'][Iavail[toKill]] -= 1
# ~~~~ Balance the number of sim
# If the number of sim is too low, add some of them randomly
if numberDiff<0:
# Select simulations to clone
toClone = ransam_rep(numberAvail,-numberDiff)
# Clone
for indClone in list(toClone):
Sim['numberClones'][Iavail[indClone]] += 1
# ~~~~ Verify that the number of simulation is good
if not np.sum(Sim['numberClones']) - Sim['NSim'] == 0:
print("ERROR in clone: number of clones inconsistent with total number of Sim")
sys.exit()
# ~~~~ Now, perform the cloning: assign the clone to the right simulations
# Find the simulations that should be killed
# These are the ones that will host the clones !
# First get the number of simulations that are killed
Ikilled = np.argwhere(Sim['numberClones']<=0)
numberKilled = len(Ikilled)
# Get the simulations that are cloned
Icloned = np.argwhere(Sim['numberClones']>1)
# Monitor number of kills
Sim['numberKills'][Sim['NselectionLoc']-1,irep] = numberKilled
# ~~~~ Now clone simulations
    # Take a simulation to kill and replace it with a simulation to clone
epsilonClone = Sim['Epsilon clone']
if numberKilled >0 and np.amax(Sim['numberClones'])>1:
counter = -1
for iclone in list(Icloned):
nclones = int(Sim['numberClones'][iclone] - 1)
for p in range(nclones):
counter += 1
Sim['u'][:,Ikilled[counter]] = Sim['u'][:,iclone] + epsilonClone*np.random.normal(loc=0.0,
scale=1.0,
size=(Sim['u'].shape[0],1))
Sim['qoiTot'][:itime+1,0,Ikilled[counter]] = Sim['qoiTot'][:itime+1,0,iclone]
Sim['numberClones'][iclone] -= 1
Sim['numberClones'][Ikilled[counter]] += 1
# Verify that the number of simulation is good
if not np.sum(Sim['numberClones']) == Sim['NSim']:
        print('ERROR in clone: number of clones inconsistent with NSim')
sys.exit()
def prestep(u,Sim,itime):
if Sim['Ni'] == 0:
Sim['timestepLastClone'] = itime-1#*Sim['Timestep']
Sim['Elastclone'] = Sim['qoiFunc'](u)
else:
return
def step(Sim):
Sim['Ni'] += 1
def poststep(Sim,itime,irep):
if not Sim['Ni'] == Sim['NselectionThreshold']:
return
computeWeights(Sim,itime)
clone(Sim,itime,irep)
def finalize(Sim,irep):
qoiEnd = Sim['qoiTot'][-1,0,:]
qoiInit = Sim['qoiTot'][0,0,:]
# Weights
if not Sim['UseRarePath']:
Sim['W'][-1,:] = np.exp(Sim['Cweight']*(qoiEnd-Sim['Elastclone']))
else:
C = computeRareFlucC(Sim,-1)
ClastClone = computeRareFlucC(Sim,Sim['timestepLastClone'])
Sim['W'][-1,:] = np.exp(C*qoiEnd-ClastClone*Sim['Elastclone'])
#print("Finalize splitting")
# Compute Z
#Sim['Z'][-1] = 1
Sim['Z'][-1] = np.mean(Sim['W'][-1,:])
# Compute for each level
for ilevel, level in enumerate(Sim['Levels'].tolist()):
Sim['F_prob'] = np.zeros(Sim['NSim'])
indLevel = np.argwhere(qoiEnd>=level)
if not Sim['UseRarePath']:
Sim['F_prob'][indLevel] = np.exp(Sim['Cweight']*(qoiInit[indLevel] - qoiEnd[indLevel]))
else:
CEnd = computeRareFlucC(Sim,-1)
CInit = computeRareFlucC(Sim,0)
Sim['F_prob'][indLevel] = np.exp(CInit*qoiInit[indLevel] - CEnd*qoiEnd[indLevel])
productZ=1.0
for itimestep in range(Sim['nmax']+1):
if abs(Sim['Z'][itimestep])>1e-12:
productZ = productZ*Sim['Z'][itimestep]
sumF = np.sum(Sim['F_prob'])
Sim['probabilities'][ilevel,irep] = sumF*productZ/Sim['NSim']
|
[
"numpy.random.normal",
"numpy.mean",
"numpy.random.rand",
"parallel.printRoot",
"numpy.floor",
"parallel.partitionSim",
"numpy.exp",
"numpy.linspace",
"numpy.zeros",
"numpy.argwhere",
"numpy.sum",
"sys.exit",
"numpy.load",
"numpy.amax"
] |
[((1027, 1056), 'parallel.partitionSim', 'par.partitionSim', (["Sim['NRep']"], {}), "(Sim['NRep'])\n", (1043, 1056), True, 'import parallel as par\n'), ((1855, 1895), 'numpy.linspace', 'np.linspace', (['minLevel', 'maxLevel', 'nLevels'], {}), '(minLevel, maxLevel, nLevels)\n', (1866, 1895), True, 'import numpy as np\n'), ((2880, 2928), 'numpy.zeros', 'np.zeros', (["(Sim['NselectionTotal'], Sim['nRep_'])"], {}), "((Sim['NselectionTotal'], Sim['nRep_']))\n", (2888, 2928), True, 'import numpy as np\n'), ((2955, 3008), 'numpy.zeros', 'np.zeros', (["(Sim['Number of thresholds'], Sim['nRep_'])"], {}), "((Sim['Number of thresholds'], Sim['nRep_']))\n", (2963, 3008), True, 'import numpy as np\n'), ((3033, 3047), 'numpy.zeros', 'np.zeros', (['NSim'], {}), '(NSim)\n', (3041, 3047), True, 'import numpy as np\n'), ((3063, 3089), 'numpy.zeros', 'np.zeros', (['(nmax + 1, NSim)'], {}), '((nmax + 1, NSim))\n', (3071, 3089), True, 'import numpy as np\n'), ((3105, 3131), 'numpy.zeros', 'np.zeros', (['(nmax + 1, NSim)'], {}), '((nmax + 1, NSim))\n', (3113, 3131), True, 'import numpy as np\n'), ((3144, 3162), 'numpy.zeros', 'np.zeros', (['(nmax + 1)'], {}), '(nmax + 1)\n', (3152, 3162), True, 'import numpy as np\n'), ((3187, 3212), 'numpy.zeros', 'np.zeros', (['NSim'], {'dtype': 'int'}), '(NSim, dtype=int)\n', (3195, 3212), True, 'import numpy as np\n'), ((3505, 3519), 'numpy.zeros', 'np.zeros', (['NSim'], {}), '(NSim)\n', (3513, 3519), True, 'import numpy as np\n'), ((3536, 3572), 'numpy.zeros', 'np.zeros', (["(Sim['NselectionTotal'] + 1)"], {}), "(Sim['NselectionTotal'] + 1)\n", (3544, 3572), True, 'import numpy as np\n'), ((4310, 4324), 'numpy.zeros', 'np.zeros', (['NSim'], {}), '(NSim)\n', (4318, 4324), True, 'import numpy as np\n'), ((4340, 4366), 'numpy.zeros', 'np.zeros', (['(nmax + 1, NSim)'], {}), '((nmax + 1, NSim))\n', (4348, 4366), True, 'import numpy as np\n'), ((4382, 4408), 'numpy.zeros', 'np.zeros', (['(nmax + 1, NSim)'], {}), '((nmax + 1, NSim))\n', (4390, 4408), True, 'import numpy as np\n'), ((4421, 4439), 'numpy.zeros', 'np.zeros', (['(nmax + 1)'], {}), '(nmax + 1)\n', (4429, 4439), True, 'import numpy as np\n'), ((4464, 4489), 'numpy.zeros', 'np.zeros', (['NSim'], {'dtype': 'int'}), '(NSim, dtype=int)\n', (4472, 4489), True, 'import numpy as np\n'), ((4782, 4796), 'numpy.zeros', 'np.zeros', (['NSim'], {}), '(NSim)\n', (4790, 4796), True, 'import numpy as np\n'), ((4813, 4849), 'numpy.zeros', 'np.zeros', (["(Sim['NselectionTotal'] + 1)"], {}), "(Sim['NselectionTotal'] + 1)\n", (4821, 4849), True, 'import numpy as np\n'), ((5569, 5596), 'numpy.mean', 'np.mean', (["Sim['W'][itime, :]"], {}), "(Sim['W'][itime, :])\n", (5576, 5596), True, 'import numpy as np\n'), ((5647, 5674), 'numpy.random.rand', 'np.random.rand', (["Sim['NSim']"], {}), "(Sim['NSim'])\n", (5661, 5674), True, 'import numpy as np\n'), ((6110, 6146), 'numpy.argwhere', 'np.argwhere', (["(Sim['numberClones'] > 0)"], {}), "(Sim['numberClones'] > 0)\n", (6121, 6146), True, 'import numpy as np\n'), ((7288, 7325), 'numpy.argwhere', 'np.argwhere', (["(Sim['numberClones'] <= 0)"], {}), "(Sim['numberClones'] <= 0)\n", (7299, 7325), True, 'import numpy as np\n'), ((7414, 7450), 'numpy.argwhere', 'np.argwhere', (["(Sim['numberClones'] > 1)"], {}), "(Sim['numberClones'] > 1)\n", (7425, 7450), True, 'import numpy as np\n'), ((9560, 9584), 'numpy.mean', 'np.mean', (["Sim['W'][-1, :]"], {}), "(Sim['W'][-1, :])\n", (9567, 9584), True, 'import numpy as np\n'), ((441, 451), 'sys.exit', 'sys.exit', ([], {}), '()\n', (449, 451), False, 'import 
sys\n'), ((2539, 2606), 'parallel.printRoot', 'par.printRoot', (['"""ERROR: Problem in setup of number of cloning steps"""'], {}), "('ERROR: Problem in setup of number of cloning steps')\n", (2552, 2606), True, 'import parallel as par\n'), ((2615, 2625), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2623, 2625), False, 'import sys\n'), ((5132, 5182), 'numpy.exp', 'np.exp', (["(Sim['Cweight'] * (qoi - Sim['Elastclone']))"], {}), "(Sim['Cweight'] * (qoi - Sim['Elastclone']))\n", (5138, 5182), True, 'import numpy as np\n'), ((5325, 5373), 'numpy.exp', 'np.exp', (["(C * qoi - ClastClone * Sim['Elastclone'])"], {}), "(C * qoi - ClastClone * Sim['Elastclone'])\n", (5331, 5373), True, 'import numpy as np\n'), ((5805, 5842), 'numpy.floor', 'np.floor', (["(Sim['Wbar'][itime, :] + rnd)"], {}), "(Sim['Wbar'][itime, :] + rnd)\n", (5813, 5842), True, 'import numpy as np\n'), ((7020, 7030), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7028, 7030), False, 'import sys\n'), ((8675, 8685), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8683, 8685), False, 'import sys\n'), ((9228, 9281), 'numpy.exp', 'np.exp', (["(Sim['Cweight'] * (qoiEnd - Sim['Elastclone']))"], {}), "(Sim['Cweight'] * (qoiEnd - Sim['Elastclone']))\n", (9234, 9281), True, 'import numpy as np\n'), ((9418, 9469), 'numpy.exp', 'np.exp', (["(C * qoiEnd - ClastClone * Sim['Elastclone'])"], {}), "(C * qoiEnd - ClastClone * Sim['Elastclone'])\n", (9424, 9469), True, 'import numpy as np\n'), ((9698, 9719), 'numpy.zeros', 'np.zeros', (["Sim['NSim']"], {}), "(Sim['NSim'])\n", (9706, 9719), True, 'import numpy as np\n'), ((9740, 9768), 'numpy.argwhere', 'np.argwhere', (['(qoiEnd >= level)'], {}), '(qoiEnd >= level)\n', (9751, 9768), True, 'import numpy as np\n'), ((10286, 10307), 'numpy.sum', 'np.sum', (["Sim['F_prob']"], {}), "(Sim['F_prob'])\n", (10292, 10307), True, 'import numpy as np\n'), ((3874, 3902), 'numpy.load', 'np.load', (["Sim['RarePathFile']"], {}), "(Sim['RarePathFile'])\n", (3881, 3902), True, 'import numpy as np\n'), ((4016, 4044), 'numpy.load', 'np.load', (["Sim['RarePathFile']"], {}), "(Sim['RarePathFile'])\n", (4023, 4044), True, 'import numpy as np\n'), ((6005, 6032), 'numpy.sum', 'np.sum', (["Sim['numberClones']"], {}), "(Sim['numberClones'])\n", (6011, 6032), True, 'import numpy as np\n'), ((7724, 7752), 'numpy.amax', 'np.amax', (["Sim['numberClones']"], {}), "(Sim['numberClones'])\n", (7731, 7752), True, 'import numpy as np\n'), ((8551, 8578), 'numpy.sum', 'np.sum', (["Sim['numberClones']"], {}), "(Sim['numberClones'])\n", (8557, 8578), True, 'import numpy as np\n'), ((9840, 9903), 'numpy.exp', 'np.exp', (["(Sim['Cweight'] * (qoiInit[indLevel] - qoiEnd[indLevel]))"], {}), "(Sim['Cweight'] * (qoiInit[indLevel] - qoiEnd[indLevel]))\n", (9846, 9903), True, 'import numpy as np\n'), ((10042, 10101), 'numpy.exp', 'np.exp', (['(CInit * qoiInit[indLevel] - CEnd * qoiEnd[indLevel])'], {}), '(CInit * qoiInit[indLevel] - CEnd * qoiEnd[indLevel])\n', (10048, 10101), True, 'import numpy as np\n'), ((3946, 3974), 'numpy.load', 'np.load', (["Sim['RarePathFile']"], {}), "(Sim['RarePathFile'])\n", (3953, 3974), True, 'import numpy as np\n'), ((4177, 4200), 'numpy.amax', 'np.amax', (["Sim['varPath']"], {}), "(Sim['varPath'])\n", (4184, 4200), True, 'import numpy as np\n'), ((6876, 6903), 'numpy.sum', 'np.sum', (["Sim['numberClones']"], {}), "(Sim['numberClones'])\n", (6882, 6903), True, 'import numpy as np\n'), ((8021, 8086), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': "(Sim['u'].shape[0], 1)"}), "(loc=0.0, 
scale=1.0, size=(Sim['u'].shape[0], 1))\n", (8037, 8086), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
run consensus analysis to identify overall pattern
analysis method developed by <NAME> and <NAME>
"""
import os
import sys
import glob
import numpy
import nibabel
import nilearn.plotting
import nilearn.input_data
import matplotlib.pyplot as plt
from statsmodels.stats.multitest import multipletests
import scipy.stats
from narps import Narps, hypnums, hypotheses
from narps import NarpsDirs # noqa, flake8 issue
from utils import log_to_file
def t_corr(y, res_mean=None, res_var=None, Q=None):
"""
perform a one-sample t-test on correlated data
y = data (n observations X n vars)
res_mean = Common mean over voxels and results
res_var = Common variance over voxels and results
Q = "known" correlation across observations
- (use empirical correlation based on maps)
"""
npts = y.shape[0]
X = numpy.ones((npts, 1))
if res_mean is None:
res_mean = 0
if res_var is None:
res_var = 1
if Q is None:
Q = numpy.eye(npts)
VarMean = res_var * X.T.dot(Q).dot(X) / npts**2
# T = mean(y,0)/s-hat-2
# use diag to get s_hat2 for each variable
T = (numpy.mean(y, 0)-res_mean
)/numpy.sqrt(VarMean)*numpy.sqrt(res_var) + res_mean
# Assuming variance is estimated on whole image
# and assuming infinite df
p = 1 - scipy.stats.norm.cdf(T)
    return (T, p)
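# Minimal usage sketch for t_corr (added for illustration; the shapes below are
# hypothetical, not taken from the NARPS data):
#   y = numpy.random.randn(20, 1000)    # 20 correlated maps x 1000 voxels
#   Q = numpy.corrcoef(y)               # empirical 20 x 20 correlation of the maps
#   T, p = t_corr(y, res_mean=0, res_var=1, Q=Q)
#   # T and p both have shape (1, 1000): one statistic / p-value per voxel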
def run_ttests(narps, logfile,
overwrite=True):
masker = nilearn.input_data.NiftiMasker(
mask_img=narps.dirs.MNI_mask)
results_dir = narps.dirs.dirs['consensus']
func_name = sys._getframe().f_code.co_name
log_to_file(
logfile, '%s' %
func_name)
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for hyp in hypnums:
if not overwrite and os.path.exists(os.path.join(
results_dir,
'hypo%d_1-fdr.nii.gz' % hyp)):
print('using existing results')
continue
print('running consensus analysis for hypothesis', hyp)
maps = glob.glob(os.path.join(
narps.dirs.dirs['output'],
'zstat/*/hypo%d_unthresh.nii.gz' % hyp))
maps.sort()
data = masker.fit_transform(maps)
# get estimated mean, variance, and correlation for t_corr
img_mean = numpy.mean(data)
img_var = numpy.mean(numpy.var(data, 1))
cc = numpy.corrcoef(data)
log_to_file(
logfile,
'mean = %f, var = %f, mean_cc = %f' %
(img_mean, img_var,
numpy.mean(cc[numpy.triu_indices_from(cc, 1)])))
# perform t-test
tvals, pvals = t_corr(data,
res_mean=img_mean,
res_var=img_var,
Q=cc)
# move back into image format
timg = masker.inverse_transform(tvals)
timg.to_filename(os.path.join(results_dir, 'hypo%d_t.nii.gz' % hyp))
pimg = masker.inverse_transform(1-pvals)
pimg.to_filename(os.path.join(results_dir, 'hypo%d_1-p.nii.gz' % hyp))
fdr_results = multipletests(pvals[0, :], 0.05, 'fdr_tsbh')
log_to_file(
logfile,
"%d voxels significant at FDR corrected p<.05" %
numpy.sum(fdr_results[0]))
fdrimg = masker.inverse_transform(1 - fdr_results[1])
fdrimg.to_filename(os.path.join(
results_dir,
'hypo%d_1-fdr.nii.gz' % hyp))
def mk_figures(narps, logfile, thresh=0.95):
func_name = sys._getframe().f_code.co_name
log_to_file(
logfile, '%s' %
func_name)
fig, ax = plt.subplots(7, 1, figsize=(12, 24))
cut_coords = [-24, -10, 4, 18, 32, 52, 64]
for i, hyp in enumerate(hypnums):
pmap = os.path.join(
narps.dirs.dirs['consensus'],
'hypo%d_1-fdr.nii.gz' % hyp)
tmap = os.path.join(
narps.dirs.dirs['consensus'],
'hypo%d_t.nii.gz' % hyp)
pimg = nibabel.load(pmap)
timg = nibabel.load(tmap)
pdata = pimg.get_fdata()
tdata = timg.get_fdata()[:, :, :, 0]
threshdata = (pdata > thresh)*tdata
threshimg = nibabel.Nifti1Image(threshdata, affine=timg.affine)
nilearn.plotting.plot_stat_map(
threshimg,
threshold=0.1,
display_mode="z",
colorbar=True,
title='hyp %d:' % hyp+hypotheses[hyp],
vmax=8,
cmap='jet',
cut_coords=cut_coords,
axes=ax[i])
plt.savefig(os.path.join(
narps.dirs.dirs['figures'],
'consensus_map.pdf'))
plt.close(fig)
if __name__ == "__main__":
# set an environment variable called NARPS_BASEDIR
# with location of base directory
if 'NARPS_BASEDIR' in os.environ:
basedir = os.environ['NARPS_BASEDIR']
else:
basedir = '/data'
# setup main class
narps = Narps(basedir)
narps.load_data()
narps.dirs.dirs['consensus'] = os.path.join(
narps.dirs.dirs['output'],
'consensus_analysis')
logfile = os.path.join(
narps.dirs.dirs['logs'],
'%s.txt' % sys.argv[0].split('.')[0])
log_to_file(
logfile, 'running %s' %
sys.argv[0].split('.')[0],
flush=True)
if not os.path.exists(narps.dirs.dirs['consensus']):
os.mkdir(narps.dirs.dirs['consensus'])
run_ttests(narps, logfile)
mk_figures(narps, logfile)
|
[
"numpy.sqrt",
"nibabel.load",
"statsmodels.stats.multitest.multipletests",
"os.path.exists",
"numpy.mean",
"sys._getframe",
"matplotlib.pyplot.close",
"os.mkdir",
"numpy.eye",
"numpy.ones",
"numpy.corrcoef",
"numpy.triu_indices_from",
"nibabel.Nifti1Image",
"utils.log_to_file",
"narps.Narps",
"os.path.join",
"numpy.sum",
"matplotlib.pyplot.subplots",
"numpy.var"
] |
[((879, 900), 'numpy.ones', 'numpy.ones', (['(npts, 1)'], {}), '((npts, 1))\n', (889, 900), False, 'import numpy\n'), ((1653, 1691), 'utils.log_to_file', 'log_to_file', (['logfile', "('%s' % func_name)"], {}), "(logfile, '%s' % func_name)\n", (1664, 1691), False, 'from utils import log_to_file\n'), ((3597, 3635), 'utils.log_to_file', 'log_to_file', (['logfile', "('%s' % func_name)"], {}), "(logfile, '%s' % func_name)\n", (3608, 3635), False, 'from utils import log_to_file\n'), ((3668, 3704), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(7)', '(1)'], {'figsize': '(12, 24)'}), '(7, 1, figsize=(12, 24))\n', (3680, 3704), True, 'import matplotlib.pyplot as plt\n'), ((4675, 4689), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4684, 4689), True, 'import matplotlib.pyplot as plt\n'), ((4968, 4982), 'narps.Narps', 'Narps', (['basedir'], {}), '(basedir)\n', (4973, 4982), False, 'from narps import Narps, hypnums, hypotheses\n'), ((5040, 5101), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['output']", '"""consensus_analysis"""'], {}), "(narps.dirs.dirs['output'], 'consensus_analysis')\n", (5052, 5101), False, 'import os\n'), ((1024, 1039), 'numpy.eye', 'numpy.eye', (['npts'], {}), '(npts)\n', (1033, 1039), False, 'import numpy\n'), ((1721, 1748), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (1735, 1748), False, 'import os\n'), ((1758, 1779), 'os.mkdir', 'os.mkdir', (['results_dir'], {}), '(results_dir)\n', (1766, 1779), False, 'import os\n'), ((2348, 2364), 'numpy.mean', 'numpy.mean', (['data'], {}), '(data)\n', (2358, 2364), False, 'import numpy\n'), ((2427, 2447), 'numpy.corrcoef', 'numpy.corrcoef', (['data'], {}), '(data)\n', (2441, 2447), False, 'import numpy\n'), ((3141, 3185), 'statsmodels.stats.multitest.multipletests', 'multipletests', (['pvals[0, :]', '(0.05)', '"""fdr_tsbh"""'], {}), "(pvals[0, :], 0.05, 'fdr_tsbh')\n", (3154, 3185), False, 'from statsmodels.stats.multitest import multipletests\n'), ((3806, 3877), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['consensus']", "('hypo%d_1-fdr.nii.gz' % hyp)"], {}), "(narps.dirs.dirs['consensus'], 'hypo%d_1-fdr.nii.gz' % hyp)\n", (3818, 3877), False, 'import os\n'), ((3918, 3985), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['consensus']", "('hypo%d_t.nii.gz' % hyp)"], {}), "(narps.dirs.dirs['consensus'], 'hypo%d_t.nii.gz' % hyp)\n", (3930, 3985), False, 'import os\n'), ((4026, 4044), 'nibabel.load', 'nibabel.load', (['pmap'], {}), '(pmap)\n', (4038, 4044), False, 'import nibabel\n'), ((4060, 4078), 'nibabel.load', 'nibabel.load', (['tmap'], {}), '(tmap)\n', (4072, 4078), False, 'import nibabel\n'), ((4221, 4272), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['threshdata'], {'affine': 'timg.affine'}), '(threshdata, affine=timg.affine)\n', (4240, 4272), False, 'import nibabel\n'), ((4591, 4652), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['figures']", '"""consensus_map.pdf"""'], {}), "(narps.dirs.dirs['figures'], 'consensus_map.pdf')\n", (4603, 4652), False, 'import os\n'), ((5343, 5387), 'os.path.exists', 'os.path.exists', (["narps.dirs.dirs['consensus']"], {}), "(narps.dirs.dirs['consensus'])\n", (5357, 5387), False, 'import os\n'), ((5397, 5435), 'os.mkdir', 'os.mkdir', (["narps.dirs.dirs['consensus']"], {}), "(narps.dirs.dirs['consensus'])\n", (5405, 5435), False, 'import os\n'), ((1237, 1256), 'numpy.sqrt', 'numpy.sqrt', (['res_var'], {}), '(res_var)\n', (1247, 1256), False, 'import numpy\n'), ((1618, 1633), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', 
(1631, 1633), False, 'import sys\n'), ((2093, 2172), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['output']", "('zstat/*/hypo%d_unthresh.nii.gz' % hyp)"], {}), "(narps.dirs.dirs['output'], 'zstat/*/hypo%d_unthresh.nii.gz' % hyp)\n", (2105, 2172), False, 'import os\n'), ((2394, 2412), 'numpy.var', 'numpy.var', (['data', '(1)'], {}), '(data, 1)\n', (2403, 2412), False, 'import numpy\n'), ((2939, 2989), 'os.path.join', 'os.path.join', (['results_dir', "('hypo%d_t.nii.gz' % hyp)"], {}), "(results_dir, 'hypo%d_t.nii.gz' % hyp)\n", (2951, 2989), False, 'import os\n'), ((3065, 3117), 'os.path.join', 'os.path.join', (['results_dir', "('hypo%d_1-p.nii.gz' % hyp)"], {}), "(results_dir, 'hypo%d_1-p.nii.gz' % hyp)\n", (3077, 3117), False, 'import os\n'), ((3417, 3471), 'os.path.join', 'os.path.join', (['results_dir', "('hypo%d_1-fdr.nii.gz' % hyp)"], {}), "(results_dir, 'hypo%d_1-fdr.nii.gz' % hyp)\n", (3429, 3471), False, 'import os\n'), ((3562, 3577), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (3575, 3577), False, 'import sys\n'), ((1217, 1236), 'numpy.sqrt', 'numpy.sqrt', (['VarMean'], {}), '(VarMean)\n', (1227, 1236), False, 'import numpy\n'), ((1849, 1903), 'os.path.join', 'os.path.join', (['results_dir', "('hypo%d_1-fdr.nii.gz' % hyp)"], {}), "(results_dir, 'hypo%d_1-fdr.nii.gz' % hyp)\n", (1861, 1903), False, 'import os\n'), ((3301, 3326), 'numpy.sum', 'numpy.sum', (['fdr_results[0]'], {}), '(fdr_results[0])\n', (3310, 3326), False, 'import numpy\n'), ((1180, 1196), 'numpy.mean', 'numpy.mean', (['y', '(0)'], {}), '(y, 0)\n', (1190, 1196), False, 'import numpy\n'), ((2599, 2629), 'numpy.triu_indices_from', 'numpy.triu_indices_from', (['cc', '(1)'], {}), '(cc, 1)\n', (2622, 2629), False, 'import numpy\n')]
|
import numpy as np
from multiprocessing import Process, Queue
from mxnet.io import DataIter, DataBatch
import mxnet as mx
from PIL import Image
import os
import preprocessing
import logging
import sys
#rgb_mean=(140.5192, 59.6655, 63.8419), #mean on tote trainval
class TrainDataIterator(DataIter):
def __init__(self,
root_dir,
flist_path,
rgb_mean=(128,128,128),
random_flip=True,
random_scale=False,
random_rotate=True,
scale_range=(0.8, 1.2),
crop_size=400,
random_crop=True,
epoch_size=True,
label_shrink_scale=1.0,
shuffle=True,
data_queue_size=100,
batch_size=1,
data_worker_num=1):
self.rgb_mean = np.array(rgb_mean, dtype=np.uint8).reshape((1,1,3))
self.random_flip = random_flip
self.random_scale = random_scale
self.random_rotate = random_rotate
self.scale_range = scale_range
assert scale_range[1]>=scale_range[0]>0
self.crop_size = crop_size
self.label_shrink_scale = label_shrink_scale
self.random_crop = random_crop
self.epoch_size = epoch_size
self.data_count = 0
self.shuffle = shuffle
self.batch_size = batch_size
self.flist = None
self.root_dir = root_dir
self._load_flist(flist_path)
self.data_num = self.get_data_num()
self.avail_data_num = self.data_num
self.cursor = 0
self.reset_list()
self.flist_item_queue = Queue(maxsize=1000)
self.list_producer = Process(target=self._produce_flist_item)
self.list_producer.daemon = True
self.list_producer.start()
self.data_queue = Queue(maxsize=data_queue_size)
for i in range(data_worker_num):
producer = Process(target=self._produce_data)
producer.daemon = True
producer.start()
def _produce_flist_item(self):
while True:
if self.cursor + 1 <= self.data_num:
file = self.flist[self.cursor]
self.flist_item_queue.put(file)
self.cursor += 1
else:
self.reset_list()
def _produce_data(self):
while True:
flist_item = self.flist_item_queue.get()
value = self._process_data(flist_item)
if value is not None:
self.data_queue.put(value)
def get_data(self):
images = []
labels = []
for i in range(self.batch_size):
data = self.data_queue.get()
images.append(data[0])
labels.append(data[1])
images = np.concatenate(images)
labels = np.concatenate(labels)
return (mx.nd.array(images), mx.nd.array(labels))
def get_data_num(self):
return len(self.flist)
def _load_flist(self,
flist_path):
with open(flist_path) as f:
lines = f.readlines()
self.flist = []
for line in lines:
if len(line.rstrip()) == 0:
continue
item = self._parse_flist_item(line.rstrip())
self.flist.append(item)
self.data_num = len(self.flist)
def reset_list(self):
self.cursor = 0
if self.shuffle:
np.random.shuffle(self.flist)
def _process_data(self, item):
try:
im = Image.open(os.path.join(self.root_dir, item[0]))
im = im.convert("RGB")
l = Image.open(os.path.join(self.root_dir, item[1]))
except Exception as e:
logging.info(e)
return None
if self.random_rotate:
deg = np.random.rand(1) * 360
im=im.rotate(deg, resample=Image.BICUBIC, expand=True)
l=l.rotate(deg, resample=Image.NEAREST, expand=True)
im_arr = np.array(im)
l_arr = np.array(l)
r_start, c_start, new_crop_size = preprocessing.calc_crop_params(im_arr, self.scale_range, self.crop_size)
#random flip
if self.random_flip:
im_arr, l_arr = preprocessing.random_flip(im_arr, l_arr)
im_arr, l_arr = preprocessing.pad_image(im_arr, l_arr, new_crop_size, self.rgb_mean)
#do crop
if self.random_crop:
im_arr = im_arr[r_start:r_start+new_crop_size, c_start:c_start+new_crop_size, :]
l_arr = l_arr[r_start:r_start+new_crop_size, c_start:c_start+new_crop_size]
#do resize
im_arr = Image.fromarray(im_arr).resize((self.crop_size, self.crop_size), Image.BICUBIC)
im_arr = np.array(im_arr, dtype=np.float32)
im_arr -= self.rgb_mean
l_dim = int(self.crop_size*self.label_shrink_scale)
l_arr = Image.fromarray(l_arr).resize((l_dim, l_dim), Image.NEAREST)
l_arr = np.array(l_arr, dtype=np.uint8)
im_arr = np.expand_dims(im_arr, 0)
im_arr = np.transpose(im_arr, [0, 3, 1, 2])
l_arr = l_arr.reshape(1, -1)
return (im_arr, l_arr)
def _parse_flist_item(self, line):
items = line.split("\t")
assert len(items) == 2
im = items[0]
l = items[1]
return (im, l)
@property
def provide_data(self):
return [("data", (self.batch_size, 3, self.crop_size, self.crop_size))]
@property
def provide_label(self):
label_dim = int(self.crop_size*self.label_shrink_scale)
return [("softmax_label", (self.batch_size, label_dim*label_dim))]
def reset(self):
self.data_count = 0
pass
def iter_next(self):
self.data_count += self.batch_size
return self.data_count <= self.epoch_size*self.batch_size
def next(self):
if self.iter_next():
data = self.get_data()
return DataBatch(data=[data[0]], label=[data[1]], pad=None, index=None)
else:
raise StopIteration
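# Rough usage sketch (added; the paths, list file and sizes are placeholders, not
# taken from the original project):
#   it = TrainDataIterator(root_dir="/data/seg", flist_path="/data/seg/train.lst",
#                          crop_size=400, batch_size=8, epoch_size=1000)
#   for batch in it:                      # yields mxnet DataBatch objects
#       pass  # batch.data[0]: (8, 3, 400, 400), batch.label[0]: (8, 400*400)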
|
[
"PIL.Image.fromarray",
"numpy.random.rand",
"preprocessing.calc_crop_params",
"multiprocessing.Process",
"os.path.join",
"mxnet.io.DataBatch",
"preprocessing.pad_image",
"numpy.array",
"preprocessing.random_flip",
"numpy.concatenate",
"numpy.expand_dims",
"mxnet.nd.array",
"multiprocessing.Queue",
"numpy.transpose",
"logging.info",
"numpy.random.shuffle"
] |
[((1715, 1734), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(1000)'}), '(maxsize=1000)\n', (1720, 1734), False, 'from multiprocessing import Process, Queue\n'), ((1764, 1804), 'multiprocessing.Process', 'Process', ([], {'target': 'self._produce_flist_item'}), '(target=self._produce_flist_item)\n', (1771, 1804), False, 'from multiprocessing import Process, Queue\n'), ((1908, 1938), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': 'data_queue_size'}), '(maxsize=data_queue_size)\n', (1913, 1938), False, 'from multiprocessing import Process, Queue\n'), ((2852, 2874), 'numpy.concatenate', 'np.concatenate', (['images'], {}), '(images)\n', (2866, 2874), True, 'import numpy as np\n'), ((2892, 2914), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (2906, 2914), True, 'import numpy as np\n'), ((4095, 4107), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4103, 4107), True, 'import numpy as np\n'), ((4124, 4135), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (4132, 4135), True, 'import numpy as np\n'), ((4178, 4250), 'preprocessing.calc_crop_params', 'preprocessing.calc_crop_params', (['im_arr', 'self.scale_range', 'self.crop_size'], {}), '(im_arr, self.scale_range, self.crop_size)\n', (4208, 4250), False, 'import preprocessing\n'), ((4396, 4464), 'preprocessing.pad_image', 'preprocessing.pad_image', (['im_arr', 'l_arr', 'new_crop_size', 'self.rgb_mean'], {}), '(im_arr, l_arr, new_crop_size, self.rgb_mean)\n', (4419, 4464), False, 'import preprocessing\n'), ((4830, 4864), 'numpy.array', 'np.array', (['im_arr'], {'dtype': 'np.float32'}), '(im_arr, dtype=np.float32)\n', (4838, 4864), True, 'import numpy as np\n'), ((5052, 5083), 'numpy.array', 'np.array', (['l_arr'], {'dtype': 'np.uint8'}), '(l_arr, dtype=np.uint8)\n', (5060, 5083), True, 'import numpy as np\n'), ((5103, 5128), 'numpy.expand_dims', 'np.expand_dims', (['im_arr', '(0)'], {}), '(im_arr, 0)\n', (5117, 5128), True, 'import numpy as np\n'), ((5146, 5180), 'numpy.transpose', 'np.transpose', (['im_arr', '[0, 3, 1, 2]'], {}), '(im_arr, [0, 3, 1, 2])\n', (5158, 5180), True, 'import numpy as np\n'), ((2003, 2037), 'multiprocessing.Process', 'Process', ([], {'target': 'self._produce_data'}), '(target=self._produce_data)\n', (2010, 2037), False, 'from multiprocessing import Process, Queue\n'), ((2931, 2950), 'mxnet.nd.array', 'mx.nd.array', (['images'], {}), '(images)\n', (2942, 2950), True, 'import mxnet as mx\n'), ((2952, 2971), 'mxnet.nd.array', 'mx.nd.array', (['labels'], {}), '(labels)\n', (2963, 2971), True, 'import mxnet as mx\n'), ((3527, 3556), 'numpy.random.shuffle', 'np.random.shuffle', (['self.flist'], {}), '(self.flist)\n', (3544, 3556), True, 'import numpy as np\n'), ((4331, 4371), 'preprocessing.random_flip', 'preprocessing.random_flip', (['im_arr', 'l_arr'], {}), '(im_arr, l_arr)\n', (4356, 4371), False, 'import preprocessing\n'), ((6032, 6096), 'mxnet.io.DataBatch', 'DataBatch', ([], {'data': '[data[0]]', 'label': '[data[1]]', 'pad': 'None', 'index': 'None'}), '(data=[data[0]], label=[data[1]], pad=None, index=None)\n', (6041, 6096), False, 'from mxnet.io import DataIter, DataBatch\n'), ((926, 960), 'numpy.array', 'np.array', (['rgb_mean'], {'dtype': 'np.uint8'}), '(rgb_mean, dtype=np.uint8)\n', (934, 960), True, 'import numpy as np\n'), ((3638, 3674), 'os.path.join', 'os.path.join', (['self.root_dir', 'item[0]'], {}), '(self.root_dir, item[0])\n', (3650, 3674), False, 'import os\n'), ((3738, 3774), 'os.path.join', 'os.path.join', (['self.root_dir', 'item[1]'], {}), '(self.root_dir, 
item[1])\n', (3750, 3774), False, 'import os\n'), ((3819, 3834), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (3831, 3834), False, 'import logging\n'), ((3917, 3934), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3931, 3934), True, 'import numpy as np\n'), ((4733, 4756), 'PIL.Image.fromarray', 'Image.fromarray', (['im_arr'], {}), '(im_arr)\n', (4748, 4756), False, 'from PIL import Image\n'), ((4975, 4997), 'PIL.Image.fromarray', 'Image.fromarray', (['l_arr'], {}), '(l_arr)\n', (4990, 4997), False, 'from PIL import Image\n')]
|
from .. import Utils
from SimPEG.EM.Base import BaseEMProblem
from .SurveyDC import Survey
from .FieldsDC import FieldsDC, Fields_CC, Fields_N
import numpy as np
import scipy as sp
from SimPEG.Utils import Zero
from .BoundaryUtils import getxBCyBC_CC
class BaseDCProblem(BaseEMProblem):
"""
Base DC Problem
"""
surveyPair = Survey
fieldsPair = FieldsDC
Ainv = None
def fields(self, m=None):
if m is not None:
self.model = m
if self.Ainv is not None:
self.Ainv.clean()
f = self.fieldsPair(self.mesh, self.survey)
A = self.getA()
self.Ainv = self.Solver(A, **self.solverOpts)
RHS = self.getRHS()
u = self.Ainv * RHS
Srcs = self.survey.srcList
f[Srcs, self._solutionType] = u
return f
def Jvec(self, m, v, f=None):
if f is None:
f = self.fields(m)
self.model = m
# Jv = self.dataPair(self.survey) # same size as the data
Jv = []
A = self.getA()
for src in self.survey.srcList:
u_src = f[src, self._solutionType] # solution vector
dA_dm_v = self.getADeriv(u_src, v)
dRHS_dm_v = self.getRHSDeriv(src, v)
du_dm_v = self.Ainv * (-dA_dm_v + dRHS_dm_v)
for rx in src.rxList:
df_dmFun = getattr(f, f"_{rx.projField!s}Deriv", None)
df_dm_v = df_dmFun(src, du_dm_v, v, adjoint=False)
Jv.append(rx.evalDeriv(src, self.mesh, f, df_dm_v))
# Jv[src, rx] = rx.evalDeriv(src, self.mesh, f, df_dm_v)
# return Utils.mkvc(Jv)
return np.hstack(Jv)
def Jtvec(self, m, v, f=None):
if f is None:
f = self.fields(m)
self.model = m
# Ensure v is a data object.
if not isinstance(v, self.dataPair):
v = self.dataPair(self.survey, v)
Jtv = np.zeros(m.size)
AT = self.getA()
for src in self.survey.srcList:
u_src = f[src, self._solutionType]
for rx in src.rxList:
# wrt f, need possibility wrt m
PTv = rx.evalDeriv(src, self.mesh, f, v[src, rx], adjoint=True)
df_duTFun = getattr(f, f"_{rx.projField!s}Deriv", None)
df_duT, df_dmT = df_duTFun(src, None, PTv, adjoint=True)
ATinvdf_duT = self.Ainv * df_duT
dA_dmT = self.getADeriv(u_src, ATinvdf_duT, adjoint=True)
dRHS_dmT = self.getRHSDeriv(src, ATinvdf_duT, adjoint=True)
du_dmT = -dA_dmT + dRHS_dmT
Jtv += (df_dmT + du_dmT).astype(float)
return Utils.mkvc(Jtv)
def getSourceTerm(self):
"""
Evaluates the sources, and puts them in matrix form
:rtype: tuple
:return: q (nC or nN, nSrc)
"""
Srcs = self.survey.srcList
if self._formulation == "EB":
n = self.mesh.nN
# return NotImplementedError
elif self._formulation == "HJ":
n = self.mesh.nC
q = np.zeros((n, len(Srcs)))
for i, src in enumerate(Srcs):
q[:, i] = src.eval(self)
return q
class Problem3D_CC(BaseDCProblem):
"""
3D cell centered DC problem
"""
_solutionType = "phiSolution"
_formulation = "HJ" # CC potentials means J is on faces
fieldsPair = Fields_CC
bc_type = "Neumann"
def __init__(self, mesh, **kwargs):
BaseDCProblem.__init__(self, mesh, **kwargs)
self.setBC()
def getA(self):
"""
Make the A matrix for the cell centered DC resistivity problem
A = D MfRhoI G
"""
D = self.Div
G = self.Grad
MfRhoI = self.MfRhoI
A = D * MfRhoI * G
if self.bc_type == "Neumann":
Vol = self.mesh.vol
if self.verbose:
print("Perturbing first row of A to remove nullspace for Neumann BC.")
# Handling Null space of A
I, J, V = sp.sparse.find(A[0, :])
for jj in J:
A[0, jj] = 0.0
A[0, 0] = 1.0 / Vol[0]
# I think we should deprecate this for DC problem.
# if self._makeASymmetric is True:
# return V.T * A
return A
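    # Added note: D * MfRhoI * G is the cell-centered mimetic discretization of the
    # DC operator div(rho^-1 grad(phi)). With pure Neumann boundaries the potential is
    # only defined up to an additive constant, so the first row of A is perturbed
    # (A[0, 0] = 1/Vol[0]) to remove that nullspace before factorization.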
def getADeriv(self, u, v, adjoint=False):
D = self.Div
G = self.Grad
MfRhoIDeriv = self.MfRhoIDeriv
if adjoint:
return (MfRhoIDeriv(G * u).T) * (D.T * v)
return D * (MfRhoIDeriv(G * u) * v)
def getRHS(self):
"""
RHS for the DC problem
q
"""
RHS = self.getSourceTerm()
return RHS
def getRHSDeriv(self, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
"""
# TODO: add qDeriv for RHS depending on m
# qDeriv = src.evalDeriv(self, adjoint=adjoint)
# return qDeriv
return Zero()
def setBC(self):
if self.mesh._meshType == "TREE":
if self.bc_type == "Neumann":
raise NotImplementedError()
elif self.bc_type == "Dirchlet":
print(
"Homogeneous Dirchlet is the natural BC for this CC discretization."
)
self.Div = Utils.sdiag(self.mesh.vol) * self.mesh.faceDiv
self.Grad = self.Div.T
else:
if self.mesh.dim == 3:
fxm, fxp, fym, fyp, fzm, fzp = self.mesh.faceBoundaryInd
gBFxm = self.mesh.gridFx[fxm, :]
gBFxp = self.mesh.gridFx[fxp, :]
gBFym = self.mesh.gridFy[fym, :]
gBFyp = self.mesh.gridFy[fyp, :]
gBFzm = self.mesh.gridFz[fzm, :]
gBFzp = self.mesh.gridFz[fzp, :]
# Setup Mixed B.C (alpha, beta, gamma)
temp_xm = np.ones_like(gBFxm[:, 0])
temp_xp = np.ones_like(gBFxp[:, 0])
temp_ym = np.ones_like(gBFym[:, 1])
temp_yp = np.ones_like(gBFyp[:, 1])
temp_zm = np.ones_like(gBFzm[:, 2])
temp_zp = np.ones_like(gBFzp[:, 2])
if self.bc_type == "Neumann":
if self.verbose:
print("Setting BC to Neumann.")
alpha_xm, alpha_xp = temp_xm * 0.0, temp_xp * 0.0
alpha_ym, alpha_yp = temp_ym * 0.0, temp_yp * 0.0
alpha_zm, alpha_zp = temp_zm * 0.0, temp_zp * 0.0
beta_xm, beta_xp = temp_xm, temp_xp
beta_ym, beta_yp = temp_ym, temp_yp
beta_zm, beta_zp = temp_zm, temp_zp
gamma_xm, gamma_xp = temp_xm * 0.0, temp_xp * 0.0
gamma_ym, gamma_yp = temp_ym * 0.0, temp_yp * 0.0
gamma_zm, gamma_zp = temp_zm * 0.0, temp_zp * 0.0
elif self.bc_type == "Dirchlet":
if self.verbose:
print("Setting BC to Dirchlet.")
alpha_xm, alpha_xp = temp_xm, temp_xp
alpha_ym, alpha_yp = temp_ym, temp_yp
alpha_zm, alpha_zp = temp_zm, temp_zp
beta_xm, beta_xp = temp_xm * 0, temp_xp * 0
beta_ym, beta_yp = temp_ym * 0, temp_yp * 0
beta_zm, beta_zp = temp_zm * 0, temp_zp * 0
gamma_xm, gamma_xp = temp_xm * 0.0, temp_xp * 0.0
gamma_ym, gamma_yp = temp_ym * 0.0, temp_yp * 0.0
gamma_zm, gamma_zp = temp_zm * 0.0, temp_zp * 0.0
alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp, alpha_zm, alpha_zp]
beta = [beta_xm, beta_xp, beta_ym, beta_yp, beta_zm, beta_zp]
gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp, gamma_zm, gamma_zp]
elif self.mesh.dim == 2:
fxm, fxp, fym, fyp = self.mesh.faceBoundaryInd
gBFxm = self.mesh.gridFx[fxm, :]
gBFxp = self.mesh.gridFx[fxp, :]
gBFym = self.mesh.gridFy[fym, :]
gBFyp = self.mesh.gridFy[fyp, :]
# Setup Mixed B.C (alpha, beta, gamma)
temp_xm = np.ones_like(gBFxm[:, 0])
temp_xp = np.ones_like(gBFxp[:, 0])
temp_ym = np.ones_like(gBFym[:, 1])
temp_yp = np.ones_like(gBFyp[:, 1])
alpha_xm, alpha_xp = temp_xm * 0.0, temp_xp * 0.0
alpha_ym, alpha_yp = temp_ym * 0.0, temp_yp * 0.0
beta_xm, beta_xp = temp_xm, temp_xp
beta_ym, beta_yp = temp_ym, temp_yp
gamma_xm, gamma_xp = temp_xm * 0.0, temp_xp * 0.0
gamma_ym, gamma_yp = temp_ym * 0.0, temp_yp * 0.0
alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp]
beta = [beta_xm, beta_xp, beta_ym, beta_yp]
gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp]
x_BC, y_BC = getxBCyBC_CC(self.mesh, alpha, beta, gamma)
V = self.Vol
self.Div = V * self.mesh.faceDiv
P_BC, B = self.mesh.getBCProjWF_simple()
M = B * self.mesh.aveCC2F
self.Grad = self.Div.T - P_BC * Utils.sdiag(y_BC) * M
class Problem3D_N(BaseDCProblem):
"""
3D nodal DC problem
"""
_solutionType = "phiSolution"
_formulation = "EB" # N potentials means B is on faces
fieldsPair = Fields_N
def __init__(self, mesh, **kwargs):
BaseDCProblem.__init__(self, mesh, **kwargs)
def getA(self):
"""
        Make the A matrix for the nodal DC resistivity problem
A = G.T MeSigma G
"""
MeSigma = self.MeSigma
Grad = self.mesh.nodalGrad
A = Grad.T * MeSigma * Grad
Vol = self.mesh.vol
# Handling Null space of A
I, J, V = sp.sparse.find(A[0, :])
for jj in J:
A[0, jj] = 0.0
A[0, 0] = 1.0 / Vol[0]
return A
def getADeriv(self, u, v, adjoint=False):
"""
Product of the derivative of our system matrix with respect to the
model and a vector
"""
Grad = self.mesh.nodalGrad
if not adjoint:
return Grad.T * (self.MeSigmaDeriv(Grad * u) * v)
elif adjoint:
return self.MeSigmaDeriv(Grad * u).T * (Grad * v)
def getRHS(self):
"""
RHS for the DC problem
q
"""
RHS = self.getSourceTerm()
return RHS
def getRHSDeriv(self, src, v, adjoint=False):
"""
Derivative of the right hand side with respect to the model
"""
# TODO: add qDeriv for RHS depending on m
# qDeriv = src.evalDeriv(self, adjoint=adjoint)
# return qDeriv
return Zero()
|
[
"numpy.ones_like",
"numpy.hstack",
"numpy.zeros",
"scipy.sparse.find",
"SimPEG.Utils.Zero"
] |
[((1663, 1676), 'numpy.hstack', 'np.hstack', (['Jv'], {}), '(Jv)\n', (1672, 1676), True, 'import numpy as np\n'), ((1934, 1950), 'numpy.zeros', 'np.zeros', (['m.size'], {}), '(m.size)\n', (1942, 1950), True, 'import numpy as np\n'), ((5010, 5016), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (5014, 5016), False, 'from SimPEG.Utils import Zero\n'), ((9985, 10008), 'scipy.sparse.find', 'sp.sparse.find', (['A[0, :]'], {}), '(A[0, :])\n', (9999, 10008), True, 'import scipy as sp\n'), ((10919, 10925), 'SimPEG.Utils.Zero', 'Zero', ([], {}), '()\n', (10923, 10925), False, 'from SimPEG.Utils import Zero\n'), ((4062, 4085), 'scipy.sparse.find', 'sp.sparse.find', (['A[0, :]'], {}), '(A[0, :])\n', (4076, 4085), True, 'import scipy as sp\n'), ((5955, 5980), 'numpy.ones_like', 'np.ones_like', (['gBFxm[:, 0]'], {}), '(gBFxm[:, 0])\n', (5967, 5980), True, 'import numpy as np\n'), ((6007, 6032), 'numpy.ones_like', 'np.ones_like', (['gBFxp[:, 0]'], {}), '(gBFxp[:, 0])\n', (6019, 6032), True, 'import numpy as np\n'), ((6059, 6084), 'numpy.ones_like', 'np.ones_like', (['gBFym[:, 1]'], {}), '(gBFym[:, 1])\n', (6071, 6084), True, 'import numpy as np\n'), ((6111, 6136), 'numpy.ones_like', 'np.ones_like', (['gBFyp[:, 1]'], {}), '(gBFyp[:, 1])\n', (6123, 6136), True, 'import numpy as np\n'), ((6163, 6188), 'numpy.ones_like', 'np.ones_like', (['gBFzm[:, 2]'], {}), '(gBFzm[:, 2])\n', (6175, 6188), True, 'import numpy as np\n'), ((6215, 6240), 'numpy.ones_like', 'np.ones_like', (['gBFzp[:, 2]'], {}), '(gBFzp[:, 2])\n', (6227, 6240), True, 'import numpy as np\n'), ((8322, 8347), 'numpy.ones_like', 'np.ones_like', (['gBFxm[:, 0]'], {}), '(gBFxm[:, 0])\n', (8334, 8347), True, 'import numpy as np\n'), ((8374, 8399), 'numpy.ones_like', 'np.ones_like', (['gBFxp[:, 0]'], {}), '(gBFxp[:, 0])\n', (8386, 8399), True, 'import numpy as np\n'), ((8426, 8451), 'numpy.ones_like', 'np.ones_like', (['gBFym[:, 1]'], {}), '(gBFym[:, 1])\n', (8438, 8451), True, 'import numpy as np\n'), ((8478, 8503), 'numpy.ones_like', 'np.ones_like', (['gBFyp[:, 1]'], {}), '(gBFyp[:, 1])\n', (8490, 8503), True, 'import numpy as np\n')]
|
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE_MATTERPORT for details)
Written by <NAME>
Copyright (c) 2021 Skinet Team
Licensed under the MIT License (see LICENSE for details)
Updated/Modified by <NAME>
"""
import json
import logging
import os
import random
import shutil
import urllib.request
import warnings
import zipfile
from distutils.version import LooseVersion
import cv2
import numpy as np
import scipy
import skimage.color
import skimage.io
import skimage.transform
from mrcnn.Config import Config
from mrcnn.visualize import create_multiclass_mask
from datasetTools import datasetDivider as dD
# URL from which to download the latest trained weights
WEIGHTS_URL = []
############################################################
# Masks
############################################################
def reduce_memory(results, config: Config, allow_sparse=True):
"""
Minimize all masks in the results dict from inference
:param results: dict containing results of the inference
:param config: the config object
:param allow_sparse: if False, will only keep biggest region of a mask
:return:
"""
_masks = results['masks']
_bbox = results['rois']
if not allow_sparse:
emptyMasks = []
for idx in range(results['masks'].shape[-1]):
mask = unsparse_mask(results['masks'][:, :, idx])
if mask is None:
emptyMasks.append(idx)
else:
results['masks'][:, :, idx] = mask
if len(emptyMasks) > 0:
results['scores'] = np.delete(results['scores'], emptyMasks)
results['class_ids'] = np.delete(results['class_ids'], emptyMasks)
results['masks'] = np.delete(results['masks'], emptyMasks, axis=2)
results['rois'] = np.delete(results['rois'], emptyMasks, axis=0)
results['rois'] = extract_bboxes(results['masks'])
results['masks'] = minimize_mask(results['rois'], results['masks'], config.get_mini_mask_shape())
return results
def get_mask_area(mask, verbose=0):
"""
Computes mask area
:param mask: the array representing the mask
:param verbose: 0 : nothing, 1+ : errors/problems
:return: the area of the mask and verbose output (None when nothing to print)
"""
maskHistogram = dD.getBWCount(mask)
display = None
if verbose > 0:
nbPx = mask.shape[0] * mask.shape[1]
tempSum = maskHistogram[0] + maskHistogram[1]
if tempSum != nbPx:
display = "Histogram pixels {} != total pixels {}".format(tempSum, nbPx)
return maskHistogram[1], display
def unsparse_mask(base_mask):
"""
Return mask with only its biggest part
:param base_mask: the mask image as np.bool or np.uint8
:return: the main part of the mask as a same shape image and type
"""
# http://www.learningaboutelectronics.com/Articles/How-to-find-the-largest-or-smallest-object-in-an-image-Python-OpenCV.php
# https://stackoverflow.com/a/19222620/9962046
    # Convert to np.uint8 if needed before processing
convert = False
if type(base_mask[0, 0]) is np.bool_:
convert = True
base_mask = base_mask.astype(np.uint8) * 255
# Padding the mask so that parts on edges will get correct area
base_mask = np.pad(base_mask, 1, mode='constant', constant_values=0)
res = np.zeros_like(base_mask, dtype=np.uint8)
# Detecting contours and keeping only one with biggest area
contours, _ = cv2.findContours(base_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if len(contours) > 0:
if len(contours) > 1: # If only one region, reconstructing mask is useless
biggest_part = sorted(contours, key=cv2.contourArea, reverse=True)[0]
# Drawing the biggest part on the result mask
cv2.fillPoly(res, pts=[biggest_part], color=255)
else:
res = base_mask
# Removing padding of the mask
res = res[1:-1, 1:-1]
return res.astype(np.bool) if convert else res
else:
return None
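# A minimal usage sketch with illustrative values: a mask containing two
# disconnected blobs keeps only the biggest one.
# >>> m = np.zeros((64, 64), dtype=np.uint8)
# >>> m[5:10, 5:10] = 255     # small blob
# >>> m[20:40, 20:40] = 255   # big blob
# >>> main_part = unsparse_mask(m)   # the small blob is erased (0), the big one kept (255)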
############################################################
# Bounding Boxes
############################################################
def in_roi(roi_to_test, roi, epsilon=0):
"""
Tests if the RoI to test is included in the given RoI
:param roi_to_test: the RoI/bbox to test
:param roi: the RoI that should include the one to test
:param epsilon: margin of the RoI to allow boxes that are not exactly inside
:return: True if roi_to_test is included in roi
"""
res = True
i = 0
while i < 4 and res:
res = res and (roi[i % 2] - epsilon <= roi_to_test[i] <= roi[i % 2 + 2] + epsilon)
i += 1
return res
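# Illustrative values: a box fully contained in another, and the effect of epsilon.
# >>> in_roi([2, 2, 5, 5], [0, 0, 10, 10])
# True
# >>> in_roi([2, 2, 12, 5], [0, 0, 10, 10])             # y2=12 exceeds the outer RoI
# False
# >>> in_roi([2, 2, 12, 5], [0, 0, 10, 10], epsilon=2)  # tolerated with a 2 px margin
# True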
def get_bbox_area(roi):
"""
Returns the bbox area
:param roi: the bbox to use
:return: area of the given bbox
"""
return (roi[3] - roi[1]) * (roi[2] - roi[0])
def get_bboxes_intersection(roiA, roiB):
"""
Computes the intersection area of two bboxes
:param roiA: the first bbox
:param roiB: the second bbox
:return: the area of the intersection
"""
xInter = min(roiA[3], roiB[3]) - max(roiA[1], roiB[1])
yInter = min(roiA[2], roiB[2]) - max(roiA[0], roiB[0])
return max(xInter, 0) * max(yInter, 0)
def global_bbox(roiA, roiB):
"""
Returns the bbox enclosing two given bboxes
:param roiA: the first bbox
:param roiB: the second bbox
:return: the enclosing bbox
"""
return np.array([min(roiA[0], roiB[0]), min(roiA[1], roiB[1]), max(roiA[2], roiB[2]), max(roiA[3], roiB[3])])
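# Illustrative values for the three bbox helpers above (boxes are [y1, x1, y2, x2]):
# >>> get_bbox_area([0, 0, 10, 10])
# 100
# >>> get_bboxes_intersection([0, 0, 10, 10], [5, 5, 15, 15])
# 25
# >>> global_bbox([0, 0, 10, 10], [5, 5, 15, 15])
# array([ 0,  0, 15, 15])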
def shift_bbox(roi, customShift=None):
"""
Shifts bbox coordinates so that min x and min y equal 0
:param roi: the roi/bbox to transform
:param customShift: custom x and y shift as (yShift, xShift)
:return: the shifted bbox
"""
yMin, xMin, yMax, xMax = roi
if customShift is None:
return np.array([0, 0, yMax - yMin, xMax - xMin])
else:
return np.array([max(yMin - customShift[0], 0), max(xMin - customShift[1], 0),
max(yMax - customShift[0], 0), max(xMax - customShift[1], 0)])
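# Illustrative values: shifting a box to the origin, or by a custom (y, x) offset.
# >>> shift_bbox([10, 20, 50, 80])
# array([ 0,  0, 40, 60])
# >>> shift_bbox([10, 20, 50, 80], customShift=(5, 5))
# array([ 5, 15, 45, 75])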
def expand_masks(mini_mask1, roi1, mini_mask2, roi2):
"""
Expands two masks while keeping their relative position
:param mini_mask1: the first mini mask
:param roi1: the first mask bbox/roi
:param mini_mask2: the second mini mask
:param roi2: the second mask bbox/roi
:return: mask1, mask2
"""
roi1And2 = global_bbox(roi1, roi2)
shifted_roi1And2 = shift_bbox(roi1And2)
shifted_roi1 = shift_bbox(roi1, customShift=roi1And2[:2])
shifted_roi2 = shift_bbox(roi2, customShift=roi1And2[:2])
mask1 = expand_mask(shifted_roi1, mini_mask1, shifted_roi1And2[2:])
mask2 = expand_mask(shifted_roi2, mini_mask2, shifted_roi1And2[2:])
return mask1, mask2
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
soleMask = False
if len(mask.shape) != 3:
_mask = np.expand_dims(mask, 2)
soleMask = True
else:
_mask = mask
boxes = np.zeros([_mask.shape[-1], 4], dtype=np.int32)
for i in range(_mask.shape[-1]):
m = _mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2]).astype(np.int32)
return boxes[0] if soleMask else boxes
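# Illustrative values: a single binary mask gives one [y1, x1, y2, x2] box
# (y2/x2 are exclusive, hence the +1 above).
# >>> m = np.zeros((64, 64), dtype=bool)
# >>> m[10:20, 15:30] = True
# >>> extract_bboxes(m)
# array([10, 15, 20, 30], dtype=int32)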
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
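# Illustrative values: IoU of one box against two boxes (itself and a shifted copy).
# >>> box = np.array([0, 0, 10, 10])
# >>> boxes = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])
# >>> compute_iou(box, boxes, 100., np.array([100., 100.]))   # -> [1.0, 25/175 ~= 0.143]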
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# TODO Possible improvements: using another structure to save overlaps as a lot of bboxes overlaps with only a few ?
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
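# Illustrative values: the overlap matrix has shape (len(boxes1), len(boxes2)).
# >>> b1 = np.array([[0, 0, 10, 10], [20, 20, 30, 30]])
# >>> b2 = np.array([[0, 0, 10, 10]])
# >>> compute_overlaps(b1, b2)              # -> a (2, 1) matrix: [[1.0], [0.0]]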
def compute_overlaps_masks(masks1, boxes1, masks2, boxes2):
"""Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
"""
res = np.zeros((masks1.shape[-1], masks2.shape[-1]))
# If either set of masks is empty return empty result
if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
return res
matching_boxes = compute_overlaps(boxes1, boxes2)
idx, idy = np.nonzero(matching_boxes)
matching_boxes = set(zip(idx, idy))
for idMask1, idMask2 in matching_boxes:
mask1, mask2 = expand_masks(masks1[:, :, idMask1], boxes1[idMask1], masks2[:, :, idMask2], boxes2[idMask2])
mask1Area, _ = get_mask_area(mask1)
mask2Area, _ = get_mask_area(mask2)
if mask1Area != 0 and mask2Area != 0:
mask1AND2 = np.logical_and(mask1, mask2)
intersection, _ = get_mask_area(mask1AND2)
union = mask1Area + mask2Area - intersection
res[idMask1, idMask2] = intersection / union
return res
def non_max_suppression(boxes, scores, threshold):
"""
Performs non-maximum suppression
:param boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
:param scores: 1-D array of box scores.
:param threshold: Float. IoU threshold to use for filtering.
:return: indices of kept boxes
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add 1 to get
# indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
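# Illustrative values: two heavily overlapping boxes and one distinct box; the
# lower-scored duplicate is suppressed at a 0.5 IoU threshold.
# >>> boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]])
# >>> scores = np.array([0.9, 0.8, 0.7])
# >>> non_max_suppression(boxes, scores, 0.5)
# array([0, 2], dtype=int32)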
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = skimage.io.imread(self.image_info[image_id]['path'])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
"""Resizes an image keeping the aspect ratio unchanged.
    min_dim: if provided, resizes the image such that its smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
min_scale: if provided, ensure that the image is scaled up by at least
this percent even if min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the image unchanged.
square: Resize and pad with zeros to get a square image
of size [max_dim, max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If min_dim or min_scale are provided, it scales the image up
before padding. max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the image. First, scales the image based
on min_dim and min_scale, then picks a random crop of
size min_dim x min_dim. Can be used in training only.
max_dim is not used in this mode.
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of image dtype and return results in the same dtype
image_dtype = image.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
padding = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return image, window, scale, padding, crop
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
if min_scale and scale < min_scale:
scale = min_scale
# Does it exceed max dim?
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image using bilinear interpolation
if scale != 1:
image = resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padding or cropping?
if mode == "square":
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
# Both sides must be divisible by 64
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
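# An illustrative "square" mode example: a 200x400 image with min_dim=256 and
# max_dim=512 is scaled by 1.28 to 256x512, then zero-padded to 512x512; the
# returned window marks where the resized image sits inside the padded result.
# >>> img = np.zeros((200, 400, 3), dtype=np.uint8)
# >>> out, window, scale, padding, crop = resize_image(img, min_dim=256, max_dim=512, mode="square")
# >>> out.shape, window, scale
# ((512, 512, 3), (128, 0, 384, 512), 1.28)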
def resize_mask(mask, scale, padding, crop=None):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
    ensure that both the image and the mask are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0, the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def minimize_mask(bbox, mask, mini_shape):
"""Resize masks to a smaller version to reduce memory load.
Mini-masks can be resized back to image scale using expand_masks()
See inspect_data.ipynb notebook for more details.
"""
soleMask = False
if len(bbox.shape) != 2 and len(mask.shape) != 3:
soleMask = True
_bbox = np.expand_dims(bbox, 0)
_mask = np.expand_dims(mask, 2)
else:
_bbox = bbox
_mask = mask
mini_mask = np.zeros(mini_shape + (_mask.shape[-1],), dtype=bool)
for i in range(_mask.shape[-1]):
# Pick slice and cast to bool in case load_mask() returned wrong dtype
m = _mask[:, :, i].astype(bool).astype(np.uint8) * 255
y1, x1, y2, x2 = _bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
# Resize with bilinear interpolation
m = resize(m, mini_shape)
mini_mask[:, :, i] = np.around(m).astype(np.bool)
return mini_mask[:, :, 0] if soleMask else mini_mask
def expand_mask(bbox, mini_mask, image_shape):
"""Resizes mini masks back to image size. Reverses the change
of minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
if type(image_shape) is not tuple:
image_shape = tuple(image_shape)
soleMask = False
if len(bbox.shape) != 2 and len(mini_mask.shape) != 3:
soleMask = True
_bbox = np.expand_dims(bbox, 0)
_mini_mask = np.expand_dims(mini_mask, 2)
else:
_bbox = bbox
_mini_mask = mini_mask
mask = np.zeros(image_shape[:2] + (_mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = _mini_mask[:, :, i].astype(bool).astype(np.uint8) * 255
y1, x1, y2, x2 = _bbox[i][:4]
h = y2 - y1
w = x2 - x1
# Resize with bilinear interpolation
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)
return mask[:, :, 0] if soleMask else mask
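# An illustrative round-trip: a full-size mask is shrunk to a 56x56 mini-mask and
# expanded back to image size (lossy, since the mini-mask is a low-resolution resample).
# >>> mask = np.zeros((1024, 1024), dtype=bool)
# >>> mask[100:300, 200:500] = True
# >>> bbox = extract_bboxes(mask)                   # array([100, 200, 300, 500], dtype=int32)
# >>> mini = minimize_mask(bbox, mask, (56, 56))    # shape (56, 56), dtype bool
# >>> back = expand_mask(bbox, mini, (1024, 1024))  # shape (1024, 1024), dtype bool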
def minimize_mask_float(mask, bbox, output_shape=(28, 28), offset=32):
"""
Minimizes given mask(s) to floating point masks of the given shape
:param mask: mask as a 2-D uint8 ndarray of shape (H, W) or masks as a 3-D uint8 ndarray of shape (H, W, N)
    :param bbox: bbox as a 1-D uint8 ndarray of shape (4) or bboxes as a 2-D uint8 ndarray of shape (N, 4)
:param output_shape: shape of the output mini-mask(s)
    :param offset: the offset on each side of the image part that will be resized (used to avoid border effects when resizing)
:return: Minimized mask(s) in the same ndarray format as input ones but with output_shape as (H, W) and with float64
dtype
"""
soleMask = False
if len(bbox.shape) != 2 and len(mask.shape) != 3:
soleMask = True
_bbox = np.expand_dims(bbox, 0)
_mask = np.expand_dims(mask, 2)
else:
_bbox = bbox
_mask = mask
mini_masks = np.zeros(output_shape + (_mask.shape[-1],), dtype=np.float64)
for i in range(_mask.shape[-1]):
# Computing mask shape with offset on all sides
mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + np.array([offset * 2] * 2))
temp_mask = np.zeros(mask_shape, dtype=np.uint8) # Empty mask
y1, x1, y2, x2 = _bbox[i][:4]
temp_mask[offset:-offset, offset:-offset] = _mask[y1:y2, x1:x2, i] # Filling it with mask
# Resizing to output shape
mini_masks[:, :, i] = resize(temp_mask.astype(bool).astype(np.float64), output_shape)
return mini_masks[:, :, 0] if soleMask else mini_masks
def expand_mask_float(mini_mask, bbox, output_shape=(1024, 1024), offset=32):
"""
Expands given floating point mini-mask(s) back to binary mask(s) with the same shape as the image
:param mini_mask: mini-mask as a 2-D uint8 ndarray of shape (H, W) or mini-masks as a 3-D uint8 ndarray of
shape (H, W, N)
    :param bbox: bbox as a 1-D uint8 ndarray of shape (4) or bboxes as a 2-D uint8 ndarray of shape (N, 4)
:param output_shape: shape of the output mask(s)
    :param offset: the offset on each side of the image part that will be resized (used to avoid border effects when resizing)
:return: Expanded mask(s) in the same ndarray format as input ones but with output_shape as (H, W) and with uint8
dtype
"""
if type(output_shape) is not tuple:
output_shape = tuple(output_shape)
soleMask = False
if len(bbox.shape) != 2 and len(mini_mask.shape) != 3:
soleMask = True
_bbox = np.expand_dims(bbox, 0)
_mini_mask = np.expand_dims(mini_mask, 2)
else:
_bbox = bbox
_mini_mask = mini_mask
masks = np.zeros(output_shape[:2] + (_mini_mask.shape[-1],), dtype=np.uint8)
for i in range(_mini_mask.shape[-1]):
mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + np.array([offset * 2] * 2))
resized_mask = resize(_mini_mask[:, :, i], mask_shape)
y1, x1, y2, x2 = _bbox[i][:4]
masks[y1:y2, x1:x2, i] = np.where(resized_mask[offset:-offset, offset:-offset] >= 0.5,
255, 0).astype(np.uint8)
return masks[:, :, 0] if soleMask else masks
def unmold_mask(mask, bbox, image_shape):
"""Converts a mask generated by the neural network to a format similar
to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = np.where(mask >= threshold, 1, 0).astype(np.bool)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2], dtype=np.bool)
full_mask[y1:y2, x1:x2] = mask
return full_mask
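# Illustrative values: a 28x28 soft mask predicted for a box is resized to the box
# size, thresholded at 0.5 and pasted into a full-size boolean image.
# >>> soft = np.full((28, 28), 0.9)
# >>> full = unmold_mask(soft, np.array([100, 200, 300, 500]), (1024, 1024, 3))
# >>> full.shape       # (1024, 1024); only the region [100:300, 200:500] is True
# (1024, 1024)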
############################################################
# Miscellaneous
############################################################
def export_results(output_path: str, class_ids, boxes=None, masks=None, scores=None, bbox_areas=None, mask_areas=None):
"""
Exports result dictionary to a JSON file for debug
:param output_path: path to the output JSON file
:param class_ids: value of the 'class_ids' key of results dictionary
    :param boxes: value of the 'rois' key of results dictionary
    :param masks: value of the 'masks' key of results dictionary
    :param scores: value of the 'scores' key of results dictionary
:param bbox_areas: value of the 'bbox_areas' key of results dictionary
:param mask_areas: value of the 'masks_areas' key of results dictionary
:return: None
"""
if type(class_ids) is dict:
if 'rois' in class_ids:
boxes = class_ids['rois']
if 'masks' in class_ids:
masks = class_ids['masks']
if 'scores' in class_ids:
scores = class_ids['scores']
if 'bbox_areas' in class_ids:
bbox_areas = class_ids['bbox_areas']
if 'mask_areas' in class_ids:
mask_areas = class_ids['mask_areas']
class_ids = class_ids['class_ids']
oneDArrays = [
(class_ids, "class_ids", int),
(scores, "scores", float),
(bbox_areas, "bbox_areas", float),
(mask_areas, "mask_areas", float),
]
data = {key: [arrayType(v) for v in array] for array, key, arrayType in oneDArrays if array is not None}
if boxes is not None:
data["rois"] = [[int(v) for v in bbox] for bbox in boxes]
if masks is not None:
data["masks"] = [[[int(bool(v)) * 255 for v in row] for row in mask] for mask in masks]
with open(output_path, 'w') as output:
json.dump(data, output)
def import_results(input_path: str):
"""
Imports result dictionary from JSON file for debug
:param input_path: path to the input JSON file
:return: results dictionary
"""
with open(input_path, 'r') as inputFile:
data = json.load(inputFile)
keyType = {'rois': np.int32, 'masks': np.uint8, 'class_ids': int,
'scores': float, 'bbox_areas': float, 'mask_areas': float}
for key in data.keys():
data[key] = np.array(data[key]).astype(keyType[key])
return data
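# An illustrative round-trip (the file name is arbitrary): dump a results dict to
# JSON and read it back with the expected dtypes.
# >>> res = {'class_ids': np.array([1, 2]), 'scores': np.array([0.9, 0.8]),
# ...        'rois': np.array([[0, 0, 10, 10], [5, 5, 15, 15]])}
# >>> export_results("debug_results.json", res)
# >>> data = import_results("debug_results.json")
# >>> data['class_ids']                 # rois come back as an int32 array of shape (2, 4)
# array([1, 2])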
def classes_level(classes_hierarchy):
"""
Return each level of the given class hierarchy with its classes
    :param classes_hierarchy: a structure made of ints (single classes), lists (classes of the same level) and dicts
    describing "key class contains value class(es)". ex : [1, {2: [3, 4]}, {5: 6}] -> [[1, 2, 5], [3, 4, 6]]
:return: list containing each classes of a level as a list : [[ lvl0 ], [ lvl1 ], ...]
"""
if type(classes_hierarchy) is int:
return [[classes_hierarchy]] # Return a hierarchy with only one level containing the value
elif type(classes_hierarchy) is list:
res = []
for element in classes_hierarchy: # For each element of the list
temp = classes_level(element)
for lvl, indices in enumerate(temp): # For each hierarchy level of the current element
if len(indices) > 0:
if len(res) < lvl + 1: # Adding a new level if needed
res.append([])
res[lvl].extend(indices) # Fusing the current hierarchy level to list hierarchy one
return res
elif type(classes_hierarchy) is dict:
res = [[]]
for key in classes_hierarchy:
res[0].append(key) # Append key to lvl 0 classes
if classes_hierarchy[key] is not None:
temp = classes_level(classes_hierarchy[key])
for lvl, indices in enumerate(temp): # For each lvl of class inside the value of key element
if len(res) < lvl + 2: # Adding a new level if needed
res.append([])
res[lvl + 1].extend(indices) # Offsetting each level of the child to be relative to parent class
return res
def remove_redundant_classes(classes_lvl, keepFirst=True):
"""
    Remove classes that appear more than once in the classes' levels
:param classes_lvl: list of each level of classes as list : [[ lvl 0 ], [ lvl 1 ], ...]
:param keepFirst: if True, class will be kept in the min level in which it is present, else in the max/last level.
:return: [[ lvl 0 ], [ lvl 1 ], ...] with classes only appearing once
"""
res = [[] for _ in classes_lvl]
seenClass = []
for lvlID, lvl in enumerate(classes_lvl[::1 if keepFirst else -1]): # For each lvl in normal or reverse order
for classID in lvl:
if classID not in seenClass: # Checking if the class ID has already been added or not
seenClass.append(classID) # Adding the class ID to the added ones
res[lvlID if keepFirst else (-1 - lvlID)].append(classID) # Adding the class to its level
for lvl in res: # Removing empty levels
if len(lvl) == 0:
res.remove(lvl)
return res
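# Illustrative values: class 2 appears on two levels and is kept on the first
# (or on the last one when keepFirst=False).
# >>> remove_redundant_classes([[1, 2], [2, 3]], keepFirst=True)
# [[1, 2], [3]]
# >>> remove_redundant_classes([[1, 2], [2, 3]], keepFirst=False)
# [[1], [2, 3]]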
def compute_confusion_matrix(image_shape: iter, expectedResults: dict, predictedResults: dict, num_classes: int,
config: Config = None):
"""
Computes confusion matrix at pixel precision
:param image_shape: the initial image shape
:param expectedResults: the expected results dict
:param predictedResults: the predicted results dict
:param num_classes: number of classes (max class ID)
:param config: the config object of the AI
:return: confusion matrix as a ndarray of shape (num_classes + 1, num_classes + 1), 0 being background class
"""
expectedImg = create_multiclass_mask(image_shape, expectedResults, config)
predictedImg = create_multiclass_mask(image_shape, predictedResults, config)
confusion_matrix = np.zeros((num_classes + 1, num_classes + 1), dtype=np.int64)
for y in range(image_shape[0]):
for x in range(image_shape[1]):
confusion_matrix[expectedImg[y, x]][predictedImg[y, x]] += 1
return confusion_matrix
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_matches(gt_boxes, gt_class_ids, gt_masks, pred_boxes,
pred_class_ids, pred_scores, pred_masks,
ap_iou_threshold=0.5, min_iou_to_count=0.0,
nb_class=-1, confusion_iou_threshold=0.1,
classes_hierarchy=None, confusion_background_class=True, confusion_only_best_match=True):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
if nb_class > 0:
bg = 1 if confusion_background_class else 0
confusion_matrix = np.zeros((nb_class + bg, nb_class + bg), dtype=np.int64)
else:
confusion_matrix = None
confusion_iou_threshold = 1.
classes_hierarchy_ = None
if classes_hierarchy is not None and type(classes_hierarchy) is list:
classes_hierarchy_ = {list(c.keys())[0]: c[list(c.keys())[0]] for c in classes_hierarchy if type(c) is dict}
elif classes_hierarchy is not None and type(classes_hierarchy) is dict:
classes_hierarchy_ = classes_hierarchy
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps_masks(pred_masks, pred_boxes, gt_masks, gt_boxes)
# Loop through predictions and find matching ground truth boxes
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for pred_idx in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[pred_idx])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[pred_idx, sorted_ixs] < min_iou_to_count)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
match = False
pred_class = pred_class_ids[pred_idx]
for gt_idx in sorted_ixs:
gt_class = gt_class_ids[gt_idx]
# If classes_hierarchy is provided and (gt_class, pred_class) are parent/child classes we skip
if classes_hierarchy_ is not None and (
(
gt_class in classes_hierarchy_
and pred_class in classes_hierarchy_[gt_class]
) or (
pred_class in classes_hierarchy_
and gt_class in classes_hierarchy_[pred_class]
)
):
continue
            # If we reach an IoU smaller than the threshold, end the loop (the list is sorted, so all the
            # following ones will be smaller too)
iou = overlaps[pred_idx, gt_idx]
breakAP = iou < ap_iou_threshold
breakConfusion = iou < confusion_iou_threshold
if breakAP and breakConfusion:
break
if not breakConfusion and confusion_matrix is not None and (not confusion_only_best_match or not match):
match = True
if confusion_background_class:
confusion_matrix[gt_class][pred_class] += 1
else:
confusion_matrix[gt_class - 1][pred_class - 1] += 1
# If ground truth box is already matched, go to next one
# TODO : Rework that part, specially for confusion matrix, we are counting positive predictions for each
# match with a gt_mask not only the first time
if gt_match[gt_idx] > -1:
continue
if not breakAP:
# Do we have a match?
if pred_class == gt_class:
gt_match[gt_idx] = pred_idx
pred_match[pred_idx] = gt_idx
# Something has been predicted but no ground truth annotation
if confusion_matrix is not None and confusion_background_class and not match:
confusion_matrix[0][pred_class] += 1
# Looking for a ground truth box without overlapping prediction
if confusion_matrix is not None and confusion_background_class:
for gt_idx in range(len(gt_match)):
if gt_match[gt_idx] == -1:
if gt_class_ids[gt_idx] > nb_class:
print(f"Error : got class id = {gt_class_ids[gt_idx]} while max class id = {nb_class}")
else:
confusion_matrix[gt_class_ids[gt_idx]][0] += 1
return gt_match, pred_match, overlaps, confusion_matrix
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.3,
nb_class=-1, confusion_iou_threshold=0.3, classes_hierarchy=None,
confusion_background_class=True, confusion_only_best_match=True):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps, confusion_matrix = compute_matches(
gt_boxes=gt_boxes, gt_class_ids=gt_class_ids, gt_masks=gt_masks, min_iou_to_count=score_threshold,
pred_boxes=pred_boxes, pred_class_ids=pred_class_ids, pred_masks=pred_masks, pred_scores=pred_scores,
nb_class=nb_class, ap_iou_threshold=iou_threshold, confusion_iou_threshold=confusion_iou_threshold,
classes_hierarchy=classes_hierarchy, confusion_background_class=confusion_background_class,
confusion_only_best_match=confusion_only_best_match
)
if len(gt_class_ids) == len(pred_class_ids) == 0:
return 1., 1., 1., overlaps, confusion_matrix
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
for i in range(len(recalls)):
if np.isnan(recalls[i]):
recalls[i] = 0.
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps, confusion_matrix
def compute_ap_range(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_thresholds=None, verbose=1):
"""Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
# Default is 0.5 to 0.95 with increments of 0.05
iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
# Compute AP over range of IoU thresholds
AP = []
for iou_threshold in iou_thresholds:
ap, precisions, recalls, overlaps, _ = \
compute_ap(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold)
if verbose:
print(f"AP @{iou_threshold:.2f}:\t {ap:.3f}")
AP.append(ap)
AP = np.array(AP).mean()
if verbose:
print(f"AP @{iou_thresholds[0]:.2f}-{iou_thresholds[-1]:.2f}:\t {AP:.3f}")
return AP
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
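# Illustrative values: one prediction matching the first of two GT boxes at IoU >= 0.5.
# >>> pred = np.array([[0, 0, 10, 10]])
# >>> gt = np.array([[0, 0, 10, 10], [20, 20, 30, 30]])
# >>> compute_recall(pred, gt, 0.5)     # -> (0.5, array([0])), i.e. half the GT boxes found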
def download_trained_weights(weights=None, verbose=1):
""" Download trained weights from Releases. """
if weights is None:
weights = WEIGHTS_URL
if verbose > 0:
print("Downloading weights files if needed ...", end='')
for weightsUrl in weights:
path = weightsUrl.split('/')[-1]
if not os.path.exists(path) and not os.path.exists(path.replace(".zip", "")):
with urllib.request.urlopen(weightsUrl) as resp, open(path, 'wb') as out:
shutil.copyfileobj(resp, out)
if not os.path.exists(path.replace(".zip", "")):
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall(".")
if verbose > 0:
print(" Done !")
def norm_boxes(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.divide((boxes - shift), scale).astype(np.float32)
def denorm_boxes(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
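# An illustrative round-trip between pixel and normalized coordinates:
# >>> b = np.array([[0, 0, 512, 512]])
# >>> nb = norm_boxes(b, (1024, 1024))      # float32 values close to [0, 0, 0.5, 0.5]
# >>> denorm_boxes(nb, (1024, 1024))        # -> the original [[0, 0, 512, 512]] as int32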
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
"""A wrapper for Scikit-Image resize().
Scikit-Image generates warnings on every call to resize() if it doesn't
receive the right parameters. The right parameters depend on the version
of skimage. This solves the problem by using different parameters per
version. And it provides a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range, anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma)
else:
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range)
|
[
"zipfile.ZipFile",
"datasetTools.datasetDivider.getBWCount",
"numpy.argsort",
"numpy.array",
"scipy.ndimage.zoom",
"numpy.arange",
"numpy.divide",
"os.path.exists",
"numpy.multiply",
"numpy.where",
"numpy.delete",
"numpy.max",
"numpy.empty",
"numpy.concatenate",
"mrcnn.visualize.create_multiclass_mask",
"warnings.simplefilter",
"numpy.maximum",
"distutils.version.LooseVersion",
"random.randint",
"cv2.fillPoly",
"numpy.all",
"numpy.ones",
"shutil.copyfileobj",
"logging.warning",
"numpy.argmax",
"numpy.any",
"numpy.isnan",
"numpy.around",
"numpy.nonzero",
"numpy.minimum",
"numpy.logical_and",
"warnings.catch_warnings",
"numpy.sum",
"numpy.zeros",
"numpy.expand_dims",
"json.load",
"numpy.cumsum",
"numpy.pad",
"numpy.zeros_like",
"json.dump"
] |
[((2388, 2407), 'datasetTools.datasetDivider.getBWCount', 'dD.getBWCount', (['mask'], {}), '(mask)\n', (2401, 2407), True, 'from datasetTools import datasetDivider as dD\n'), ((3369, 3425), 'numpy.pad', 'np.pad', (['base_mask', '(1)'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(base_mask, 1, mode='constant', constant_values=0)\n", (3375, 3425), True, 'import numpy as np\n'), ((3436, 3476), 'numpy.zeros_like', 'np.zeros_like', (['base_mask'], {'dtype': 'np.uint8'}), '(base_mask, dtype=np.uint8)\n', (3449, 3476), True, 'import numpy as np\n'), ((7307, 7353), 'numpy.zeros', 'np.zeros', (['[_mask.shape[-1], 4]'], {'dtype': 'np.int32'}), '([_mask.shape[-1], 4], dtype=np.int32)\n', (7315, 7353), True, 'import numpy as np\n'), ((8566, 8597), 'numpy.maximum', 'np.maximum', (['box[0]', 'boxes[:, 0]'], {}), '(box[0], boxes[:, 0])\n', (8576, 8597), True, 'import numpy as np\n'), ((8607, 8638), 'numpy.minimum', 'np.minimum', (['box[2]', 'boxes[:, 2]'], {}), '(box[2], boxes[:, 2])\n', (8617, 8638), True, 'import numpy as np\n'), ((8648, 8679), 'numpy.maximum', 'np.maximum', (['box[1]', 'boxes[:, 1]'], {}), '(box[1], boxes[:, 1])\n', (8658, 8679), True, 'import numpy as np\n'), ((8689, 8720), 'numpy.minimum', 'np.minimum', (['box[3]', 'boxes[:, 3]'], {}), '(box[3], boxes[:, 3])\n', (8699, 8720), True, 'import numpy as np\n'), ((9548, 9592), 'numpy.zeros', 'np.zeros', (['(boxes1.shape[0], boxes2.shape[0])'], {}), '((boxes1.shape[0], boxes2.shape[0]))\n', (9556, 9592), True, 'import numpy as np\n'), ((9928, 9974), 'numpy.zeros', 'np.zeros', (['(masks1.shape[-1], masks2.shape[-1])'], {}), '((masks1.shape[-1], masks2.shape[-1]))\n', (9936, 9974), True, 'import numpy as np\n'), ((10177, 10203), 'numpy.nonzero', 'np.nonzero', (['matching_boxes'], {}), '(matching_boxes)\n', (10187, 10203), True, 'import numpy as np\n'), ((12055, 12085), 'numpy.array', 'np.array', (['pick'], {'dtype': 'np.int32'}), '(pick, dtype=np.int32)\n', (12063, 12085), True, 'import numpy as np\n'), ((23673, 23726), 'numpy.zeros', 'np.zeros', (['(mini_shape + (_mask.shape[-1],))'], {'dtype': 'bool'}), '(mini_shape + (_mask.shape[-1],), dtype=bool)\n', (23681, 23726), True, 'import numpy as np\n'), ((24809, 24872), 'numpy.zeros', 'np.zeros', (['(image_shape[:2] + (_mini_mask.shape[-1],))'], {'dtype': 'bool'}), '(image_shape[:2] + (_mini_mask.shape[-1],), dtype=bool)\n', (24817, 24872), True, 'import numpy as np\n'), ((26159, 26220), 'numpy.zeros', 'np.zeros', (['(output_shape + (_mask.shape[-1],))'], {'dtype': 'np.float64'}), '(output_shape + (_mask.shape[-1],), dtype=np.float64)\n', (26167, 26220), True, 'import numpy as np\n'), ((27888, 27956), 'numpy.zeros', 'np.zeros', (['(output_shape[:2] + (_mini_mask.shape[-1],))'], {'dtype': 'np.uint8'}), '(output_shape[:2] + (_mini_mask.shape[-1],), dtype=np.uint8)\n', (27896, 27956), True, 'import numpy as np\n'), ((28958, 28998), 'numpy.zeros', 'np.zeros', (['image_shape[:2]'], {'dtype': 'np.bool'}), '(image_shape[:2], dtype=np.bool)\n', (28966, 28998), True, 'import numpy as np\n'), ((34893, 34953), 'mrcnn.visualize.create_multiclass_mask', 'create_multiclass_mask', (['image_shape', 'expectedResults', 'config'], {}), '(image_shape, expectedResults, config)\n', (34915, 34953), False, 'from mrcnn.visualize import create_multiclass_mask\n'), ((34973, 35034), 'mrcnn.visualize.create_multiclass_mask', 'create_multiclass_mask', (['image_shape', 'predictedResults', 'config'], {}), '(image_shape, predictedResults, config)\n', (34995, 35034), False, 'from mrcnn.visualize import 
create_multiclass_mask\n'), ((35058, 35118), 'numpy.zeros', 'np.zeros', (['(num_classes + 1, num_classes + 1)'], {'dtype': 'np.int64'}), '((num_classes + 1, num_classes + 1), dtype=np.int64)\n', (35066, 35118), True, 'import numpy as np\n'), ((42510, 42548), 'numpy.concatenate', 'np.concatenate', (['[[0], precisions, [0]]'], {}), '([[0], precisions, [0]])\n', (42524, 42548), True, 'import numpy as np\n'), ((42563, 42598), 'numpy.concatenate', 'np.concatenate', (['[[0], recalls, [1]]'], {}), '([[0], recalls, [1]])\n', (42577, 42598), True, 'import numpy as np\n'), ((43047, 43118), 'numpy.sum', 'np.sum', (['((recalls[indices] - recalls[indices - 1]) * precisions[indices])'], {}), '((recalls[indices] - recalls[indices - 1]) * precisions[indices])\n', (43053, 43118), True, 'import numpy as np\n'), ((44547, 44571), 'numpy.max', 'np.max', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (44553, 44571), True, 'import numpy as np\n'), ((44589, 44616), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (44598, 44616), True, 'import numpy as np\n'), ((45966, 46004), 'numpy.array', 'np.array', (['[h - 1, w - 1, h - 1, w - 1]'], {}), '([h - 1, w - 1, h - 1, w - 1])\n', (45974, 46004), True, 'import numpy as np\n'), ((46017, 46039), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (46025, 46039), True, 'import numpy as np\n'), ((46531, 46569), 'numpy.array', 'np.array', (['[h - 1, w - 1, h - 1, w - 1]'], {}), '([h - 1, w - 1, h - 1, w - 1])\n', (46539, 46569), True, 'import numpy as np\n'), ((46582, 46604), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (46590, 46604), True, 'import numpy as np\n'), ((6008, 6050), 'numpy.array', 'np.array', (['[0, 0, yMax - yMin, xMax - xMin]'], {}), '([0, 0, yMax - yMin, xMax - xMin])\n', (6016, 6050), True, 'import numpy as np\n'), ((7216, 7239), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(2)'], {}), '(mask, 2)\n', (7230, 7239), True, 'import numpy as np\n'), ((8740, 8762), 'numpy.maximum', 'np.maximum', (['(x2 - x1)', '(0)'], {}), '(x2 - x1, 0)\n', (8750, 8762), True, 'import numpy as np\n'), ((8765, 8787), 'numpy.maximum', 'np.maximum', (['(y2 - y1)', '(0)'], {}), '(y2 - y1, 0)\n', (8775, 8787), True, 'import numpy as np\n'), ((11985, 12011), 'numpy.delete', 'np.delete', (['ixs', 'remove_ixs'], {}), '(ixs, remove_ixs)\n', (11994, 12011), True, 'import numpy as np\n'), ((12026, 12043), 'numpy.delete', 'np.delete', (['ixs', '(0)'], {}), '(ixs, 0)\n', (12035, 12043), True, 'import numpy as np\n'), ((14565, 14592), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (14574, 14592), True, 'import numpy as np\n'), ((14742, 14768), 'numpy.arange', 'np.arange', (['self.num_images'], {}), '(self.num_images)\n', (14751, 14768), True, 'import numpy as np\n'), ((17737, 17843), 'logging.warning', 'logging.warning', (['"""You are using the default load_mask(), maybe you need to define your own one."""'], {}), "(\n 'You are using the default load_mask(), maybe you need to define your own one.'\n )\n", (17752, 17843), False, 'import logging\n'), ((17849, 17868), 'numpy.empty', 'np.empty', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (17857, 17868), True, 'import numpy as np\n'), ((17889, 17912), 'numpy.empty', 'np.empty', (['[0]', 'np.int32'], {}), '([0], np.int32)\n', (17897, 17912), True, 'import numpy as np\n'), ((20950, 21008), 'numpy.pad', 'np.pad', (['image', 'padding'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(image, padding, mode='constant', 
constant_values=0)\n", (20956, 21008), True, 'import numpy as np\n'), ((22855, 22880), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (22878, 22880), False, 'import warnings\n'), ((22890, 22921), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (22911, 22921), False, 'import warnings\n'), ((22937, 22994), 'scipy.ndimage.zoom', 'scipy.ndimage.zoom', (['mask'], {'zoom': '[scale, scale, 1]', 'order': '(0)'}), '(mask, zoom=[scale, scale, 1], order=0)\n', (22955, 22994), False, 'import scipy\n'), ((23109, 23166), 'numpy.pad', 'np.pad', (['mask', 'padding'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mask, padding, mode='constant', constant_values=0)\n", (23115, 23166), True, 'import numpy as np\n'), ((23541, 23564), 'numpy.expand_dims', 'np.expand_dims', (['bbox', '(0)'], {}), '(bbox, 0)\n', (23555, 23564), True, 'import numpy as np\n'), ((23581, 23604), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(2)'], {}), '(mask, 2)\n', (23595, 23604), True, 'import numpy as np\n'), ((24662, 24685), 'numpy.expand_dims', 'np.expand_dims', (['bbox', '(0)'], {}), '(bbox, 0)\n', (24676, 24685), True, 'import numpy as np\n'), ((24707, 24735), 'numpy.expand_dims', 'np.expand_dims', (['mini_mask', '(2)'], {}), '(mini_mask, 2)\n', (24721, 24735), True, 'import numpy as np\n'), ((26026, 26049), 'numpy.expand_dims', 'np.expand_dims', (['bbox', '(0)'], {}), '(bbox, 0)\n', (26040, 26049), True, 'import numpy as np\n'), ((26066, 26089), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(2)'], {}), '(mask, 2)\n', (26080, 26089), True, 'import numpy as np\n'), ((26420, 26456), 'numpy.zeros', 'np.zeros', (['mask_shape'], {'dtype': 'np.uint8'}), '(mask_shape, dtype=np.uint8)\n', (26428, 26456), True, 'import numpy as np\n'), ((27740, 27763), 'numpy.expand_dims', 'np.expand_dims', (['bbox', '(0)'], {}), '(bbox, 0)\n', (27754, 27763), True, 'import numpy as np\n'), ((27785, 27813), 'numpy.expand_dims', 'np.expand_dims', (['mini_mask', '(2)'], {}), '(mini_mask, 2)\n', (27799, 27813), True, 'import numpy as np\n'), ((30915, 30938), 'json.dump', 'json.dump', (['data', 'output'], {}), '(data, output)\n', (30924, 30938), False, 'import json\n'), ((31192, 31212), 'json.load', 'json.load', (['inputFile'], {}), '(inputFile)\n', (31201, 31212), False, 'import json\n'), ((36399, 36455), 'numpy.zeros', 'np.zeros', (['(nb_class + bg, nb_class + bg)'], {'dtype': 'np.int64'}), '((nb_class + bg, nb_class + bg), dtype=np.int64)\n', (36407, 36455), True, 'import numpy as np\n'), ((37196, 37219), 'numpy.argsort', 'np.argsort', (['pred_scores'], {}), '(pred_scores)\n', (37206, 37219), True, 'import numpy as np\n'), ((37613, 37643), 'numpy.ones', 'np.ones', (['[pred_boxes.shape[0]]'], {}), '([pred_boxes.shape[0]])\n', (37620, 37643), True, 'import numpy as np\n'), ((37664, 37692), 'numpy.ones', 'np.ones', (['[gt_boxes.shape[0]]'], {}), '([gt_boxes.shape[0]])\n', (37671, 37692), True, 'import numpy as np\n'), ((42203, 42229), 'numpy.cumsum', 'np.cumsum', (['(pred_match > -1)'], {}), '(pred_match > -1)\n', (42212, 42229), True, 'import numpy as np\n'), ((42386, 42406), 'numpy.isnan', 'np.isnan', (['recalls[i]'], {}), '(recalls[i])\n', (42394, 42406), True, 'import numpy as np\n'), ((42892, 42936), 'numpy.maximum', 'np.maximum', (['precisions[i]', 'precisions[i + 1]'], {}), '(precisions[i], precisions[i + 1])\n', (42902, 42936), True, 'import numpy as np\n'), ((43549, 43574), 'numpy.arange', 'np.arange', (['(0.5)', '(1.0)', '(0.05)'], {}), '(0.5, 1.0, 0.05)\n', 
(43558, 43574), True, 'import numpy as np\n'), ((44636, 44660), 'numpy.where', 'np.where', (['(iou_max >= iou)'], {}), '(iou_max >= iou)\n', (44644, 44660), True, 'import numpy as np\n'), ((47200, 47233), 'distutils.version.LooseVersion', 'LooseVersion', (['skimage.__version__'], {}), '(skimage.__version__)\n', (47212, 47233), False, 'from distutils.version import LooseVersion\n'), ((47237, 47257), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.14"""'], {}), "('0.14')\n", (47249, 47257), False, 'from distutils.version import LooseVersion\n'), ((1650, 1690), 'numpy.delete', 'np.delete', (["results['scores']", 'emptyMasks'], {}), "(results['scores'], emptyMasks)\n", (1659, 1690), True, 'import numpy as np\n'), ((1726, 1769), 'numpy.delete', 'np.delete', (["results['class_ids']", 'emptyMasks'], {}), "(results['class_ids'], emptyMasks)\n", (1735, 1769), True, 'import numpy as np\n'), ((1801, 1848), 'numpy.delete', 'np.delete', (["results['masks']", 'emptyMasks'], {'axis': '(2)'}), "(results['masks'], emptyMasks, axis=2)\n", (1810, 1848), True, 'import numpy as np\n'), ((1879, 1925), 'numpy.delete', 'np.delete', (["results['rois']", 'emptyMasks'], {'axis': '(0)'}), "(results['rois'], emptyMasks, axis=0)\n", (1888, 1925), True, 'import numpy as np\n'), ((3900, 3948), 'cv2.fillPoly', 'cv2.fillPoly', (['res'], {'pts': '[biggest_part]', 'color': '(255)'}), '(res, pts=[biggest_part], color=255)\n', (3912, 3948), False, 'import cv2\n'), ((10563, 10591), 'numpy.logical_and', 'np.logical_and', (['mask1', 'mask2'], {}), '(mask1, mask2)\n', (10577, 10591), True, 'import numpy as np\n'), ((21769, 21827), 'numpy.pad', 'np.pad', (['image', 'padding'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(image, padding, mode='constant', constant_values=0)\n", (21775, 21827), True, 'import numpy as np\n'), ((28849, 28882), 'numpy.where', 'np.where', (['(mask >= threshold)', '(1)', '(0)'], {}), '(mask >= threshold, 1, 0)\n', (28857, 28882), True, 'import numpy as np\n'), ((35530, 35552), 'numpy.all', 'np.all', (['(x == 0)'], {'axis': '(1)'}), '(x == 0, axis=1)\n', (35536, 35552), True, 'import numpy as np\n'), ((37839, 37869), 'numpy.argsort', 'np.argsort', (['overlaps[pred_idx]'], {}), '(overlaps[pred_idx])\n', (37849, 37869), True, 'import numpy as np\n'), ((37931, 37990), 'numpy.where', 'np.where', (['(overlaps[pred_idx, sorted_ixs] < min_iou_to_count)'], {}), '(overlaps[pred_idx, sorted_ixs] < min_iou_to_count)\n', (37939, 37990), True, 'import numpy as np\n'), ((42992, 43029), 'numpy.where', 'np.where', (['(recalls[:-1] != recalls[1:])'], {}), '(recalls[:-1] != recalls[1:])\n', (43000, 43029), True, 'import numpy as np\n'), ((44009, 44021), 'numpy.array', 'np.array', (['AP'], {}), '(AP)\n', (44017, 44021), True, 'import numpy as np\n'), ((46051, 46082), 'numpy.divide', 'np.divide', (['(boxes - shift)', 'scale'], {}), '(boxes - shift, scale)\n', (46060, 46082), True, 'import numpy as np\n'), ((7481, 7498), 'numpy.any', 'np.any', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (7487, 7498), True, 'import numpy as np\n'), ((7540, 7557), 'numpy.any', 'np.any', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (7546, 7557), True, 'import numpy as np\n'), ((8000, 8026), 'numpy.array', 'np.array', (['[y1, x1, y2, x2]'], {}), '([y1, x1, y2, x2])\n', (8008, 8026), True, 'import numpy as np\n'), ((11877, 11902), 'numpy.where', 'np.where', (['(iou > threshold)'], {}), '(iou > threshold)\n', (11885, 11902), True, 'import numpy as np\n'), ((21989, 22019), 'random.randint', 'random.randint', (['(0)', '(h - 
min_dim)'], {}), '(0, h - min_dim)\n', (22003, 22019), False, 'import random\n'), ((22034, 22064), 'random.randint', 'random.randint', (['(0)', '(w - min_dim)'], {}), '(0, w - min_dim)\n', (22048, 22064), False, 'import random\n'), ((24174, 24186), 'numpy.around', 'np.around', (['m'], {}), '(m)\n', (24183, 24186), True, 'import numpy as np\n'), ((25162, 25174), 'numpy.around', 'np.around', (['m'], {}), '(m)\n', (25171, 25174), True, 'import numpy as np\n'), ((26372, 26398), 'numpy.array', 'np.array', (['([offset * 2] * 2)'], {}), '([offset * 2] * 2)\n', (26380, 26398), True, 'import numpy as np\n'), ((28057, 28083), 'numpy.array', 'np.array', (['([offset * 2] * 2)'], {}), '([offset * 2] * 2)\n', (28065, 28083), True, 'import numpy as np\n'), ((28219, 28288), 'numpy.where', 'np.where', (['(resized_mask[offset:-offset, offset:-offset] >= 0.5)', '(255)', '(0)'], {}), '(resized_mask[offset:-offset, offset:-offset] >= 0.5, 255, 0)\n', (28227, 28288), True, 'import numpy as np\n'), ((31405, 31424), 'numpy.array', 'np.array', (['data[key]'], {}), '(data[key])\n', (31413, 31424), True, 'import numpy as np\n'), ((42279, 42305), 'numpy.cumsum', 'np.cumsum', (['(pred_match > -1)'], {}), '(pred_match > -1)\n', (42288, 42305), True, 'import numpy as np\n'), ((45140, 45160), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (45154, 45160), False, 'import os\n'), ((45313, 45342), 'shutil.copyfileobj', 'shutil.copyfileobj', (['resp', 'out'], {}), '(resp, out)\n', (45331, 45342), False, 'import shutil\n'), ((45417, 45443), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (45432, 45443), False, 'import zipfile\n'), ((46626, 46651), 'numpy.multiply', 'np.multiply', (['boxes', 'scale'], {}), '(boxes, scale)\n', (46637, 46651), True, 'import numpy as np\n')]
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Federating: Fast and Slow."""
import statistics
from typing import Callable, Dict, List, Optional, Tuple, cast
import numpy as np
from flower.client_manager import ClientManager
from flower.client_proxy import ClientProxy
from flower.typing import EvaluateRes, FitIns, FitRes, Weights
from .aggregate import aggregate, weighted_loss_avg
from .fedavg import FedAvg
from .parameter import parameters_to_weights, weights_to_parameters
class FastAndSlow(FedAvg):
"""Strategy implementation which alternates between fast and slow rounds."""
# pylint: disable-msg=too-many-arguments,too-many-instance-attributes
def __init__(
self,
fraction_fit: float = 0.1,
fraction_eval: float = 0.1,
min_fit_clients: int = 1,
min_eval_clients: int = 1,
min_available_clients: int = 1,
eval_fn: Optional[Callable[[Weights], Optional[Tuple[float, float]]]] = None,
min_completion_rate_fit: float = 0.5,
min_completion_rate_evaluate: float = 0.5,
on_fit_config_fn: Optional[Callable[[int], Dict[str, str]]] = None,
on_evaluate_config_fn: Optional[Callable[[int], Dict[str, str]]] = None,
r_fast: int = 1,
r_slow: int = 1,
t_fast: int = 10,
t_slow: int = 10,
) -> None:
super().__init__(
fraction_fit=fraction_fit,
fraction_eval=fraction_eval,
min_fit_clients=min_fit_clients,
min_eval_clients=min_eval_clients,
min_available_clients=min_available_clients,
eval_fn=eval_fn,
on_fit_config_fn=on_fit_config_fn,
on_evaluate_config_fn=on_evaluate_config_fn,
)
self.min_completion_rate_fit = min_completion_rate_fit
self.min_completion_rate_evaluate = min_completion_rate_evaluate
self.r_fast = r_fast
self.r_slow = r_slow
self.t_fast = t_fast
self.t_slow = t_slow
self.contributions: Dict[str, List[Tuple[int, float]]] = {}
# pylint: disable-msg=too-many-locals
def on_configure_fit(
self, rnd: int, weights: Weights, client_manager: ClientManager
) -> List[Tuple[ClientProxy, FitIns]]:
"""Configure the next round of training."""
# Block until `min_num_clients` are available
sample_size, min_num_clients = self.num_fit_clients(
client_manager.num_available()
)
success = client_manager.wait_for(num_clients=min_num_clients, timeout=60)
if not success:
# Do not continue if not enough clients are available
return []
# Prepare parameters and config
parameters = weights_to_parameters(weights)
config = {}
if self.on_fit_config_fn is not None:
# Use custom fit config function if provided
config = self.on_fit_config_fn(rnd)
use_fast_timeout = is_fast_round(rnd, self.r_fast, self.r_slow)
config["timeout"] = str(self.t_fast if use_fast_timeout else self.t_slow)
fit_ins = (parameters, config)
# Get all clients and gather their contributions
all_clients: Dict[str, ClientProxy] = client_manager.all()
cid_idx: Dict[int, str] = {}
logits: List[float] = []
for idx, (cid, _) in enumerate(all_clients.items()):
cid_idx[idx] = cid
penalty = 0.0
if cid in self.contributions.keys():
contribs: List[Tuple[int, float]] = self.contributions[cid]
penalty = statistics.mean([c for _, c in contribs])
# `p` should be:
# - High for clients which have never been picked before
# - Medium for clients which have contributed, but not used their entire budget
# - Low (but not 0) for clients which have been picked and used their budget
logits.append(1.1 - penalty)
# Sample clients
indices = np.arange(len(all_clients.keys()))
probs = softmax(np.array(logits))
idxs = np.random.choice(indices, size=sample_size, replace=False, p=probs)
clients = [all_clients[cid_idx[idx]] for idx in idxs]
# Return client/config pairs
return [(client, fit_ins) for client in clients]
def on_aggregate_fit(
self,
rnd: int,
results: List[Tuple[ClientProxy, FitRes]],
failures: List[BaseException],
) -> Optional[Weights]:
"""Aggregate fit results using weighted average."""
if not results:
return None
# Check if enough results are available
completion_rate = len(results) / (len(results) + len(failures))
if completion_rate < self.min_completion_rate_fit:
# Not enough results for aggregation
return None
# Convert results
weights_results = [
(parameters_to_weights(parameters), num_examples)
for client, (parameters, num_examples, _) in results
]
weights_prime = aggregate(weights_results)
# Track contributions to the global model
for client, fit_res in results:
cid = client.cid
contribution: Tuple[int, float] = (rnd, fit_res[1] / fit_res[2])
if cid not in self.contributions.keys():
self.contributions[cid] = []
self.contributions[cid].append(contribution)
return weights_prime
def on_aggregate_evaluate(
self,
rnd: int,
results: List[Tuple[ClientProxy, EvaluateRes]],
failures: List[BaseException],
) -> Optional[float]:
"""Aggregate evaluation losses using weighted average."""
if not results:
return None
# Check if enough results are available
completion_rate = len(results) / (len(results) + len(failures))
if completion_rate < self.min_completion_rate_evaluate:
# Not enough results for aggregation
return None
return weighted_loss_avg([evaluate_res for _, evaluate_res in results])
def is_fast_round(rnd: int, r_fast: int, r_slow: int) -> bool:
"""Determine if the round is fast or slow."""
remainder = rnd % (r_fast + r_slow)
return remainder - r_fast < 0
def softmax(logits: np.ndarray) -> np.ndarray:
"""Compute softmax."""
e_x = np.exp(logits - np.max(logits))
return cast(np.ndarray, e_x / e_x.sum(axis=0))
|
[
"numpy.random.choice",
"numpy.array",
"statistics.mean",
"numpy.max"
] |
[((4713, 4780), 'numpy.random.choice', 'np.random.choice', (['indices'], {'size': 'sample_size', 'replace': '(False)', 'p': 'probs'}), '(indices, size=sample_size, replace=False, p=probs)\n', (4729, 4780), True, 'import numpy as np\n'), ((4680, 4696), 'numpy.array', 'np.array', (['logits'], {}), '(logits)\n', (4688, 4696), True, 'import numpy as np\n'), ((7030, 7044), 'numpy.max', 'np.max', (['logits'], {}), '(logits)\n', (7036, 7044), True, 'import numpy as np\n'), ((4209, 4250), 'statistics.mean', 'statistics.mean', (['[c for _, c in contribs]'], {}), '([c for _, c in contribs])\n', (4224, 4250), False, 'import statistics\n')]
|
import pickle
import random
import cv2 as cv
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from config import pickle_file, num_workers
from utils import align_face
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]),
'val': transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]),
}
class ArcFaceDataset(Dataset):
def __init__(self, split):
with open(pickle_file, 'rb') as file:
data = pickle.load(file)
samples = data['samples']
num_samples = len(samples)
num_train = num_samples
if split == 'train':
self.samples = samples[:num_train]
self.transformer = data_transforms['train']
def __getitem__(self, i):
sample = self.samples[i]
full_path = sample['full_path']
landmarks = sample['landmarks']
try:
img = align_face(full_path, landmarks)
except Exception:
print('full_path: ' + full_path)
raise
img = transforms.ToPILImage()(img)
img = self.transformer(img)
class_id = sample['class_id']
return img, class_id
def __len__(self):
return len(self.samples)
def shuffle(self):
np.random.shuffle(self.samples)
def show_align():
with open(pickle_file, 'rb') as file:
data = pickle.load(file)
samples = random.sample(data['samples'], 10)
for i, sample in enumerate(samples):
full_path = sample['full_path']
landmarks = sample['landmarks']
raw = cv.imread(full_path)
raw = cv.resize(raw, (224, 224))
img = align_face(full_path, landmarks)
filename = 'images/{}_raw.jpg'.format(i)
cv.imwrite(filename, raw)
filename = 'images/{}_img.jpg'.format(i)
cv.imwrite(filename, img)
if __name__ == "__main__":
train_dataset = ArcFaceDataset('train')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=256, shuffle=True,
num_workers=num_workers,
pin_memory=True)
print(len(train_dataset))
print(len(train_loader))
|
[
"cv2.imwrite",
"random.sample",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"pickle.load",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"cv2.resize",
"cv2.imread",
"utils.align_face",
"numpy.random.shuffle"
] |
[((1861, 1895), 'random.sample', 'random.sample', (["data['samples']", '(10)'], {}), "(data['samples'], 10)\n", (1874, 1895), False, 'import random\n'), ((2399, 2517), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(256)', 'shuffle': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=256, shuffle=True,\n num_workers=num_workers, pin_memory=True)\n', (2426, 2517), False, 'import torch\n'), ((1719, 1750), 'numpy.random.shuffle', 'np.random.shuffle', (['self.samples'], {}), '(self.samples)\n', (1736, 1750), True, 'import numpy as np\n'), ((1828, 1845), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1839, 1845), False, 'import pickle\n'), ((2032, 2052), 'cv2.imread', 'cv.imread', (['full_path'], {}), '(full_path)\n', (2041, 2052), True, 'import cv2 as cv\n'), ((2067, 2093), 'cv2.resize', 'cv.resize', (['raw', '(224, 224)'], {}), '(raw, (224, 224))\n', (2076, 2093), True, 'import cv2 as cv\n'), ((2108, 2140), 'utils.align_face', 'align_face', (['full_path', 'landmarks'], {}), '(full_path, landmarks)\n', (2118, 2140), False, 'from utils import align_face\n'), ((2198, 2223), 'cv2.imwrite', 'cv.imwrite', (['filename', 'raw'], {}), '(filename, raw)\n', (2208, 2223), True, 'import cv2 as cv\n'), ((2281, 2306), 'cv2.imwrite', 'cv.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (2291, 2306), True, 'import cv2 as cv\n'), ((374, 407), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (405, 407), False, 'from torchvision import transforms\n'), ((417, 438), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (436, 438), False, 'from torchvision import transforms\n'), ((448, 514), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (468, 514), False, 'from torchvision import transforms\n'), ((628, 649), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (647, 649), False, 'from torchvision import transforms\n'), ((659, 725), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (679, 725), False, 'from torchvision import transforms\n'), ((930, 947), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (941, 947), False, 'import pickle\n'), ((1360, 1392), 'utils.align_face', 'align_face', (['full_path', 'landmarks'], {}), '(full_path, landmarks)\n', (1370, 1392), False, 'from utils import align_face\n'), ((1497, 1520), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1518, 1520), False, 'from torchvision import transforms\n')]
|
#%%
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
import re
from pathlib import Path
#%%
def find_contours(img):
# img channels assumed to be RGB
img_bw = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
_, thresh = cv2.threshold(img_bw, 0, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return contours
def mask_leaf_parts(img):
contours = find_contours(img)
# longest contour usually corresponds to the whole leaf (not necessarily always)
i = np.argmax([len(c) for c in contours])
leaf_contour = contours[i]
mask = np.zeros(img.shape[:2], np.uint8)
cv2.fillPoly(mask, pts=[leaf_contour], color=(255, 255, 255))
masked = cv2.bitwise_and(img, img, mask=mask)
return masked
#%%
def get_bounding_boxes(path_masked):
# assumes image is masked
img_orig = cv2.imread(path_masked)
# plt.imshow(img_orig)
img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
# plt.imshow(img)
contours = find_contours(img)
# print(len(contours))
contours = contours[1:]
boxes = []
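    # keep only sufficiently large contours (area > 10000 px) as leaf-part candidates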
for i, c in enumerate(contours[::-1]):
if (cv2.contourArea(c) > 10000):
boxes.append(cv2.boundingRect(c))
return boxes
def cut(img, bounding_boxes, is_masked=True):
segments = []
for x,y,w,h in bounding_boxes:
img_segmented = img[y:y+h, x:x+w]
if is_masked:
img_segmented = mask_leaf_parts(img_segmented)
segments.append(img_segmented)
return segments
def write(segments, path, img_original, output_path):
filename = Path(path).stem
pathname = os.path.join(output_path, filename)
original_filetype = os.path.splitext(path)[1]
segmented_paths = []
if not os.path.exists(pathname):
os.makedirs(pathname)
# for now, save the original image in the same location as the segments, just for easy checking that the segmentation has gone right
cv2.imwrite(os.path.join(pathname, f"{filename}{original_filetype}"), img_original)
for i, segment in enumerate(segments):
segmented_path = os.path.join(pathname, f"{filename}_{i}.png")
segmented_paths.append(segmented_path)
cv2.imwrite(segmented_path, segment)
return segmented_paths
def segment(path_masked, path_original, output_path):
img_orig_masked = cv2.imread(path_masked)
img_masked = cv2.cvtColor(img_orig_masked, cv2.COLOR_BGR2RGB)
img_orig = cv2.imread(path_original)
img_original = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
bounding_boxes = get_bounding_boxes(path_masked)
segments_masked = cut(img_masked, bounding_boxes, is_masked=True)
segments_original = cut(img_original, bounding_boxes, is_masked=False)
# TODO: if original image and masked image names will be the same (the separation is done on the folder level for example), the original image will overwrite the segmented masked image
segmented_paths_masked = write(segments_masked, path_masked, img_masked, output_path)
segmented_paths_original = write(segments_original, path_original, img_original, output_path)
return segmented_paths_masked, segmented_paths_original
|
[
"cv2.fillPoly",
"os.path.exists",
"cv2.imwrite",
"os.makedirs",
"pathlib.Path",
"cv2.threshold",
"cv2.bitwise_and",
"os.path.join",
"os.path.splitext",
"cv2.contourArea",
"numpy.zeros",
"cv2.cvtColor",
"cv2.findContours",
"cv2.imread",
"cv2.boundingRect"
] |
[((191, 228), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (203, 228), False, 'import cv2\n'), ((245, 297), 'cv2.threshold', 'cv2.threshold', (['img_bw', '(0)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(img_bw, 0, 255, cv2.THRESH_BINARY_INV)\n', (258, 297), False, 'import cv2\n'), ((316, 380), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (332, 380), False, 'import cv2\n'), ((639, 672), 'numpy.zeros', 'np.zeros', (['img.shape[:2]', 'np.uint8'], {}), '(img.shape[:2], np.uint8)\n', (647, 672), True, 'import numpy as np\n'), ((677, 738), 'cv2.fillPoly', 'cv2.fillPoly', (['mask'], {'pts': '[leaf_contour]', 'color': '(255, 255, 255)'}), '(mask, pts=[leaf_contour], color=(255, 255, 255))\n', (689, 738), False, 'import cv2\n'), ((753, 789), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (768, 789), False, 'import cv2\n'), ((897, 920), 'cv2.imread', 'cv2.imread', (['path_masked'], {}), '(path_masked)\n', (907, 920), False, 'import cv2\n'), ((958, 999), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig', 'cv2.COLOR_BGR2RGB'], {}), '(img_orig, cv2.COLOR_BGR2RGB)\n', (970, 999), False, 'import cv2\n'), ((1666, 1701), 'os.path.join', 'os.path.join', (['output_path', 'filename'], {}), '(output_path, filename)\n', (1678, 1701), False, 'import os\n'), ((2387, 2410), 'cv2.imread', 'cv2.imread', (['path_masked'], {}), '(path_masked)\n', (2397, 2410), False, 'import cv2\n'), ((2428, 2476), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig_masked', 'cv2.COLOR_BGR2RGB'], {}), '(img_orig_masked, cv2.COLOR_BGR2RGB)\n', (2440, 2476), False, 'import cv2\n'), ((2493, 2518), 'cv2.imread', 'cv2.imread', (['path_original'], {}), '(path_original)\n', (2503, 2518), False, 'import cv2\n'), ((2538, 2579), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig', 'cv2.COLOR_BGR2RGB'], {}), '(img_orig, cv2.COLOR_BGR2RGB)\n', (2550, 2579), False, 'import cv2\n'), ((1635, 1645), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1639, 1645), False, 'from pathlib import Path\n'), ((1727, 1749), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1743, 1749), False, 'import os\n'), ((1791, 1815), 'os.path.exists', 'os.path.exists', (['pathname'], {}), '(pathname)\n', (1805, 1815), False, 'import os\n'), ((1825, 1846), 'os.makedirs', 'os.makedirs', (['pathname'], {}), '(pathname)\n', (1836, 1846), False, 'import os\n'), ((2001, 2057), 'os.path.join', 'os.path.join', (['pathname', 'f"""{filename}{original_filetype}"""'], {}), "(pathname, f'{filename}{original_filetype}')\n", (2013, 2057), False, 'import os\n'), ((2142, 2187), 'os.path.join', 'os.path.join', (['pathname', 'f"""{filename}_{i}.png"""'], {}), "(pathname, f'{filename}_{i}.png')\n", (2154, 2187), False, 'import os\n'), ((2243, 2279), 'cv2.imwrite', 'cv2.imwrite', (['segmented_path', 'segment'], {}), '(segmented_path, segment)\n', (2254, 2279), False, 'import cv2\n'), ((1184, 1202), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (1199, 1202), False, 'import cv2\n'), ((1238, 1257), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1254, 1257), False, 'import cv2\n')]
|
from sklearn.preprocessing import scale
import numpy as np
class GradientDescent:
learning_rate = 0.01
max_iter = 2000
scale = False
new_theta = []
def __init__(self, learning_rate=0.01, max_iter=2000, scale=False):
self.learning_rate = learning_rate
self.max_iter = max_iter
self.scale = scale
def fit(self, X, y):
ones = [1] * len(X)
if self.scale:
X = scale(X)
X = np.transpose(np.concatenate((np.array([ones]).reshape(-1, 1), X), axis=1))
zeroes = [0] * X.shape[0]
theta = np.array([zeroes])
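        # batch gradient descent: every iteration moves theta against the mean
        # gradient of the squared-error cost over all training samples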
for i in range(self.max_iter):
htheta = np.dot(theta, X)
diff_theta = htheta - y.values
partial_derivative_theta = np.dot(diff_theta, np.transpose(X)) / len(y.values)
theta = theta - self.learning_rate * partial_derivative_theta
self.new_theta.append(theta)
def predict(self, X):
        if self.scale:
X = scale(X)
theta0 = self.new_theta[self.max_iter-1][0][0]
thetas = []
for i in range(1, self.new_theta[self.max_iter-1].shape[1]):
thetas.append(self.new_theta[self.max_iter-1][0][i])
predict = theta0 + (thetas * X).sum(axis=1)
return(predict)
def score(self, X, y):
        if self.scale:
X = scale(X)
pred_y = self.predict(X)
mean_y = y.mean()
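        # coefficient of determination: R^2 = 1 - RSS / TSS (the `ess` variable below holds the total sum of squares)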
ess = sum((y - mean_y) ** 2)
rss = sum((y - pred_y) ** 2)
rsquared = 1 - (rss/ess)
return rsquared
|
[
"numpy.array",
"numpy.dot",
"numpy.transpose",
"sklearn.preprocessing.scale"
] |
[((616, 634), 'numpy.array', 'np.array', (['[zeroes]'], {}), '([zeroes])\n', (624, 634), True, 'import numpy as np\n'), ((467, 475), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (472, 475), False, 'from sklearn.preprocessing import scale\n'), ((697, 713), 'numpy.dot', 'np.dot', (['theta', 'X'], {}), '(theta, X)\n', (703, 713), True, 'import numpy as np\n'), ((1036, 1044), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (1041, 1044), False, 'from sklearn.preprocessing import scale\n'), ((1406, 1414), 'sklearn.preprocessing.scale', 'scale', (['X'], {}), '(X)\n', (1411, 1414), False, 'from sklearn.preprocessing import scale\n'), ((817, 832), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (829, 832), True, 'import numpy as np\n'), ((518, 534), 'numpy.array', 'np.array', (['[ones]'], {}), '([ones])\n', (526, 534), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
@author: miko
"""
from sklearn.feature_extraction import DictVectorizer
import csv
from sklearn import tree
from sklearn import preprocessing
from sklearn.externals.six import StringIO
import numpy as np
np.set_printoptions(threshold = 1e6)  # set the threshold for how many array elements get printed
# Read in the csv file and put features into list of dict and list of class label
allElectronicsData = open(r'D:\development\DailyImprove\July机器学习与深度学习\(Part One)深度学习基础\代码与素材\代码与素材(1)\01DTree\AllElectronics.csv', 'r')
reader = csv.reader(allElectronicsData)
#headers = reader.next()
headers = next(reader)
print(headers)
print("~"*10+"headers end"+"~"*10)
featureList = []
labelList = []
for row in reader:  # iterate over every data row
    labelList.append(row[len(row)-1])  # the last column is the class label
    rowDict = {}  # collect all features of this row in one dict
    for i in range(1, len(row)-1):  # half-open range: features from age through credit_rating
        rowDict[headers[i]] = row[i]  # feature name -> feature value
    featureList.append(rowDict)  # append this row's feature dict to the feature list
print(featureList)
print("~"*10+"featureList end"+"~"*10)
# Vetorize features
vec = DictVectorizer()  # vectorize the feature dicts (one-hot encoding of categorical features)
dummyX = vec.fit_transform(featureList).toarray()
print("dummyX: " + str(dummyX))
print(vec.get_feature_names())
print("~"*10+"dummyX end"+"~"*10)
print("labelList: " + str(labelList))
print("~"*10+"labelList end"+"~"*10)
# vectorize class labels
lb = preprocessing.LabelBinarizer()
dummyY = lb.fit_transform(labelList)
print("dummyY: " + str(dummyY))
print("~"*10+"dummyY end"+"~"*10)
# Using decision tree for classification
# clf = tree.DecisionTreeClassifier()
clf = tree.DecisionTreeClassifier(criterion='entropy')  # splitting criterion: entropy (information gain)
clf = clf.fit(dummyX, dummyY)
print("clf: " + str(clf))
# Visualize model
with open("allElectronicInformationGainOri.dot", 'w') as f:
    # Write the tree to a dot file; after installing Graphviz, run `dot -Tpdf allElectronicInformationGainOri.dot -o output.pdf` to render the decision tree as a PDF
f = tree.export_graphviz(clf, feature_names=vec.get_feature_names(), out_file=f)
oneRowX = dummyX[0, :]
print("oneRowX: " + str(oneRowX))
newRowX = oneRowX
newRowX[0] = 1
newRowX[2] = 0
print("newRowX: " + str(newRowX))
predictedY = clf.predict(newRowX)
print("predictedY: " + str(predictedY))
|
[
"sklearn.preprocessing.LabelBinarizer",
"sklearn.feature_extraction.DictVectorizer",
"sklearn.tree.DecisionTreeClassifier",
"csv.reader",
"numpy.set_printoptions"
] |
[((236, 276), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(1000000.0)'}), '(threshold=1000000.0)\n', (255, 276), True, 'import numpy as np\n'), ((512, 542), 'csv.reader', 'csv.reader', (['allElectronicsData'], {}), '(allElectronicsData)\n', (522, 542), False, 'import csv\n'), ((1073, 1089), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (1087, 1089), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((1365, 1395), 'sklearn.preprocessing.LabelBinarizer', 'preprocessing.LabelBinarizer', ([], {}), '()\n', (1393, 1395), False, 'from sklearn import preprocessing\n'), ((1586, 1634), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'criterion': '"""entropy"""'}), "(criterion='entropy')\n", (1613, 1634), False, 'from sklearn import tree\n')]
|
"""Data generator for image datasets."""
import itertools
import random
import numpy as np
from kaishi.image.util import swap_channel_dimension
from kaishi.image import ops
def augment_and_label(imobj):
"""Augment an image with common issues and return the modified image + label vector.
Labels at output layer (probabilities, no softmax): [DOCUMENT, RECTIFIED, ROTATED_RIGHT, ROTATED_LEFT, UPSIDE_DOWN, STRETCHING]
:param imobj: image object to randomly augment and label
:type imobj: :class:`kaishi.image.file.ImageFile`
:return: augmented image and label vector applied
:rtype: :class:`kaishi.image.file.ImageFile` and `numpy.array`
"""
label = np.zeros((6,))
im = imobj.small_image.convert("RGB")
if "document" in imobj.relative_path: # Document label
label[0] = 1
if np.random.random() < 0.5: # Remove colors sometimes, no matter the source
im = im.convert("L").convert("RGB")
rot_param = np.random.random() # Rotation (<0.25 does nothing)
if rot_param <= 0.25:
label[1] = 1
elif 0.25 < rot_param <= 0.5:
im = ops.add_rotation(im, ccw_rotation_degrees=90)
label[3] = 1
elif 0.5 < rot_param <= 0.75:
im = ops.add_rotation(im, ccw_rotation_degrees=180)
label[4] = 1
elif rot_param > 0.75:
im = ops.add_rotation(im, ccw_rotation_degrees=270)
label[2] = 1
stretch_param = np.random.random() # Stretching
if 0.25 < stretch_param <= 0.75:
if 0.25 < stretch_param <= 0.5:
h_stretch = 100
v_stretch = 0
elif 0.5 < stretch_param <= 0.75:
h_stretch = 0
v_stretch = 100
pre_stretch_size = im.size
im = ops.add_stretching(im, h_stretch, v_stretch)
im = ops.extract_patch(
im, pre_stretch_size
) # Crop back to original size if stretched
label[5] = 1
return im, label
def train_generator(self, batch_size: int = 32, string_to_match: str = None):
"""Generator for training the data labeler. Operates on a :class:`kaishi.image.dataset.ImageDataset` object.
:param self: image dataset
:type self: :class:`kaishi.image.dataset.ImageDatset`
:param batch_size: batch size for generated data
:type batch_size: int
:param string_to_match: string to match (ignores files without this string in the relative path)
:type string_to_match: str
:return: batch arrays and label vectors
:rtype: :class:`numpy.array` and list
"""
indexes = list(range(len(self.files)))
random.seed(42)
np.random.seed(42)
random.shuffle(indexes)
bi = 0 # Index within batch
for imind in itertools.cycle(indexes):
if "validate" in self.files[imind].relative_path: # Don't use validation data
continue
if "high_res" in self.files[imind].relative_path: # Use only low res photos
continue
if (
string_to_match is not None
and string_to_match not in self.files[imind].relative_path
):
continue
self.files[imind].verify_loaded()
if self.files[imind].image is None:
continue
if bi == 0: # Initialize the batch if needed
batch = [None] * batch_size
labels = np.zeros((batch_size, 6))
# Perturb the image randomly and label
batch[bi], labels[bi, :] = augment_and_label(self.files[imind])
if bi == batch_size - 1:
bi = 0
batch = np.stack(batch)
yield swap_channel_dimension(batch), labels
else:
bi += 1
def generate_validation_data(self, n_examples: int = 400, string_to_match: str = None):
"""Generate a reproducibly random validation data set.
:param n_examples: number of examples in the validation set
:type n_examples: int
:param string_to_match: string to match (ignores files without this string in the relative path)
:type string_to_match: str
:return: stacked training examples (first dimension is batch) and stacked labels
:rtype: `numpy.array` and `numpy.array`
"""
indexes = list(range(len(self.files)))
random.seed(42)
np.random.seed(42)
random.shuffle(indexes)
X = [None] * n_examples
y = np.zeros((n_examples, 6))
i = 0
for imind in itertools.cycle(indexes):
if i == n_examples:
break
if (
"validate" not in self.files[imind].relative_path
): # Use only validation data
continue
if "high_res" in self.files[imind].relative_path: # Disregard high res images
continue
if (
string_to_match is not None
and string_to_match not in self.files[imind].relative_path
):
continue
self.files[imind].verify_loaded()
if self.files[imind].image is None:
continue
# Perturb the image randomly and label
X[i], y[i, :] = augment_and_label(self.files[imind])
i += 1
X = np.stack(X)
return swap_channel_dimension(X), y
|
[
"itertools.cycle",
"kaishi.image.util.swap_channel_dimension",
"random.shuffle",
"kaishi.image.ops.add_stretching",
"numpy.random.random",
"random.seed",
"numpy.stack",
"numpy.zeros",
"kaishi.image.ops.add_rotation",
"numpy.random.seed",
"kaishi.image.ops.extract_patch"
] |
[((684, 698), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (692, 698), True, 'import numpy as np\n'), ((966, 984), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (982, 984), True, 'import numpy as np\n'), ((1422, 1440), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1438, 1440), True, 'import numpy as np\n'), ((2571, 2586), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2582, 2586), False, 'import random\n'), ((2591, 2609), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2605, 2609), True, 'import numpy as np\n'), ((2614, 2637), 'random.shuffle', 'random.shuffle', (['indexes'], {}), '(indexes)\n', (2628, 2637), False, 'import random\n'), ((2689, 2713), 'itertools.cycle', 'itertools.cycle', (['indexes'], {}), '(indexes)\n', (2704, 2713), False, 'import itertools\n'), ((4189, 4204), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (4200, 4204), False, 'import random\n'), ((4209, 4227), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4223, 4227), True, 'import numpy as np\n'), ((4232, 4255), 'random.shuffle', 'random.shuffle', (['indexes'], {}), '(indexes)\n', (4246, 4255), False, 'import random\n'), ((4292, 4317), 'numpy.zeros', 'np.zeros', (['(n_examples, 6)'], {}), '((n_examples, 6))\n', (4300, 4317), True, 'import numpy as np\n'), ((4346, 4370), 'itertools.cycle', 'itertools.cycle', (['indexes'], {}), '(indexes)\n', (4361, 4370), False, 'import itertools\n'), ((5057, 5068), 'numpy.stack', 'np.stack', (['X'], {}), '(X)\n', (5065, 5068), True, 'import numpy as np\n'), ((831, 849), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (847, 849), True, 'import numpy as np\n'), ((1730, 1774), 'kaishi.image.ops.add_stretching', 'ops.add_stretching', (['im', 'h_stretch', 'v_stretch'], {}), '(im, h_stretch, v_stretch)\n', (1748, 1774), False, 'from kaishi.image import ops\n'), ((1788, 1827), 'kaishi.image.ops.extract_patch', 'ops.extract_patch', (['im', 'pre_stretch_size'], {}), '(im, pre_stretch_size)\n', (1805, 1827), False, 'from kaishi.image import ops\n'), ((5081, 5106), 'kaishi.image.util.swap_channel_dimension', 'swap_channel_dimension', (['X'], {}), '(X)\n', (5103, 5106), False, 'from kaishi.image.util import swap_channel_dimension\n'), ((1112, 1157), 'kaishi.image.ops.add_rotation', 'ops.add_rotation', (['im'], {'ccw_rotation_degrees': '(90)'}), '(im, ccw_rotation_degrees=90)\n', (1128, 1157), False, 'from kaishi.image import ops\n'), ((3308, 3333), 'numpy.zeros', 'np.zeros', (['(batch_size, 6)'], {}), '((batch_size, 6))\n', (3316, 3333), True, 'import numpy as np\n'), ((3527, 3542), 'numpy.stack', 'np.stack', (['batch'], {}), '(batch)\n', (3535, 3542), True, 'import numpy as np\n'), ((1226, 1272), 'kaishi.image.ops.add_rotation', 'ops.add_rotation', (['im'], {'ccw_rotation_degrees': '(180)'}), '(im, ccw_rotation_degrees=180)\n', (1242, 1272), False, 'from kaishi.image import ops\n'), ((1334, 1380), 'kaishi.image.ops.add_rotation', 'ops.add_rotation', (['im'], {'ccw_rotation_degrees': '(270)'}), '(im, ccw_rotation_degrees=270)\n', (1350, 1380), False, 'from kaishi.image import ops\n'), ((3561, 3590), 'kaishi.image.util.swap_channel_dimension', 'swap_channel_dimension', (['batch'], {}), '(batch)\n', (3583, 3590), False, 'from kaishi.image.util import swap_channel_dimension\n')]
|
import os;
import abc;
import math;
import multiprocessing;
import psutil;
import numpy as np;
import matplotlib.pyplot as plt;
from Errors import *;
from KMeans import *;
from UnaryLinearRegression import *;
class _Node:
def __init__(self, samplesCount, featureIndex = None, featureValue = None, leftChild = None, rightChild = None):
self.samplesCount = samplesCount;
self.featureIndex = featureIndex;
self.featureValue = featureValue;
self.leftChild = leftChild;
self.rightChild = rightChild;
def __repr__(self):
return self.__str__();
def __str__(self):
if self.isLeaf():
return "Leaf node, {0} samples".format(self.samplesCount);
else:
return "Internal node, {0} samples, feature {1} of value {2}".format(self.samplesCount, self.featureIndex, self.featureValue);
def isLeaf(self):
return self.featureIndex is None;
def getLeafCount(self):
if self.isLeaf():
return 1;
return (self.leftChild.getLeafCount() if self.leftChild is not None else 0) +\
(self.rightChild.getLeafCount() if self.rightChild is not None else 0);
class IsolationForest:
def __init__(self, treeCount = 100, subSamplingSize = 256, thresholdFinder = None):
if thresholdFinder is None or not isinstance(thresholdFinder, IThresholdFinder):
raise ValueError();
self.__treeCount = treeCount;
self.__subSamplingSize = subSamplingSize;
self.__treesList = [];
self.__scores = None;
self.__threshold = None;
self.__thresholdFinder = thresholdFinder;
@property
def scores(self):
return self.__scores;
@property
def threshold(self):
return self.__threshold;
@threshold.setter
def threshold(self, value):
if value <= 0.5 or value >= 1:
raise ValueError();
self.__threshold = value;
def __calcHarmonicNumber(self, i):
return np.log(i) + np.euler_gamma;
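    # c(psi): average path length of an unsuccessful BST search, used to normalise isolation depths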
def __calcAveragePathLength(self, psi):
if psi < 2:
return 0;
if psi == 2:
return 1;
return 2 * self.__calcHarmonicNumber(psi - 1) - 2 * (psi - 1) / psi;
def __getPathLength(self, instance, node, currentLength, lengthLimit):
if node.isLeaf() or currentLength >= lengthLimit:
return currentLength + self.__calcAveragePathLength(node.samplesCount);
if instance[0, node.featureIndex] < node.featureValue:
return self.__getPathLength(instance, node.leftChild, currentLength + 1, lengthLimit);
else:
return self.__getPathLength(instance, node.rightChild, currentLength + 1, lengthLimit);
def __getAnomalyScore(self, instance, lengthLimit):
length = 0;
for tree in self.__treesList:
length += self.__getPathLength(instance, tree, 0, lengthLimit);
length /= self.__treeCount;
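        # anomaly score s(x, psi) = 2 ** (-E[h(x)] / c(psi))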
return 1 / (2 ** (length / self.__calcAveragePathLength(self.__subSamplingSize)));
def __hasSameFeatureValues(self, dataSet, featureIndex):
if dataSet.shape[0] == 0:
return True;
result = True;
value = dataSet[0, featureIndex];
for rowIndex in range(0, dataSet.shape[0]):
if dataSet[rowIndex, featureIndex] != value:
result = False;
break;
return result;
def __choiceFeatureIndex(self, features):
if len(features) == 1:
return features[0];
return features[np.random.randint(0, len(features))];
def __choiceFeatureValue(self, dataSet, featureIndex):
values = dataSet[:, featureIndex];
minValue, maxValue = values.min(), values.max();
return minValue + (maxValue - minValue) * np.random.random();
def __createNode(self, dataSet, features, currentHeight):
samplesCount = dataSet.shape[0];
if samplesCount == 0:
return None;
if samplesCount == 1:
return _Node(samplesCount);
for index in [item for item in features if self.__hasSameFeatureValues(dataSet, item)]:
features.remove(index);
if len(features) == 0:
return _Node(samplesCount);
featureIndex = self.__choiceFeatureIndex(features);
featureValue = self.__choiceFeatureValue(dataSet, featureIndex);
return _Node(samplesCount, featureIndex, featureValue,
self.__createNode(dataSet[(dataSet[:, featureIndex] < featureValue).A.flatten(), :], features[:], currentHeight + 1),
self.__createNode(dataSet[(dataSet[:, featureIndex] >= featureValue).A.flatten(), :], features[:], currentHeight + 1));
def _createTree(self, subSet):
return self.__createNode(subSet, list(range(0, subSet.shape[1])), 0);
def fill(self, dataSet):
if dataSet is None or not isinstance(dataSet, np.matrix):
raise ValueError();
self.__scores = None;
self.__threshold = None;
n = dataSet.shape[0];
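        # build each isolation tree from an independent random sub-sample of size subSamplingSize, in a worker pool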
with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
self.__treesList = pool.map(self._createTree, [dataSet[np.random.choice(n, self.__subSamplingSize, False), :] for i in range(0, self.__treeCount)]);
def getAnomalyScore(self, instance):
if instance is None:
raise ValueError();
return self.__getAnomalyScore(instance, self.__subSamplingSize - 1);
def train(self, dataSet):
if self.__threshold is not None and self.__scores is not None:
return False;
if dataSet is None or not isinstance(dataSet, np.matrix):
raise ValueError();
if len(self.__treesList) != self.__treeCount:
raise InvalidOperationError();
with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
self.__scores = pool.map(self.getAnomalyScore, [item for item in dataSet]);
self.__threshold = self.__thresholdFinder.find(self.__scores);
return True;
class IThresholdFinder(metaclass = abc.ABCMeta):
@abc.abstractmethod
def find(self, scores):
pass;
class ProportionThresholdFinder(IThresholdFinder):
def __init__(self, proportion):
        self.__proportion = max(0, min(1, proportion));  # clamp the proportion to [0, 1]
def find(self, scores):
scores.sort(reverse = True);
return np.quantile(scores, self.__proportion);
class CurvesThresholdFinder(IThresholdFinder):
MIN_SAMPLES_NUMBER = 10;
MIN_PARALLEL_NUMBER = 10000;
def __init__(self, minCheckValue, maxCheckValue, defaultThreshold, showPlot = False):
self._minCheckValue = minCheckValue;
self._maxCheckValue = maxCheckValue;
self._defaultThreshold = defaultThreshold;
self.__showPlot = showPlot;
self._values = [];
self._curves = None;
self._leftLines = None;
self._rightLines = None;
def __reset(self):
self._curves = None;
self._leftLines = None;
self._rightLines = None;
def _leftOnly(self, y):
maxValue = y.max();
value, residual = None, None;
for i in range(CurvesThresholdFinder.MIN_SAMPLES_NUMBER, y.shape[0]):
line = UnaryLinearRegression();
line.fit(np.mat(np.arange(i)).T, y[:i, 0]);
line.sigLevel = None;
if line.slop <= 0:
continue;
value = line.predictValue(i - 1);
if value > maxValue:
continue;
residual = y[i:, 0] - value;
self._leftLines[i] = (line, value);
self._curves.append([line, i - 1, None, None, value, line.rss + (residual.T * residual)[0, 0]]);
def _rightOnly(self, y):
n, maxValue = y.shape[0], y.max();
value, residual = None, None;
for j in range(n - CurvesThresholdFinder.MIN_SAMPLES_NUMBER, 0, -1):
line = UnaryLinearRegression();
line.fit(np.mat(np.arange(j, n)).T, y[j:, 0]);
line.sigLevel = None;
if line.slop >= 0:
continue;
value = line.predictValue(j);
if value > maxValue:
continue;
residual = y[:j, 0] - value;
self._rightLines[j] = (line, value);
self._curves.append([None, None, line, j, value, line.rss + (residual.T * residual)[0, 0]]);
def _processItem(self, i, j, y, maxValue):
leftLine, leftValue = self._leftLines[i] if i in self._leftLines else (None, None);
if leftLine is None:
return None;
rightLine, rightValue = self._rightLines[j] if j in self._rightLines else (None, None);
if rightLine is None:
return None;
value, residual = None, None;
endIndex, startIndex = None, None;
if leftValue < rightValue:
value = rightValue;
startIndex = j;
endIndex = math.floor(leftLine.inverse(rightValue));
elif rightValue < leftValue:
value = leftValue;
endIndex = i - 1;
startIndex = math.ceil(rightLine.inverse(leftValue));
else:
endIndex = i - 1;
startIndex = j;
value = leftValue;
if endIndex >= startIndex - 1 or value > maxValue:
return None;
residual = y[endIndex + 1:startIndex, 0] - value;
leftRss = (leftLine.calcRss(np.mat(np.arange(i, endIndex + 1)).T, y[i:endIndex + 1, 0]) if endIndex > i - 1 else 0) + leftLine.rss;
rightRss = (rightLine.calcRss(np.mat(np.arange(startIndex, j)).T, y[startIndex:j, 0]) if startIndex < j else 0) + rightLine.rss;
return [leftLine, endIndex, rightLine, startIndex, value, leftRss + rightRss + (residual.T * residual)[0, 0]];
def _bothSides(self, y):
points = [];
n, maxValue = y.shape[0], y.max();
for i in range(CurvesThresholdFinder.MIN_SAMPLES_NUMBER, n - CurvesThresholdFinder.MIN_SAMPLES_NUMBER - 1):
for j in range(n - CurvesThresholdFinder.MIN_SAMPLES_NUMBER, i, -1):
points.append((i, j, y, maxValue));
curves = None;
if len(points) >= CurvesThresholdFinder.MIN_PARALLEL_NUMBER:
with multiprocessing.Pool(max(1, psutil.cpu_count(False) - 2)) as pool:
curves = pool.starmap(self._processItem, points);
else:
curves = list(map(lambda obj: self._processItem(*obj), points));
for item in curves:
if item is None:
continue;
self._curves.append(item);
def _fit(self, y):
if y is None:
raise ValueError();
n = y.shape[0];
if n < CurvesThresholdFinder.MIN_SAMPLES_NUMBER * 2 + 1:
return None, None;
self._curves = [];
self._leftLines = {};
self._rightLines = {};
self._leftOnly(y);
self._rightOnly(y);
self._bothSides(y);
if len(self._curves) == 0:
value = y.mean();
self._values.append(value);
return [0, n - 1], [value, value];
self._curves = np.mat(self._curves);
leftLine, endIndex, rightLine, startIndex, value, rss = tuple(self._curves[self._curves[:, -1].argmin(0)[0, 0], :].A.flatten().tolist());
self._values.append(value);
if leftLine is not None and rightLine is not None:
return [0, endIndex, endIndex + 1, startIndex - 1, startIndex, n - 1],\
[leftLine.predictValue(0), leftLine.predictValue(endIndex), value, value, rightLine.predictValue(startIndex), rightLine.predictValue(n - 1)];
elif leftLine is not None:
return [0, endIndex, n - 1], [leftLine.predictValue(0), leftLine.predictValue(endIndex), leftLine.predictValue(endIndex)];
elif rightLine is not None:
return [0, startIndex, n - 1], [rightLine.predictValue(startIndex), rightLine.predictValue(startIndex), rightLine.predictValue(n - 1)];
else:
return None, None;
def find(self, scores):
scale = 100;
data = np.mat(scores).T * scale;
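        # cluster the scaled scores into three groups seeded at their min, mean and max values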
indices, distances, center = KMeans(lambda X, k: np.mat([X.min(), X.mean(), X.max()]).T).clustering(data, 3, 1);
print("anomaly score centers:{0}".format(center.T));
checkValue = center[2, 0];
minCheckValue = self._minCheckValue * scale;
maxCheckValue = self._maxCheckValue * scale;
defaultThreshold = self._defaultThreshold * scale;
minValue = data[(indices == 2).A.flatten(), :].min(0)[0, 0];
maxValue = data[(indices == 2).A.flatten(), :].max(0)[0, 0];
if maxValue <= defaultThreshold:
return defaultThreshold / scale;
if checkValue >= defaultThreshold:
checkValue = (minValue + checkValue) / 2;
elif checkValue <= minCheckValue:
checkValue = (checkValue + maxValue) / 2;
if checkValue < minCheckValue:
checkValue = minCheckValue;
elif checkValue > maxCheckValue:
checkValue = maxCheckValue;
print("threshold check value: {0}".format(checkValue));
i = None;
for j in range(0, data.shape[0]):
if data[j, 0] >= checkValue and i is None:
i = j;
if data[j, 0] < checkValue and i is not None:
if j - i > CurvesThresholdFinder.MIN_SAMPLES_NUMBER * 2:
x, y = self._fit(data[i:j, 0]);
if self.__showPlot:
plt.figure(1, (16, 10));
plt.plot(list(range(0, j - i)), data[i:j, 0].A.flatten().tolist(), color = "b", marker = "x");
if x is not None and y is not None:
plt.plot(x, y, color = "r");
plt.show();
i = None;
print("threshold all values: {0}".format(self._values));
threshold = (np.mean(self._values) if len(self._values) > 0 else defaultThreshold) / scale;
print("threshold found: {0}".format(threshold));
self.__reset();
return threshold;
|
[
"numpy.mat",
"numpy.mean",
"numpy.random.random",
"numpy.random.choice",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.quantile",
"matplotlib.pyplot.figure",
"psutil.cpu_count",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((6468, 6506), 'numpy.quantile', 'np.quantile', (['scores', 'self.__proportion'], {}), '(scores, self.__proportion)\n', (6479, 6506), True, 'import numpy as np\n'), ((11234, 11254), 'numpy.mat', 'np.mat', (['self._curves'], {}), '(self._curves)\n', (11240, 11254), True, 'import numpy as np\n'), ((2021, 2030), 'numpy.log', 'np.log', (['i'], {}), '(i)\n', (2027, 2030), True, 'import numpy as np\n'), ((3839, 3857), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3855, 3857), True, 'import numpy as np\n'), ((12208, 12222), 'numpy.mat', 'np.mat', (['scores'], {}), '(scores)\n', (12214, 12222), True, 'import numpy as np\n'), ((14059, 14080), 'numpy.mean', 'np.mean', (['self._values'], {}), '(self._values)\n', (14066, 14080), True, 'import numpy as np\n'), ((5154, 5177), 'psutil.cpu_count', 'psutil.cpu_count', (['(False)'], {}), '(False)\n', (5170, 5177), False, 'import psutil\n'), ((5903, 5926), 'psutil.cpu_count', 'psutil.cpu_count', (['(False)'], {}), '(False)\n', (5919, 5926), False, 'import psutil\n'), ((7375, 7387), 'numpy.arange', 'np.arange', (['i'], {}), '(i)\n', (7384, 7387), True, 'import numpy as np\n'), ((8062, 8077), 'numpy.arange', 'np.arange', (['j', 'n'], {}), '(j, n)\n', (8071, 8077), True, 'import numpy as np\n'), ((13648, 13671), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(16, 10)'], {}), '(1, (16, 10))\n', (13658, 13671), True, 'import matplotlib.pyplot as plt\n'), ((13933, 13943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13941, 13943), True, 'import matplotlib.pyplot as plt\n'), ((5260, 5310), 'numpy.random.choice', 'np.random.choice', (['n', 'self.__subSamplingSize', '(False)'], {}), '(n, self.__subSamplingSize, False)\n', (5276, 5310), True, 'import numpy as np\n'), ((9527, 9553), 'numpy.arange', 'np.arange', (['i', '(endIndex + 1)'], {}), '(i, endIndex + 1)\n', (9536, 9553), True, 'import numpy as np\n'), ((9669, 9693), 'numpy.arange', 'np.arange', (['startIndex', 'j'], {}), '(startIndex, j)\n', (9678, 9693), True, 'import numpy as np\n'), ((10364, 10387), 'psutil.cpu_count', 'psutil.cpu_count', (['(False)'], {}), '(False)\n', (10380, 10387), False, 'import psutil\n'), ((13880, 13905), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""r"""'}), "(x, y, color='r')\n", (13888, 13905), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 15:13:29 2018
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/1015-4.csv.gz'
COMMENT = '1015-2 + weight'
EXE_SUBMIT = True
DROP = ['f001_hostgal_specz']
SEED = np.random.randint(9999)
print('SEED:', SEED)
NFOLD = 4
LOOP = 2
param = {
'objective': 'multiclass',
'num_class': 14,
'metric': 'multi_logloss',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.5,
'subsample': 0.5,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]
class_weight = {6: 1,
15: 2,
16: 1,
42: 1,
52: 1,
53: 1,
62: 1,
64: 2,
65: 1,
67: 1,
88: 1,
90: 1,
92: 1,
95: 1}
# =============================================================================
# load
# =============================================================================
files_tr = sorted(glob('../data/train_f*.f'))
[print(f) for f in files_tr]
X = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y = utils.load_target().target
X.drop(DROP, axis=1, inplace=True)
target_dict = {}
target_dict_r = {}
for i,e in enumerate(y.sort_values().unique()):
target_dict[e] = i
target_dict_r[i] = e
y = y.replace(target_dict)
if X.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')
gc.collect()
COL = X.columns.tolist()
#CAT = list( set(X.columns)&set(utils_cat.ALL))
#print(f'CAT: {CAT}')
# =============================================================================
# cv
# =============================================================================
def lgb_multi_weighted_logloss(y_preds, train_data):
"""
@author olivier https://www.kaggle.com/ogrellier
https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
multi logloss for PLAsTiCC challenge
"""
# class_weights taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with <NAME>'s post https://www.kaggle.com/kyleboone
y_true = train_data.get_label()
if len(np.unique(y_true)) > 14:
classes.append(99)
class_weight[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')
# Trasform y_true in dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
# Get the log for ones, .values is used to drop the index of DataFrames
# Exclude class 99 for now, since there is no class99 in the training set
# we gave a special process for that class
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False
dtrain = lgb.Dataset(X, y, #categorical_feature=CAT,
free_raw_data=False)
gc.collect()
model_all = []
for i in range(LOOP):
gc.collect()
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
feval=lgb_multi_weighted_logloss,
early_stopping_rounds=100, verbose_eval=50,
seed=SEED)
model_all += models
result = f"CV auc-mean: {ret['multi_logloss-mean'][-1]} + {ret['multi_logloss-stdv'][-1]}"
print(result)
imp = ex.getImp(model_all)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
png = f'LOG/imp_{__file__}.png'
utils.savefig_imp(imp, png, x='total', title=f'{__file__}')
utils.send_line(result, png)
# =============================================================================
# test
# =============================================================================
files_te = sorted(glob('../data/test_f*.f'))
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
for i,model in enumerate(tqdm(model_all)):
y_pred = model.predict(X_test)
if i==0:
y_pred_all = y_pred
else:
y_pred_all += y_pred
y_pred_all /= len(model_all)
sub = pd.read_csv('../input/sample_submission.csv.zip')
df = pd.DataFrame(y_pred_all, columns=sub.columns[1:-1])
# Compute preds_99 as the proba of class not being any of the others
# preds_99 = 0.1 gives 1.769
preds_99 = np.ones(df.shape[0])
for i in range(df.shape[1]):
preds_99 *= (1 - df.iloc[:, i])
df['class_99'] = preds_99
sub = pd.concat([sub[['object_id']], df], axis=1)
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
sub.iloc[:, 1:].hist(bins=30, figsize=(16, 12))
png = f'LOG/sub_{__file__}.png'
utils.savefig_sub(sub, png)
utils.send_line('DONE!', png)
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
utils.stop_instance()
|
[
"numpy.clip",
"pandas.read_csv",
"utils.send_line",
"numpy.log",
"multiprocessing.cpu_count",
"utils.start",
"lightgbm.Dataset",
"utils.submit",
"utils.savefig_sub",
"pandas.read_feather",
"pandas.DataFrame",
"glob.glob",
"utils.stop_instance",
"numpy.ones",
"utils.savefig_imp",
"gc.collect",
"pandas.get_dummies",
"lightgbm.cv",
"numpy.unique",
"tqdm.tqdm",
"utils.load_target",
"os.environ.get",
"numpy.sum",
"numpy.random.randint",
"utils.end",
"pandas.concat",
"lgbextension.getImp"
] |
[((392, 413), 'utils.start', 'utils.start', (['__file__'], {}), '(__file__)\n', (403, 413), False, 'import utils\n'), ((627, 650), 'numpy.random.randint', 'np.random.randint', (['(9999)'], {}), '(9999)\n', (644, 650), True, 'import numpy as np\n'), ((2473, 2485), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2483, 2485), False, 'import os, gc\n'), ((4241, 4279), 'lightgbm.Dataset', 'lgb.Dataset', (['X', 'y'], {'free_raw_data': '(False)'}), '(X, y, free_raw_data=False)\n', (4252, 4279), True, 'import lightgbm as lgb\n'), ((4328, 4340), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4338, 4340), False, 'import os, gc\n'), ((4802, 4822), 'lgbextension.getImp', 'ex.getImp', (['model_all'], {}), '(model_all)\n', (4811, 4822), True, 'import lgbextension as ex\n'), ((5117, 5176), 'utils.savefig_imp', 'utils.savefig_imp', (['imp', 'png'], {'x': '"""total"""', 'title': 'f"""{__file__}"""'}), "(imp, png, x='total', title=f'{__file__}')\n", (5134, 5176), False, 'import utils\n'), ((5177, 5205), 'utils.send_line', 'utils.send_line', (['result', 'png'], {}), '(result, png)\n', (5192, 5205), False, 'import utils\n'), ((5744, 5793), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv.zip"""'], {}), "('../input/sample_submission.csv.zip')\n", (5755, 5793), True, 'import pandas as pd\n'), ((5799, 5850), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred_all'], {'columns': 'sub.columns[1:-1]'}), '(y_pred_all, columns=sub.columns[1:-1])\n', (5811, 5850), True, 'import pandas as pd\n'), ((5961, 5981), 'numpy.ones', 'np.ones', (['df.shape[0]'], {}), '(df.shape[0])\n', (5968, 5981), True, 'import numpy as np\n'), ((6081, 6124), 'pandas.concat', 'pd.concat', (["[sub[['object_id']], df]"], {'axis': '(1)'}), "([sub[['object_id']], df], axis=1)\n", (6090, 6124), True, 'import pandas as pd\n'), ((6270, 6297), 'utils.savefig_sub', 'utils.savefig_sub', (['sub', 'png'], {}), '(sub, png)\n', (6287, 6297), False, 'import utils\n'), ((6298, 6327), 'utils.send_line', 'utils.send_line', (['"""DONE!"""', 'png'], {}), "('DONE!', png)\n", (6313, 6327), False, 'import utils\n'), ((6666, 6685), 'utils.end', 'utils.end', (['__file__'], {}), '(__file__)\n', (6675, 6685), False, 'import utils\n'), ((6686, 6707), 'utils.stop_instance', 'utils.stop_instance', ([], {}), '()\n', (6705, 6707), False, 'import utils\n'), ((1245, 1256), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1254, 1256), False, 'from multiprocessing import cpu_count\n'), ((1910, 1936), 'glob.glob', 'glob', (['"""../data/train_f*.f"""'], {}), "('../data/train_f*.f')\n", (1914, 1936), False, 'from glob import glob\n'), ((2089, 2108), 'utils.load_target', 'utils.load_target', ([], {}), '()\n', (2106, 2108), False, 'import utils\n'), ((3428, 3450), 'pandas.get_dummies', 'pd.get_dummies', (['y_true'], {}), '(y_true)\n', (3442, 3450), True, 'import pandas as pd\n'), ((3518, 3562), 'numpy.clip', 'np.clip', ([], {'a': 'y_p', 'a_min': '(1e-15)', 'a_max': '(1 - 1e-15)'}), '(a=y_p, a_min=1e-15, a_max=1 - 1e-15)\n', (3525, 3562), True, 'import numpy as np\n'), ((3600, 3611), 'numpy.log', 'np.log', (['y_p'], {}), '(y_p)\n', (3606, 3611), True, 'import numpy as np\n'), ((3830, 3868), 'numpy.sum', 'np.sum', (['(y_ohe.values * y_p_log)'], {'axis': '(0)'}), '(y_ohe.values * y_p_log, axis=0)\n', (3836, 3868), True, 'import numpy as np\n'), ((4383, 4395), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4393, 4395), False, 'import os, gc\n'), ((4416, 4439), 'numpy.random.randint', 'np.random.randint', (['(9999)'], {}), '(9999)\n', (4433, 4439), True, 
'import numpy as np\n'), ((4458, 4592), 'lightgbm.cv', 'lgb.cv', (['param', 'dtrain', '(99999)'], {'nfold': 'NFOLD', 'feval': 'lgb_multi_weighted_logloss', 'early_stopping_rounds': '(100)', 'verbose_eval': '(50)', 'seed': 'SEED'}), '(param, dtrain, 99999, nfold=NFOLD, feval=lgb_multi_weighted_logloss,\n early_stopping_rounds=100, verbose_eval=50, seed=SEED)\n', (4464, 4592), True, 'import lightgbm as lgb\n'), ((5393, 5418), 'glob.glob', 'glob', (['"""../data/test_f*.f"""'], {}), "('../data/test_f*.f')\n", (5397, 5418), False, 'from glob import glob\n'), ((5574, 5589), 'tqdm.tqdm', 'tqdm', (['model_all'], {}), '(model_all)\n', (5578, 5589), False, 'from tqdm import tqdm\n'), ((6541, 6580), 'utils.submit', 'utils.submit', (['SUBMIT_FILE_PATH', 'COMMENT'], {}), '(SUBMIT_FILE_PATH, COMMENT)\n', (6553, 6580), False, 'import utils\n'), ((2000, 2018), 'pandas.read_feather', 'pd.read_feather', (['f'], {}), '(f)\n', (2015, 2018), True, 'import pandas as pd\n'), ((4180, 4197), 'numpy.sum', 'np.sum', (['class_arr'], {}), '(class_arr)\n', (4186, 4197), True, 'import numpy as np\n'), ((251, 273), 'os.environ.get', 'os.environ.get', (['"""USER"""'], {}), "('USER')\n", (265, 273), False, 'import os, gc\n'), ((2028, 2058), 'tqdm.tqdm', 'tqdm', (['files_tr'], {'mininterval': '(60)'}), '(files_tr, mininterval=60)\n', (2032, 2058), False, 'from tqdm import tqdm\n'), ((3233, 3250), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (3242, 3250), True, 'import numpy as np\n'), ((4166, 4177), 'numpy.sum', 'np.sum', (['y_w'], {}), '(y_w)\n', (4172, 4177), True, 'import numpy as np\n'), ((5458, 5476), 'pandas.read_feather', 'pd.read_feather', (['f'], {}), '(f)\n', (5473, 5476), True, 'import pandas as pd\n'), ((5486, 5516), 'tqdm.tqdm', 'tqdm', (['files_te'], {'mininterval': '(60)'}), '(files_te, mininterval=60)\n', (5490, 5516), False, 'from tqdm import tqdm\n')]
|
from __future__ import print_function
import os
import warnings
import shutil
import re
import numpy as np
import sami
from astropy.io import fits
class Tester():
"""This class handles the testing of the SAMI pipeline. To run it requires
additional data:
1) the folder ``sami_ppl_test_data``, i.e. the raw data to be reduced
locally (~1.1 GB - ~0.5 GB xz archive)
2) the reduced data (folders ``slow_test_reference`` or
``fast_test_reference`` for the ``slow`` and ``fast`` reduction
respectively (~7.5 GB - ~4.1 GB xz archive).
To run the test (fast mode):
>>> import sami.tester
    >>> mytest = sami.tester.Tester(fast=True)
>>> mngr = mytest.dr_reduce()
>>> comp = mytest.dr_comparison()
1) mngr is the instance of ``sami.manager.Manager`` that controls the local
data reduction. It can be used to perform additional operations on the
local data reduction, but strictly speaking should not be needed.
2) comp is the result of the comparison. Should be True.
Parameters
----------
fast : bool, optional
Whether to use the ``fast`` or ``slow`` data reduction option for the
``sami.manager.Manager``.
rtol : float, optional
The relative tolerance to assess whether any two numbers are the same.
The default value of 1.e-07 is appropriate for single precision numbers.
output_dir : str, optional
The directory where to write the local data reduction. Default
is None, which uses either ``slow_test`` or ``fast_test``, depending on
the value of the keyword ``fast``.
reference_dir : str, optional
The directory where to search for the reference data reduction. Default
is None, which uses either ``slow_test_reference`` or
``fast_test_reference``, depending on the value of the keyword ``fast``.
create_reduction : bool, optional
This flag is set to True in order to create the reference data
reduction. When testing one should leave this keyword to its default
value (False).
"""
def __init__(self, fast=True, rtol=1.e-07, output_dir=None,
reference_dir=None, create_reduction=False):
self.fast=fast
self.rtol=rtol
if output_dir is None:
self.output_dir = 'fast_test' if fast else 'slow_test'
else:
self.output_dir = output_dir
if reference_dir is None:
self.reference_dir = 'fast_test_reference' if fast \
else 'slow_test_reference'
else:
self.reference_dir = reference_dir
if not create_reduction:
# Check that ``reference_dir`` contains the expected files.
self._check_reference_exists()
def _check_reference_exists(self):
"""Method to assess whether the required reference data exists in the
directory ``self.reference_dir``.
"""
if not _check_existing_cubing(self.reference_dir):
error_message = ('The directory "{}" does not appear to contain ' \
+ ' the required (reduced) data. Please contact the pipeline ' \
+ ' support team to obtain the reference dataset.').format(
self.reference_dir)
raise IOError(error_message)
else:
pass
def dr_reduce(self,
overwrite_bias=False, overwrite_dark=False,
overwrite_lflat=False, overwrite_tlm=False,
overwrite_arc_n_flat=False, overwrite_sky_n_object=False,
overwrite_fluxcal=False, overwrite_cubing=False):
mngr = dr_reduce(
fast=self.fast, output_dir=self.output_dir,
overwrite_bias=overwrite_bias, overwrite_dark=overwrite_dark,
overwrite_lflat=overwrite_lflat, overwrite_tlm=overwrite_tlm,
overwrite_arc_n_flat=overwrite_arc_n_flat,
overwrite_sky_n_object=overwrite_sky_n_object,
overwrite_fluxcal=overwrite_fluxcal,
overwrite_cubing=overwrite_cubing)
return mngr
def dr_comparison(self):
return dr_comparison(self.output_dir, self.reference_dir,
rtol=self.rtol)
# Example usage:
# >>> import sami.tester
# >>> res = sami.tester.dr_reduce(fast=True)
# Test to relative tolerance.
# >>> comp = sami.tester.dr_comparison('fast_test', 'fast_test_reference', rtol=1.e-07)
# +---+------------------------------------------------------------------------+
# |1. | Performing the data reduction. |
# +---+------------------------------------------------------------------------+
def dr_reduce(fast=True, output_dir=None,
overwrite_bias=False, overwrite_dark=False, overwrite_lflat=False,
overwrite_tlm=False, overwrite_arc_n_flat=False,
overwrite_sky_n_object=False, overwrite_fluxcal=False,
overwrite_cubing=False):
"""This method does the data reduction on the test data suite.
Parameters
----------
fast: bool, True
whether to perform the fast or slow data reduction (see the
relevant documentation for sami.manager.Manager for more information).
overwrite_<function>: bool, False
whether to manually overwrite the preexisting data reduction step
corresponding to <function> (if exists).
Return
------
mngr: ``sami.manager.Manager`` instance
The Manager instance with the data reduction.
"""
# Declare the output directory.
if output_dir is None:
output_dir = 'fast_test' if fast else 'slow_test'
# If an old reduction exists, ask the user whether to delete it or keep it.
if _check_existing_reduction(output_dir):
_delete_existing_reduction(output_dir)
# Importing the data.
mngr = sami.manager.Manager(output_dir, fast=fast)
mngr.import_dir('sami_ppl_test_data')
mngr.remove_directory_locks()
message('Processing the bias data...')
mngr.reduce_bias(overwrite=overwrite_bias)
mngr.combine_bias(overwrite=overwrite_bias)
message('Processing the dark data...')
mngr.reduce_dark(overwrite=overwrite_dark)
mngr.combine_dark(overwrite=overwrite_dark)
message('Processing the detector flat (lflat) data...')
mngr.reduce_lflat(overwrite=overwrite_lflat)
mngr.combine_lflat(overwrite=overwrite_lflat)
message('Tracing the fibres (tramlines)...')
mngr.make_tlm(overwrite=overwrite_tlm)
message('Reducing the arc & flat frames...')
mngr.reduce_arc(overwrite=overwrite_arc_n_flat)
mngr.reduce_fflat(overwrite=overwrite_arc_n_flat)
message('Reducing the sky and object frames...')
mngr.reduce_sky(overwrite=overwrite_sky_n_object)
mngr.reduce_object(overwrite=overwrite_sky_n_object)
message('Flux calibration...')
mngr.derive_transfer_function(overwrite=overwrite_fluxcal)
mngr.combine_transfer_function(overwrite=overwrite_fluxcal)
mngr.flux_calibrate(overwrite=overwrite_fluxcal)
mngr.telluric_correct(overwrite=overwrite_fluxcal)
mngr.get_stellar_photometry()
mngr.scale_frames(overwrite=overwrite_fluxcal)
mngr.measure_offsets(overwrite=overwrite_fluxcal)
message('Cubing...')
# Check whether cubing has been done in the past. If yes, use the keyword
# ``overwrite_cubing`` to determine whether or not to redo this process.
# This step is necessary because the keyword ``overwrite`` does not work
# for ``sami.manager.Manager.cube``.
if (not _check_existing_cubing(output_dir)) or overwrite_cubing:
        warn_message = 'Notice: ``sami.manager.Manager.cube`` is time consuming.' \
+ '\nThis tester will only cube one IFU (the secondary star one).'
warnings.warn(warn_message)
mngr.cube(overwrite=overwrite_cubing, star_only=True)
mngr.scale_cubes(overwrite=overwrite_cubing)
#mngr.record_dust(overwrite=overwrite_cubing)
#mngr.gzip_cubes(overwrite=overwrite_cubing) # Unsupported
mngr.qc_summary()
check_results(mngr)
return mngr
# +---+------------------------------------------------------------------------+
# |2. | Assessing the results. |
# +---+------------------------------------------------------------------------+
def dr_comparison(output_dir, reference_dir, rtol=1.e-07):
comparison = []
for prod_name in ['bias', 'dark', 'lflat', 'calibrators', 'ststar', 'main',
'cubed']:
filelist_a, filelist_b = _retrieve_filenames(prod_name, output_dir,
reference_dir)
for fn_a, fn_b in zip(filelist_a, filelist_b):
fa, fb = os.path.basename(fn_a), os.path.basename(fn_b)
comparison.append([fa, fb, _compare_files(fn_a, fn_b, rtol=rtol)])
all_equal = [comp[2] for comp in comparison]
if np.all(all_equal):
return True
else:
        warnings.warn('Not all comparisons have been successful')
return comparison
def _retrieve_filenames(input_product_name, output_dir, reference_dir):
"""This method retrieves the filenames from the data reduction (directory
``output_dir``) and from the standard (or reference) data reduction
(directory ``reference_dir``). It returns couples of filenames that need to
be compared.
input_product_name: str
['bias'|'dark'|'lflat'|'main'|'calibrators'|'ststar'|'cubed']
Return
------
Two lists of filenames, with the names of the files that need to be compared
as in:
[file_1a, file_2a, ..., file_Na], [file_1b, file_2b, ..., file_Nb]
"""
pn_dic = {'bias': 'bias', 'dark': 'dark', 'lflat': 'lflat', 'cubed': 'cubed',
'main': 'main', 'calibrators': 'calibrators', 'ststar': 'EG21'}
product_name = pn_dic[input_product_name]
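    # The per-frame patterns below match raw run names of the form
    # DDmmmNNNNN<suffix>.fits (2-digit day, 3-letter month, 5-digit run number),
    # plus the combined calibration products (e.g. BIAScombined*.fits).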
pn_regex = {'bias': '.*' + product_name + \
'.*/([0-9]{2,2}[a-z]{3,3}[0-9]{5,5}red.fits|' + \
'.*BIAScombined.*fits)',
'dark': '.*' + product_name + \
'.*/([0-9]{2,2}[a-z]{3,3}[0-9]{5,5}red.fits|' + \
'.*DARKcombined.*fits)',
'lflat': '.*' + product_name + \
'.*/([0-9]{2,2}[a-z]{3,3}[0-9]{5,5}red.fits|' + \
'.*LFLATcombined.*fits)',
'main': '.*' + product_name + \
'.*/[0-9]{2,2}[a-z]{3,3}[0-9]{5,5}sci.fits',
'calibrators': '.*' + product_name + \
'.*/[0-9]{2,2}[a-z]{3,3}[0-9]{5,5}red.fits',
'ststar': '.*' + product_name + \
'.*/[0-9]{2,2}[a-z]{3,3}[0-9]{5,5}im.fits',
'cubed': '.*' + product_name + \
'.*/[0-9]{4,}_(blue|red)_.*_Y.*.fits$'}
regex = pn_regex[input_product_name]
# Loop over all directories of processed galaxies.
match_files = re.compile(regex)
# Result files.
result_file_list = [dirpath + '/' + filename
for dirpath, dirnames, filenames in os.walk(output_dir)
for filename in filenames
if match_files.search(dirpath + '/' + filename)]
reference_file_list = [dirpath + '/' + filename
for dirpath, dirnames, filenames in os.walk(reference_dir)
for filename in filenames
if match_files.search(dirpath + '/' + filename)]
if input_product_name == 'main':
result_file_list = _replace_names(result_file_list,
instr='sci', outstr='red')
reference_file_list = _replace_names(reference_file_list,
instr='sci', outstr='red')
elif input_product_name == 'ststar':
result_file_list = _replace_names(result_file_list,
instr='im', outstr='red')
reference_file_list = _replace_names(reference_file_list,
instr='im', outstr='red')
result_file_list.sort(), reference_file_list.sort()
# Assess whether the files are correctly matched (i.e. result_file_list[n]
# has the same filename as reference_file_list[n], for every n).
matched = _assess_matched_files(reference_file_list, result_file_list)
if not matched:
error_message = ('The filenames associated with the product "{}" ' \
+ 'between the directories "{}" and "{}" do not match.\n' \
+ 'Please inspect the relevant directories manually.').format(
product_name, output_dir, reference_dir)
raise AssertionError(error_message)
else:
return result_file_list, reference_file_list
def _compare_files(file_a, file_b, rtol=1.e-07):
"""Compare the contents of the FITS files ``file_a`` and ``file_b`` to a
precision of ``rtol``.
Return
------
True if the data is equal to the required precision, False otherwise.
"""
data_a, data_b = fits.open(file_a), fits.open(file_b)
# List to store the results
are_close = [True for ext in data_a]
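    # Note: np.testing.assert_allclose returns None on success and raises
    # AssertionError on mismatch, so the pass/fail outcome is decided by the
    # except clauses below rather than by the values stored in ``are_close``.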
for n, (ext_a, ext_b) in enumerate(zip(data_a, data_b)):
try:
are_close[n] = np.testing.assert_allclose(
ext_a.data, ext_b.data, rtol=rtol,
atol=0., verbose=True)
except TypeError:
warnings.warn('Skipping binary table(s): testing not implemented yet.')
pass
except AssertionError as ae:
warnings.warn(ae.args[0])
return False
return True
def _assess_matched_files(filenames_list_a, filenames_list_b):
"""Assess whether the filenames in the input lists ``filenames_list_a`` and
``filenames_list_b`` are the same (apart from the path). This is a helper
method for ``_retrieve_filenames``.
Return
------
True if the check is passed, False otherwise.
"""
if len(filenames_list_a) != len(filenames_list_b):
return False
else:
for fn_a, fn_b in zip(filenames_list_a, filenames_list_b):
if os.path.basename(fn_a) != os.path.basename(fn_b):
return False
# If it gets here, all checks have been passed and the list have the
# same filenames in the correct order.
return True
# +---+------------------------------------------------------------------------+
# |3. | Utilities. Hic sunt leones. |
# +---+------------------------------------------------------------------------+
def _check_existing_reduction(dir_name):
reduction_exists = os.path.exists(dir_name)
if reduction_exists:
warn_message = 'An old reduction has been detected.\n' \
+ 'Please notice that it might (or might not) be incomplete.'
warnings.warn(warn_message)
return reduction_exists
def _check_existing_cubing(dir_name):
regex = '.*cubed.*/[0-9]{4,}_(blue|red)_.*_Y.*.fits$'
# Loop over all directories of processed galaxies.
match_files = re.compile(regex)
# Result files.
cubes_file_list = [dirpath + '/' + filename
for dirpath, dirnames, filenames in os.walk(dir_name)
for filename in filenames
if match_files.search(dirpath + '/' + filename)]
    return len(cubes_file_list)==2 # 1 IFU (star_only=True) x 2 spectrograph arms.
    # A full cubing run would instead give 26 files (13 IFUs x 2 spectrograph arms).
def _delete_existing_reduction(dir_name):
"""Ask the user whether to delete the old data reduction.
"""
delete_er = input(('Delete the old reduction ({}) or resume it? ' \
+ '(y=yes / any other key=continue):\n').format(dir_name))
if delete_er == 'y':
shutil.rmtree(dir_name)
else:
warnings.warn('Continuing the existing data reduction...')
pass
def _replace_names(input_list, instr='sci', outstr='red'):
"""Replace ``instr`` with ``outstr`` in every element of ``input_list``.
"""
instr += '.fits'
outstr += '.fits'
for n in range(len(input_list)):
input_list[n] = input_list[n].replace(instr, outstr)
return input_list
def check_results(input_mngr):
"""Asks the user whether to perform manual checking of the results. """
do_checks = True
while do_checks:
do_checks = input('Check the results? (y=yes, any other key=no):\n')
if do_checks == 'y':
try:
input_mngr.check_next_group()
do_checks = True
except IndexError:
message('No more checks to be done')
do_checks = False
else:
message('No more checks will be done')
do_checks = False
def message(message):
    """Yapp, yet another pretty printer. ``message`` is the text to be printed.
"""
print()
print('*******************************************************************')
print('* {0:<{1}} *'.format(message, 63))
print('*******************************************************************')
print()
|
[
"os.path.exists",
"re.compile",
"numpy.testing.assert_allclose",
"shutil.rmtree",
"os.path.basename",
"astropy.io.fits.open",
"warnings.warn",
"numpy.all",
"os.walk",
"sami.manager.Manager"
] |
[((5894, 5937), 'sami.manager.Manager', 'sami.manager.Manager', (['output_dir'], {'fast': 'fast'}), '(output_dir, fast=fast)\n', (5914, 5937), False, 'import sami\n'), ((8989, 9006), 'numpy.all', 'np.all', (['all_equal'], {}), '(all_equal)\n', (8995, 9006), True, 'import numpy as np\n'), ((11015, 11032), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (11025, 11032), False, 'import re\n'), ((14679, 14703), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (14693, 14703), False, 'import os\n'), ((15108, 15125), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (15118, 15125), False, 'import re\n'), ((7813, 7840), 'warnings.warn', 'warnings.warn', (['warn_message'], {}), '(warn_message)\n', (7826, 7840), False, 'import warnings\n'), ((9046, 9104), 'warnings.warn', 'warnings.warn', (['"""Not all comparisons have been successfull"""'], {}), "('Not all comparisons have been successfull')\n", (9059, 9104), False, 'import warnings\n'), ((13052, 13069), 'astropy.io.fits.open', 'fits.open', (['file_a'], {}), '(file_a)\n', (13061, 13069), False, 'from astropy.io import fits\n'), ((13071, 13088), 'astropy.io.fits.open', 'fits.open', (['file_b'], {}), '(file_b)\n', (13080, 13088), False, 'from astropy.io import fits\n'), ((14877, 14904), 'warnings.warn', 'warnings.warn', (['warn_message'], {}), '(warn_message)\n', (14890, 14904), False, 'import warnings\n'), ((15796, 15819), 'shutil.rmtree', 'shutil.rmtree', (['dir_name'], {}), '(dir_name)\n', (15809, 15819), False, 'import shutil\n'), ((15838, 15896), 'warnings.warn', 'warnings.warn', (['"""Continuing the existing data reduction..."""'], {}), "('Continuing the existing data reduction...')\n", (15851, 15896), False, 'import warnings\n'), ((11147, 11166), 'os.walk', 'os.walk', (['output_dir'], {}), '(output_dir)\n', (11154, 11166), False, 'import os\n'), ((11355, 11377), 'os.walk', 'os.walk', (['reference_dir'], {}), '(reference_dir)\n', (11362, 11377), False, 'import os\n'), ((13266, 13355), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ext_a.data', 'ext_b.data'], {'rtol': 'rtol', 'atol': '(0.0)', 'verbose': '(True)'}), '(ext_a.data, ext_b.data, rtol=rtol, atol=0.0,\n verbose=True)\n', (13292, 13355), True, 'import numpy as np\n'), ((15239, 15256), 'os.walk', 'os.walk', (['dir_name'], {}), '(dir_name)\n', (15246, 15256), False, 'import os\n'), ((8805, 8827), 'os.path.basename', 'os.path.basename', (['fn_a'], {}), '(fn_a)\n', (8821, 8827), False, 'import os\n'), ((8829, 8851), 'os.path.basename', 'os.path.basename', (['fn_b'], {}), '(fn_b)\n', (8845, 8851), False, 'import os\n'), ((13422, 13493), 'warnings.warn', 'warnings.warn', (['"""Skipping binary table(s): testing not implemented yet."""'], {}), "('Skipping binary table(s): testing not implemented yet.')\n", (13435, 13493), False, 'import warnings\n'), ((13560, 13585), 'warnings.warn', 'warnings.warn', (['ae.args[0]'], {}), '(ae.args[0])\n', (13573, 13585), False, 'import warnings\n'), ((14145, 14167), 'os.path.basename', 'os.path.basename', (['fn_a'], {}), '(fn_a)\n', (14161, 14167), False, 'import os\n'), ((14171, 14193), 'os.path.basename', 'os.path.basename', (['fn_b'], {}), '(fn_b)\n', (14187, 14193), False, 'import os\n')]
|
#!/usr/bin/env python3
import os, re, random
import numpy as np
from myClass import Theta
from functions import grad_loss
from hyperParameters import hyperParameters
# hyperParameters
T, D = hyperParameters.T, hyperParameters.D
alpha, epsilon = hyperParameters.alpha, hyperParameters.epsilon
moment = hyperParameters.moment
# directory (train files)
dir = os.getcwd() + '/train/'
# train graph_files
files = [file for file in os.listdir(dir) if re.search('_graph.txt', file)]
num_of_files = len(files)
# stochastic gradient descent (sgd)
# b_files is the list of batch file names; theta holds the learnable parameters.
def sgd(b_files, theta):
batch_size = len(b_files)
tmp_theta = Theta(
np.zeros((D,D)),
np.zeros(D).T,
0
)
for graph_file in b_files:
        # str.rstrip removes a *set* of trailing characters, not a suffix, so use replace
        label_file = graph_file.replace('_graph.txt', '_label.txt')
file = open(dir+graph_file)
N, adj = int(file.readline()), []
for i in range(N):
adj.append([int(x) for x in file.readline().split()])
adj = np.array(adj)
file.close()
file = open(dir+label_file)
y = int(file.readline())
file.close()
tmp_theta += grad_loss(adj, y, theta)
delta_theta = tmp_theta*(1/batch_size)
return theta + delta_theta*(-alpha)
# momentum stochastic gradient descent (momentum_sgd)
def momentum_sgd(b_files, theta, w):
batch_size = len(b_files)
tmp_theta = Theta(
np.zeros((D,D)),
np.zeros(D).T,
0
)
for graph_file in b_files:
        # str.rstrip removes a *set* of trailing characters, not a suffix, so use replace
        label_file = graph_file.replace('_graph.txt', '_label.txt')
file = open(dir+graph_file)
N, adj = int(file.readline()), []
for i in range(N):
adj.append([int(x) for x in file.readline().split()])
adj = np.array(adj)
file.close()
file = open(dir+label_file)
y = int(file.readline())
file.close()
tmp_theta += grad_loss(adj, y, theta)
delta_theta = tmp_theta*(1/batch_size)
theta += delta_theta*(-alpha) + w*moment
w += delta_theta*(-alpha) + w*moment
return (theta, w)
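# Example driver (illustrative sketch only; the batch size B and the random
# initialisation are assumptions, not part of the original training script):
# theta = Theta(np.random.normal(size=(D, D)), np.random.normal(size=D), 0)
# B = 32
# random.shuffle(files)
# for i in range(0, num_of_files, B):
#     theta = sgd(files[i:i+B], theta)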
|
[
"functions.grad_loss",
"os.listdir",
"os.getcwd",
"numpy.array",
"numpy.zeros",
"re.search"
] |
[((359, 370), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (368, 370), False, 'import os, re, random\n'), ((430, 445), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (440, 445), False, 'import os, re, random\n'), ((449, 478), 're.search', 're.search', (['"""_graph.txt"""', 'file'], {}), "('_graph.txt', file)\n", (458, 478), False, 'import os, re, random\n'), ((708, 724), 'numpy.zeros', 'np.zeros', (['(D, D)'], {}), '((D, D))\n', (716, 724), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.array', 'np.array', (['adj'], {}), '(adj)\n', (1057, 1062), True, 'import numpy as np\n'), ((1197, 1221), 'functions.grad_loss', 'grad_loss', (['adj', 'y', 'theta'], {}), '(adj, y, theta)\n', (1206, 1221), False, 'from functions import grad_loss\n'), ((1459, 1475), 'numpy.zeros', 'np.zeros', (['(D, D)'], {}), '((D, D))\n', (1467, 1475), True, 'import numpy as np\n'), ((1800, 1813), 'numpy.array', 'np.array', (['adj'], {}), '(adj)\n', (1808, 1813), True, 'import numpy as np\n'), ((1948, 1972), 'functions.grad_loss', 'grad_loss', (['adj', 'y', 'theta'], {}), '(adj, y, theta)\n', (1957, 1972), False, 'from functions import grad_loss\n'), ((733, 744), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (741, 744), True, 'import numpy as np\n'), ((1484, 1495), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1492, 1495), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random as rdm
# temporary variable, only used in testing, replace with actual graph inputs
# currently, generates 100 evenly spaced numbers between 0 and pi
ps1 = np.linspace(0, 1 * np.pi, 100)
# create the graph plot
fig, ax = plt.subplots()
# animated=True tells matplotlib to only draw the artist when we
# explicitly request it
# create the artist "lin", a Line2D drawn on the axes "ax"
(lin,) = ax.plot(ps1, np.sin(ps1), animated=True)
ax.set_xlim(0, 5000)
# make sure the window is raised, but the script keeps going
plt.show(block=False)
# stop to admire our empty window axes and ensure it is rendered at
# least once.
#
# We need to fully draw the figure at its final size on the screen
# before we continue on so that :
# a) we have the correctly sized and drawn background to grab
# b) we have a cached renderer so that ``ax.draw_artist`` works
# so we spin the event loop to let the backend process any pending operations
plt.pause(0.1)
# get copy of entire figure (everything inside fig.bbox) sans animated artist
bg = fig.canvas.copy_from_bbox(fig.bbox)
# draw the animated artist, this uses a cached renderer
ax.draw_artist(lin)
# show the result to the screen, this pushes the updated RGBA buffer from the
# renderer to the GUI framework, so you can see it
fig.canvas.blit(fig.bbox)
for j in range(1000):
# reset the background back in the canvas state, screen unchanged
fig.canvas.restore_region(bg)
# update the artist, neither the canvas state nor the screen have changed
lin.set_ydata(np.sin(ps1 + (j / 100) * np.pi))
# re-render the artist, updating the canvas state, but not the screen
ax.draw_artist(lin)
# copy the image to the GUI state, but screen might not be changed yet
fig.canvas.blit(fig.bbox)
# flush any pending GUI events, re-painting the screen if needed
fig.canvas.flush_events()
# you can put a pause in if you want to slow things down
# plt.pause(.1)
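# Note: the cached background ``bg`` becomes stale if the figure is resized;
# in that case do a full draw and grab a fresh copy with
# fig.canvas.copy_from_bbox(fig.bbox) before blitting again.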
|
[
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((263, 293), 'numpy.linspace', 'np.linspace', (['(0)', '(1 * np.pi)', '(100)'], {}), '(0, 1 * np.pi, 100)\n', (274, 293), True, 'import numpy as np\n'), ((329, 343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (341, 343), True, 'import matplotlib.pyplot as plt\n'), ((622, 643), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (630, 643), True, 'import matplotlib.pyplot as plt\n'), ((1036, 1050), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1045, 1050), True, 'import matplotlib.pyplot as plt\n'), ((510, 521), 'numpy.sin', 'np.sin', (['ps1'], {}), '(ps1)\n', (516, 521), True, 'import numpy as np\n'), ((1625, 1654), 'numpy.sin', 'np.sin', (['(ps1 + j / 100 * np.pi)'], {}), '(ps1 + j / 100 * np.pi)\n', (1631, 1654), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
from forPython.datasets.uci import load_mhealth
from forPython.models.torch.cnn import SimpleCNN
from forPython.utility.trainer import TorchSimpleTrainer
np.random.seed(0)
torch.random.manual_seed(0)
(x_train, y_train), (x_test, y_test) = load_mhealth()
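# the activity labels appear to be 1-based in the raw arrays; shift them to
# 0-based class indices as expected by nn.CrossEntropyLoss (n_outputs = 12)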
y_train -= 1
y_test -= 1
n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[2], 12
batch_size, epochs = 32, 10
x_train = torch.tensor(x_train).float()
x_test = torch.tensor(x_test).float()
y_train = torch.tensor(y_train[:, 0]).long()
y_test = torch.tensor(y_test[:, 0]).long()
mid_size = 128 * 62
model = SimpleCNN(n_features, mid_size, n_outputs)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
train_ds = TensorDataset(x_train, y_train)
test_ds = TensorDataset(x_test, y_test)
train_loader = DataLoader(train_ds, batch_size, False)
test_loader = DataLoader(test_ds, batch_size, False)
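# note: the third positional argument of DataLoader is ``shuffle``; it is left
# False here so the batch order stays deterministic given the seeds set above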
clf = TorchSimpleTrainer(model, loss_func, optimizer)
clf.fit(train_loader, epochs)
clf.evaluate(test_loader)
|
[
"torch.random.manual_seed",
"forPython.utility.trainer.TorchSimpleTrainer",
"forPython.models.torch.cnn.SimpleCNN",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.TensorDataset",
"torch.tensor",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"forPython.datasets.uci.load_mhealth"
] |
[((288, 305), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (302, 305), True, 'import numpy as np\n'), ((306, 333), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(0)'], {}), '(0)\n', (330, 333), False, 'import torch\n'), ((375, 389), 'forPython.datasets.uci.load_mhealth', 'load_mhealth', ([], {}), '()\n', (387, 389), False, 'from forPython.datasets.uci import load_mhealth\n'), ((718, 760), 'forPython.models.torch.cnn.SimpleCNN', 'SimpleCNN', (['n_features', 'mid_size', 'n_outputs'], {}), '(n_features, mid_size, n_outputs)\n', (727, 760), False, 'from forPython.models.torch.cnn import SimpleCNN\n'), ((773, 794), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (792, 794), False, 'from torch import nn\n'), ((850, 881), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (863, 881), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((892, 921), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (905, 921), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((938, 977), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds', 'batch_size', '(False)'], {}), '(train_ds, batch_size, False)\n', (948, 977), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((992, 1030), 'torch.utils.data.DataLoader', 'DataLoader', (['test_ds', 'batch_size', '(False)'], {}), '(test_ds, batch_size, False)\n', (1002, 1030), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1038, 1085), 'forPython.utility.trainer.TorchSimpleTrainer', 'TorchSimpleTrainer', (['model', 'loss_func', 'optimizer'], {}), '(model, loss_func, optimizer)\n', (1056, 1085), False, 'from forPython.utility.trainer import TorchSimpleTrainer\n'), ((531, 552), 'torch.tensor', 'torch.tensor', (['x_train'], {}), '(x_train)\n', (543, 552), False, 'import torch\n'), ((570, 590), 'torch.tensor', 'torch.tensor', (['x_test'], {}), '(x_test)\n', (582, 590), False, 'import torch\n'), ((610, 637), 'torch.tensor', 'torch.tensor', (['y_train[:, 0]'], {}), '(y_train[:, 0])\n', (622, 637), False, 'import torch\n'), ((654, 680), 'torch.tensor', 'torch.tensor', (['y_test[:, 0]'], {}), '(y_test[:, 0])\n', (666, 680), False, 'import torch\n')]
|
###------------------------------------------------------###
### Replay and Remember Memory Class ###
###------------------------------------------------------###
import numpy as np
from hyperparameters import *
# normalize pixel values 0-255 -> 0-1 to reduce exploding gradients
def normalize_states(current_frame_history):
return current_frame_history.astype('float32') / 255.
class ReplayMemory:
def __init__(self, memory_size, state_size, action_size):
# set the state size, HEIGHT : default 84px
self.state_height = state_size[0]
# set the state size, WIDTH : default 84px
self.state_width = state_size[1]
# set the state size, DEPTH : default 4 for discrete frame sets, for 4 frames
# 5 with frame windows
self.state_depth = state_size[2]
# set the action size, 3 actions, minus the first no-op
self.action_size = action_size
# initial size
self.size = 0
# set the max size of the remember and replay memory
self.maxsize = memory_size
# default current index
self.current_index = 0
if hp['DISCRETE_FRAMING']:
            # buffer of current states, shape (memory_size, height, width, depth), e.g. (N, 84, 84, 4)
self.current_states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
            # buffer of next states, same shape as current_states
self.next_states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
# used if using frame sliding
else:
self.states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
# reward array (N)
self.reward = np.zeros([memory_size], dtype=np.uint8)
# integer action
self.action = [0]*memory_size
# Boolean (terminal transition?)
self.lost_life = [False]*memory_size
def remember_discrete(self, current_states, action, reward, next_states, lost_life):
# Stores a single memory item
self.current_states[self.current_index,:] = current_states
self.next_states[self.current_index,:] = next_states
# get the rest of the items
self.action[self.current_index] = action
self.reward[self.current_index] = reward
self.lost_life[self.current_index] = lost_life
# offset the current index
self.current_index = (self.current_index + 1) % self.maxsize
# increase the size
self.size = max(self.current_index,self.size)
def replay_discrete(self, model, target_model):
# Run replay!
# set the number of samples to train on
num_samples = hp['REPLAY_ITERATIONS']
# set the sample size out of the memory bank
sample_size = hp['BATCH_SIZE']
# discount rate
gamma = hp['GAMMA']
# show the learning fit
show_fit = hp['SHOW_FIT']
# Can't train if we don't yet have enough samples to begin with...
if self.size < sample_size:
return
# number of replays
for i in range(num_samples):
# Select sample_size memory indices from the whole set
current_sample = np.random.choice(self.size, sample_size, replace=False)
# Slice memory into training sample
# current state is frames [0, 1, 2, 3]
# and normalize states [0,1] instead of 0-255
current_states = normalize_states(self.current_states[current_sample, :, :, :])
# next_state is frames [1, 2, 3, 4]
# and normalize states [0,1] instead of 0-255
next_states = normalize_states(self.next_states[current_sample, :, :, :])
# get the rest of the items from memory
actions = [self.action[j] for j in current_sample]
reward = self.reward[current_sample]
lost_lives = [self.lost_life[j] for j in current_sample]
# Obtain model's current Q-values
model_targets = model.predict(current_states)
            # Create targets r + gamma * max_a' Q(s', a')  (np.amax takes the max over actions)
# Use the target model!
targets = reward + gamma * np.amax(target_model.predict(next_states), axis=1)
# Absorb the reward on terminal state-action transitions
targets[lost_lives] = reward[lost_lives]
# Update just the relevant parts of the model_target vector...
model_targets[range(sample_size), actions] = targets
# Current State: (32, 84, 84, 4)
# Model Targets: (32, 4)
# Update the weights accordingly
model.fit(current_states, model_targets,
epochs=1 ,verbose=show_fit, batch_size=sample_size)
def remember_frame_sliding(self, states, action, reward, lost_life):
# Stores a single memory item
self.states[self.current_index,:] = states
# get the rest of the items
self.action[self.current_index] = action
self.reward[self.current_index] = reward
self.lost_life[self.current_index] = lost_life
# offset the current index
self.current_index = (self.current_index + 1) % self.maxsize
# increase the size
self.size = max(self.current_index,self.size)
def replay_slidding(self, model, target_model):
# Run replay!
# set the number of samples to train on
num_samples = hp['REPLAY_ITERATIONS']
# set the sample size out of the memory bank
sample_size = hp['BATCH_SIZE']
# discount rate
gamma = hp['GAMMA']
# show the learning fit
show_fit = hp['SHOW_FIT']
# Can't train if we don't yet have enough samples to begin with...
if self.size < sample_size:
return
# number of replays
for i in range(num_samples):
# Select sample_size memory indices from the whole set
current_sample = np.random.choice(self.size, sample_size, replace=False)
# Slice memory into training sample
# current state is frames [0, 1, 2, 3]
# and normalize states [0,1] instead of 0-255
current_states = normalize_states(self.states[current_sample, :, :, :hp['FRAME_BATCH_SIZE']])
# next_state is frames [1, 2, 3, 4]
# and normalize states [0,1] instead of 0-255
next_states = normalize_states(self.states[current_sample, :, :, 1:])
# get the rest of the items from memory
actions = [self.action[j] for j in current_sample]
reward = self.reward[current_sample]
lost_lives = [self.lost_life[j] for j in current_sample]
# Obtain model's current Q-values
model_targets = model.predict(current_states)
            # Create targets r + gamma * max_a' Q(s', a')  (np.amax takes the max over actions)
# Use the target model
targets = reward + gamma * np.amax(target_model.predict(next_states), axis=1)
# Absorb the reward on terminal state-action transitions
targets[lost_lives] = reward[lost_lives]
# Update just the relevant parts of the model_target vector...
model_targets[range(sample_size), actions] = targets
# Current State: (32, 84, 84, 4)
# Model Targets: (32, 4)
# Update the weights accordingly
model.fit(current_states, model_targets,
epochs=1 ,verbose=show_fit, batch_size=sample_size)
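# Minimal usage sketch (illustrative only; ``model`` and ``target_model`` are
# assumed to be compiled Keras-style networks exposing .predict() and .fit()):
# memory = ReplayMemory(memory_size=100000, state_size=(84, 84, 4), action_size=4)
# memory.remember_discrete(current_frames, action, reward, next_frames, lost_life)
# memory.replay_discrete(model, target_model)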
|
[
"numpy.random.choice",
"numpy.zeros"
] |
[((1868, 1907), 'numpy.zeros', 'np.zeros', (['[memory_size]'], {'dtype': 'np.uint8'}), '([memory_size], dtype=np.uint8)\n', (1876, 1907), True, 'import numpy as np\n'), ((1338, 1437), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1346, 1437), True, 'import numpy as np\n'), ((1541, 1640), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1549, 1640), True, 'import numpy as np\n'), ((1723, 1822), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1731, 1822), True, 'import numpy as np\n'), ((3446, 3501), 'numpy.random.choice', 'np.random.choice', (['self.size', 'sample_size'], {'replace': '(False)'}), '(self.size, sample_size, replace=False)\n', (3462, 3501), True, 'import numpy as np\n'), ((6335, 6390), 'numpy.random.choice', 'np.random.choice', (['self.size', 'sample_size'], {'replace': '(False)'}), '(self.size, sample_size, replace=False)\n', (6351, 6390), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.stats as stat
from utils import dichotomic_search
""" Implementation of last particle variant """
def ImportanceSplittingLp(gen,kernel,h,tau=0,N=100,s=0.1,decay=0.9,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005):
"""
Importance splitting last particle estimator, i.e. the importance splitting algorithm with K=N-1
Args:
gen: generator of iid samples X_i [fun]
kernel: mixing kernel invariant to f_X [fun]
h: score function from gaussian vector [fun]
tau: threshold. The rare events are defined as h(X)>tau_j [tx1]
N: number of samples [1x1] (100)
        s: strength of the kernel [1x1] (0.1)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (200)
        check_every: print progress every check_every iterations [1x1] (3)
decay: decay rate of the strength [1x1] (0.9)
accept_ratio: lower bound of accept ratio [1x1] (0.5)
alpha: level of confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (0)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
            -s_out['CI_est']: estimated confidence interval
-s_out['Xrare']: Examples of the rare event
-s_out['result']: Result of the estimation/hypothesis testing process
"""
# Internals
q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
#d =gen(1).shape[-1] # dimension of the random vectors
k = 1 # Number of iterations
p = (N-1)/N
confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
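    # In the idealised last-particle model the number of iterations needed to
    # cross a level of probability p_c is driven by a rate-N Poisson process, so
    # confidence_level_m(y) is the chance that y iterations suffice when the true
    # probability equals p_c; m is then chosen so that failing to reach tau
    # within m iterations rejects "p >= p_c" at level 1 - alpha_test.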
m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
m = int(m)+1
if verbose:
print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
if m>=n_max:
raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
tau_j = -np.inf
P_est = 0
Var_est = 0
    CI_est = np.zeros(2)
kernel_pass=0
Count_accept = 0
check=0
## Init
# step A0: generate & compute scores
X = gen(N) # generate N samples
SX = h(X) # compute their scores
Count_h = N # Number of calls to function h
reject_rate = 0
avg_gain=0
#step B: find new threshold
## While
while (k<=m):
#find new threshold
        i_dead = np.argmin(SX,axis = None) # index of the lowest-scoring ('last') particle
#print(SX[i_dead], tau_j )
if tau_j!=-np.inf:
gain = np.abs((SX[i_dead]-tau_j)/tau_j)
else:
gain=0
gamma = 1+gain_forget_rate*(k-1)
avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
if k>1 and avg_gain<gain_thresh and reject_rate<reject_thresh:
s = s/decay
if verbose>=1 and check%check_every==0:
print('Strength of kernel increased!')
print(f's={s}')
        tau_j = SX[i_dead] # set the threshold to the last particle's score
if tau_j>tau:
P_est= p**(k-1)
break #it is useless to compute new minimum if desired level has already been reached
if verbose>=1 and check%check_every==0:
print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
check+=1
# Refresh samples
i_new = np.random.choice(list(set(range(N))-set([i_dead])))
z0 = X[i_new,:]
sz0 = SX[i_new]
for t in range(T):
w = kernel(z0,s)
sw = h(w)
if sw>=tau_j:
z0 = w
sz0 = sw
Count_accept+=1
X[i_dead,:] = z0
SX[i_dead] = sz0
Count_h+=T
gamma = T+reject_forget_rate*kernel_pass
reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
if check%check_every==0 and verbose>=1:
print(f'Accept ratio:{Count_accept/T}')
print(f'Reject rate:{reject_rate}')
kernel_pass+=T
if reject_rate > (1-accept_ratio):
s = s*decay
if verbose>=1 and check%check_every==0:
print('Strength of kernel diminished!')
print(f's={s}')
Count_accept = 0
k += 1 # increase iteration number
if tau_j>tau:
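        # asymptotic relative variance and two-sided confidence bounds of the
        # last-particle estimator at level alpha_est (q is the Gaussian quantile)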
Var_est = P_est**2*(P_est**(-1/N)-1)
CI_est[0] = P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
CI_est[1] = P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
s_out = {'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N}
s_out['Cert']=False
s_out['Xrare'] = X
else:
s_out = {'Var_est':None, 'CI_est':[0,p_c],'Iter':k,'Calls':Count_h,'Sample size':N}
P_est = p_c
s_out['Cert']=True
s_out['Xrare']= None
return P_est, s_out
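# Minimal usage sketch for ImportanceSplittingLp (illustrative only; gen, kernel
# and h are user supplied; here a standard-normal toy problem, P(X_1 > 4)):
# d = 10
# gen = lambda n: np.random.randn(n, d)
# kernel = lambda x, s: (x + s*np.random.randn(*x.shape)) / np.sqrt(1. + s**2)  # leaves N(0, I) invariant
# h = lambda x: np.asarray(x)[..., 0]
# P_est, info = ImportanceSplittingLp(gen, kernel, h, tau=4.0, N=100)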
def ImportanceSplittingLpBatch(gen,kernel_b,h,h_big,nb_system=5,d=784,tau=0,N=100,s=0.1,decay=0.92,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005,fast_decay=True, fast_d=1):
"""
Importance splitting last particle estimator, i.e. the importance splitting algorithm with K=N-1
with several particle systems.
Args:
gen: generator of iid samples X_i [fun]
        kernel_b: mixing kernel invariant to f_X, applied batch-wise [fun]
h: score function from gaussian vector [fun]
tau: threshold. The rare events are defined as h(X)>tau_j [tx1]
N: number of samples [1x1] (100)
        s: strength of the kernel [1x1] (0.1)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (200)
        check_every: print progress every check_every iterations [1x1] (3)
decay: decay rate of the strength [1x1] (0.9)
accept_ratio: lower bound of accept ratio [1x1] (0.5)
alpha: level of confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (0)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
            -s_out['CI_est']: estimated confidence interval
-s_out['Xrare']: Examples of the rare event
-s_out['result']: Result of the estimation/hypothesis testing process
"""
q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
s_b = s*np.ones(nb_system)
k = 1 # Number of iterations
p = (N-1)/N
confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
m = int(m)+1
if verbose:
print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
if m>=n_max:
raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
tau_j = np.array(nb_system*[-np.inf])
is_done = np.zeros(nb_system)
done_k = -np.ones(nb_system)
kernel_pass= 0
Count_accept = np.zeros(nb_system)
check=0
X = gen(nb_system*N).reshape((nb_system,N,d)) # generate N*nb_system samples
SX = h_big(X.reshape((nb_system*N,d))).reshape((nb_system,N)) # compute their scores
Count_h = nb_system*N # Number of calls to function h
reject_rate = np.zeros(nb_system)
avg_gain= np.zeros(nb_system)
Xrare = -np.ones((nb_system,N,d))
nb_system_c = nb_system #current number, as systems can get deleted as algorithm goes
real_indices = np.arange(nb_system) #keeping track of initial systems indices as systems gets deleted
local_indices = np.arange(nb_system_c)
while (k<=m):
#find new threshold
        i_deads = np.argmin(SX,axis = 1) # index of the lowest-scoring particle in each system
#we switch the 'last' particle in terms of score and the first particle as indices go, for simplicity
tempXs, tempSs = np.array(X[:,0],copy=True), np.array(SX[:,0],copy=True)
X[:,0], SX[:,0] = X[local_indices,i_deads],SX[local_indices,i_deads]
X[local_indices,i_deads],SX[local_indices,i_deads] = tempXs, tempSs
del tempSs, tempXs
#print(SX[i_dead], tau_j )
if k>1:
gain = np.abs((SX[local_indices, i_deads]-tau_j[None])/tau_j[None])
else:
gain=np.zeros(nb_system_c)
gamma = 1+gain_forget_rate*(k-1)
avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
if k>1:
is_too_low = (avg_gain<gain_thresh) * (reject_rate<reject_thresh)
if is_too_low.sum()>0:
s_b = s_b/decay*is_too_low+s_b*(1-is_too_low)
s_b = s_b.reshape(-1)
if verbose>=1 and check%check_every==0:
print('Strengths of kernels updated!')
print(f's_b={s_b}')
        tau_j = SX[:,0] # set the threshold to the last particles' scores
if (tau_j>tau).sum()>0:
is_over = np.where(tau_j>tau)[0]
if verbose:
print(f"System(s):{is_over} reached required level.")
#we need to kill systems that have reached required level, while taking this into account for the real systems indices
is_done[real_indices[is_over]],done_k[real_indices[is_over]]=1,k
if is_done.sum()==nb_system:
                break #if all the systems have reached the final level we can stop the iterations there
nb_system_c-=len(is_over)
local_indices = np.arange(nb_system_c)
Xrare[is_over] = X[is_over]
X,SX = np.delete(X,is_over, axis=0),np.delete(SX,is_over, axis=0)
gain, avg_gain,tau_j = np.delete(gain,is_over), np.delete(avg_gain,is_over), np.delete(tau_j,is_over)
reject_rate, Count_accept = np.delete(reject_rate,is_over), np.delete(Count_accept,is_over)
real_indices = np.delete(real_indices,is_over)
s_b = np.delete(s_b ,is_over)
if verbose>=1 and check%check_every==0:
print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
check+=1
# Refresh samples
i_news = np.random.choice(range(1,N),size=nb_system_c)
z0s = X[local_indices,i_news]
sz0s = SX[local_indices,i_news]
for _ in range(T):
w = kernel_b(z0s,s_b) #kernel_b must take into account the number of systems and different strengths
sw = h(w, real_indices)
is_good_move = sw>=tau_j
z0s,sz0s = z0s*(1-is_good_move)[:,None] + is_good_move[:,None]*w, sz0s *(1-is_good_move) + is_good_move*sw
Count_accept = Count_accept + is_good_move
X[:,0] = z0s
SX[:,0] = sz0s
del z0s, sz0s
Count_h+=T*nb_system_c
gamma = T+reject_forget_rate*kernel_pass
reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
if check%check_every==0 and verbose>=1:
print(f'Accept ratios (local averages):{Count_accept/T}')
print(f'Reject rates (moving averages):{reject_rate}')
kernel_pass+=T
is_zero_accept = Count_accept==0
is_too_high = reject_rate > (1-accept_ratio)
if is_too_high.sum()>0:
s_b = s_b*decay*is_too_high+s_b*(1-is_too_high)
s_b = s_b.reshape(-1)
if fast_decay:
s_b = s_b*decay**fast_d*is_zero_accept+(1-is_zero_accept)*s_b
if verbose>=1 and check%check_every==0:
print('Strengths of kernel updated!')
print(f's_b={s_b}')
Count_accept = np.zeros(nb_system_c)
k += 1 # increase iteration number
if is_done.sum()>0:
P_est = p**(done_k-1)*is_done+(1-is_done)*p_c
Var_est = is_done*P_est**2*(P_est**(-1/N)-1)-(1-is_done)
CI_est = np.zeros((nb_system,2))
CI_est[:,0] = is_done*(P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N))
CI_est[:,1] = is_done*(P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)) + (1-is_done)*p_c
cert_ = 1-is_done
s_out ={'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N,'Cert':cert_}
s_out['Xrare'] = Xrare
else:
s_out = {'Var_est': -np.ones(nb_system), 'CI_est':np.array(nb_system*[0,p_c]),'Iter':k,'Calls':Count_h,'Sample size':N}
s_out['Cert']= np.array([True]*nb_system)
s_out['Xrare']= None
P_est = np.array(nb_system*[p_c])
return P_est, s_out
def ImportanceSplitting(gen,kernel,h,tau,N=2000,K=1000,s=1,decay=0.99,T = 30,n_max = 300, alpha = 0.95,
verbose=1, track_rejection=False, rejection_ctrl = False, rej_threshold=0.9, gain_rate = 1.0001,
prog_thresh=0.01):
"""
Importance splitting estimator
Args:
gen: generator of iid samples X_i [fun]
kernel: mixing kernel invariant to f_X [fun]
h: score function [fun]
tau: threshold. The rare event is defined as h(X)>tau [1x1]
N: number of samples [1x1] (2000)
K: number of survivors [1x1] (1000)
        s: strength of the mixing kernel [1x1] (1)
decay: decay rate of the strength of the kernel [1x1] (0.9)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (200)
alpha: level of confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (1)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
            -s_out['CI_est']: estimated confidence interval
            -s_out['Xrare']: Examples of the rare event
"""
# Internals
q = -stat.norm.ppf((1-alpha)/2) # gaussian quantile
d =gen(1).shape[-1] # dimension of the random vectors
n = 1 # Number of iterations
## Init
# step A0: generate & compute scores
X = gen(N) # generate N samples
SX = h(X) # compute their scores
Count_h = N # Number of calls to function h
#step B: find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
rejection_rate=0
kernel_pass=0
rejection_rates=[0]
## While
while (n<n_max) and (tau_j<tau):
n += 1 # increase iteration number
if n >=n_max:
raise RuntimeError('The estimator failed. Increase n_max?')
# step C: Keep K highest scores samples in Y
Y = X[ind[0:K],:]
SY = SX[ind[0:K]] # Keep their scores in SY
# step D: refresh samples
Z = np.zeros((N-K,d))
SZ = np.zeros((N-K,1))
for k in range(N-K):
u = np.random.choice(range(K),size=1,replace=False) # pick a sample at random in Y
            z0 = Y[u,:]
            sz0 = SY[u]  # keep the current score so SZ[k] is defined even if no proposal is accepted
            accept_flag = False
for t in range(T):
w = kernel(z0,s) # propose a refreshed sample
kernel_pass+=1
sw = h(w) # compute its score
Count_h = Count_h + 1
if sw>tau_j: # accept if true
z0 = w
sz0 = sw
accept_flag = True # monitor if accepted
elif track_rejection:
rejection_rate=((kernel_pass-1.)/kernel_pass)*rejection_rate+(1/kernel_pass)
Z[k,:] = z0 # a fresh sample
SZ[k] = sz0 # its score
if rejection_ctrl and rejection_rate>=rej_threshold:
print('Strength of kernel diminished!')
s = s*decay
print(f's={s}')
if not accept_flag:
s = s * decay # decrease the strength of the mixing kernel
# step A: update set X and the scores
X[:K,:] = Y # copy paste the old samples of Y into X
SX[:K] = SY
X[K:N,:] = Z # copy paste the new samples of Z into X
SX[K:N] = SZ
# step B: Find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
new_tau = S_sort[K]
if (new_tau-tau_j)/tau_j<prog_thresh:
s = s*gain_rate
print('Strength of kernel increased!')
print(f's={s}')
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
if track_rejection:
print(f'Rejection rate: {rejection_rate}')
rejection_rates+=[rejection_rate]
# step E: Last round
K_last = (SX>=tau).sum() # count the nb of score above the target threshold
#Estimation
p = K/N
p_last = K_last/N
P_est = (p**(n-1))*p_last
Var_est = (P_est**2)*((n-1)*(1-p)/p + (1-p_last)/p_last)/N
P_bias = P_est*n*(1-p)/p/N
CI_est = P_est*np.array([1,1]) + q*np.sqrt(Var_est)*np.array([-1,1])
Xrare = X[(SX>=tau).reshape(-1),:]
s_out = {"Var_est":Var_est,"CI_est": CI_est,"N":N,"K":K,"s":s,"decay":decay,"T":T,"Count_h":Count_h,
"P_bias":P_bias,"n":n,"Xrare":Xrare}
if track_rejection:
s_out["rejection_rates"]=np.array(rejection_rates)
s_out["Avg. rejection rate"]=rejection_rate
return P_est,s_out
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.where",
"numpy.delete",
"numpy.log",
"scipy.stats.norm.ppf",
"utils.dichotomic_search",
"numpy.array",
"numpy.zeros",
"numpy.argsort",
"numpy.argmin",
"numpy.arange"
] |
[((2177, 2251), 'utils.dichotomic_search', 'dichotomic_search', ([], {'f': 'confidence_level_m', 'a': '(100)', 'b': 'n_max', 'thresh': 'alpha_test'}), '(f=confidence_level_m, a=100, b=n_max, thresh=alpha_test)\n', (2194, 2251), False, 'from utils import dichotomic_search\n'), ((2611, 2622), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2619, 2622), True, 'import numpy as np\n'), ((7660, 7734), 'utils.dichotomic_search', 'dichotomic_search', ([], {'f': 'confidence_level_m', 'a': '(100)', 'b': 'n_max', 'thresh': 'alpha_test'}), '(f=confidence_level_m, a=100, b=n_max, thresh=alpha_test)\n', (7677, 7734), False, 'from utils import dichotomic_search\n'), ((8043, 8074), 'numpy.array', 'np.array', (['(nb_system * [-np.inf])'], {}), '(nb_system * [-np.inf])\n', (8051, 8074), True, 'import numpy as np\n'), ((8092, 8111), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8100, 8111), True, 'import numpy as np\n'), ((8188, 8207), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8196, 8207), True, 'import numpy as np\n'), ((8467, 8486), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8475, 8486), True, 'import numpy as np\n'), ((8501, 8520), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8509, 8520), True, 'import numpy as np\n'), ((8669, 8689), 'numpy.arange', 'np.arange', (['nb_system'], {}), '(nb_system)\n', (8678, 8689), True, 'import numpy as np\n'), ((8777, 8799), 'numpy.arange', 'np.arange', (['nb_system_c'], {}), '(nb_system_c)\n', (8786, 8799), True, 'import numpy as np\n'), ((1920, 1954), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha_est) / 2)'], {}), '((1 - alpha_est) / 2)\n', (1933, 1954), True, 'import scipy.stats as stat\n'), ((2993, 3017), 'numpy.argmin', 'np.argmin', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (3002, 3017), True, 'import numpy as np\n'), ((7426, 7460), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha_est) / 2)'], {}), '((1 - alpha_est) / 2)\n', (7439, 7460), True, 'import scipy.stats as stat\n'), ((7489, 7507), 'numpy.ones', 'np.ones', (['nb_system'], {}), '(nb_system)\n', (7496, 7507), True, 'import numpy as np\n'), ((8126, 8144), 'numpy.ones', 'np.ones', (['nb_system'], {}), '(nb_system)\n', (8133, 8144), True, 'import numpy as np\n'), ((8534, 8560), 'numpy.ones', 'np.ones', (['(nb_system, N, d)'], {}), '((nb_system, N, d))\n', (8541, 8560), True, 'import numpy as np\n'), ((8873, 8894), 'numpy.argmin', 'np.argmin', (['SX'], {'axis': '(1)'}), '(SX, axis=1)\n', (8882, 8894), True, 'import numpy as np\n'), ((12769, 12790), 'numpy.zeros', 'np.zeros', (['nb_system_c'], {}), '(nb_system_c)\n', (12777, 12790), True, 'import numpy as np\n'), ((13009, 13033), 'numpy.zeros', 'np.zeros', (['(nb_system, 2)'], {}), '((nb_system, 2))\n', (13017, 13033), True, 'import numpy as np\n'), ((13591, 13619), 'numpy.array', 'np.array', (['([True] * nb_system)'], {}), '([True] * nb_system)\n', (13599, 13619), True, 'import numpy as np\n'), ((13663, 13690), 'numpy.array', 'np.array', (['(nb_system * [p_c])'], {}), '(nb_system * [p_c])\n', (13671, 13690), True, 'import numpy as np\n'), ((15217, 15247), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha) / 2)'], {}), '((1 - alpha) / 2)\n', (15230, 15247), True, 'import scipy.stats as stat\n'), ((15577, 15602), 'numpy.argsort', 'np.argsort', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (15587, 15602), True, 'import numpy as np\n'), ((16266, 16286), 'numpy.zeros', 'np.zeros', (['(N - K, d)'], {}), '((N - K, d))\n', (16274, 16286), True, 
'import numpy as np\n'), ((16297, 16317), 'numpy.zeros', 'np.zeros', (['(N - K, 1)'], {}), '((N - K, 1))\n', (16305, 16317), True, 'import numpy as np\n'), ((18885, 18910), 'numpy.array', 'np.array', (['rejection_rates'], {}), '(rejection_rates)\n', (18893, 18910), True, 'import numpy as np\n'), ((3127, 3163), 'numpy.abs', 'np.abs', (['((SX[i_dead] - tau_j) / tau_j)'], {}), '((SX[i_dead] - tau_j) / tau_j)\n', (3133, 3163), True, 'import numpy as np\n'), ((9058, 9086), 'numpy.array', 'np.array', (['X[:, 0]'], {'copy': '(True)'}), '(X[:, 0], copy=True)\n', (9066, 9086), True, 'import numpy as np\n'), ((9086, 9115), 'numpy.array', 'np.array', (['SX[:, 0]'], {'copy': '(True)'}), '(SX[:, 0], copy=True)\n', (9094, 9115), True, 'import numpy as np\n'), ((9364, 9428), 'numpy.abs', 'np.abs', (['((SX[local_indices, i_deads] - tau_j[None]) / tau_j[None])'], {}), '((SX[local_indices, i_deads] - tau_j[None]) / tau_j[None])\n', (9370, 9428), True, 'import numpy as np\n'), ((9456, 9477), 'numpy.zeros', 'np.zeros', (['nb_system_c'], {}), '(nb_system_c)\n', (9464, 9477), True, 'import numpy as np\n'), ((10619, 10641), 'numpy.arange', 'np.arange', (['nb_system_c'], {}), '(nb_system_c)\n', (10628, 10641), True, 'import numpy as np\n'), ((11008, 11040), 'numpy.delete', 'np.delete', (['real_indices', 'is_over'], {}), '(real_indices, is_over)\n', (11017, 11040), True, 'import numpy as np\n'), ((11058, 11081), 'numpy.delete', 'np.delete', (['s_b', 'is_over'], {}), '(s_b, is_over)\n', (11067, 11081), True, 'import numpy as np\n'), ((13497, 13527), 'numpy.array', 'np.array', (['(nb_system * [0, p_c])'], {}), '(nb_system * [0, p_c])\n', (13505, 13527), True, 'import numpy as np\n'), ((17675, 17700), 'numpy.argsort', 'np.argsort', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (17685, 17700), True, 'import numpy as np\n'), ((18589, 18605), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (18597, 18605), True, 'import numpy as np\n'), ((18626, 18643), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (18634, 18643), True, 'import numpy as np\n'), ((2136, 2147), 'numpy.log', 'np.log', (['p_c'], {}), '(p_c)\n', (2142, 2147), True, 'import numpy as np\n'), ((7619, 7630), 'numpy.log', 'np.log', (['p_c'], {}), '(p_c)\n', (7625, 7630), True, 'import numpy as np\n'), ((10082, 10103), 'numpy.where', 'np.where', (['(tau_j > tau)'], {}), '(tau_j > tau)\n', (10090, 10103), True, 'import numpy as np\n'), ((10701, 10730), 'numpy.delete', 'np.delete', (['X', 'is_over'], {'axis': '(0)'}), '(X, is_over, axis=0)\n', (10710, 10730), True, 'import numpy as np\n'), ((10730, 10760), 'numpy.delete', 'np.delete', (['SX', 'is_over'], {'axis': '(0)'}), '(SX, is_over, axis=0)\n', (10739, 10760), True, 'import numpy as np\n'), ((10796, 10820), 'numpy.delete', 'np.delete', (['gain', 'is_over'], {}), '(gain, is_over)\n', (10805, 10820), True, 'import numpy as np\n'), ((10821, 10849), 'numpy.delete', 'np.delete', (['avg_gain', 'is_over'], {}), '(avg_gain, is_over)\n', (10830, 10849), True, 'import numpy as np\n'), ((10850, 10875), 'numpy.delete', 'np.delete', (['tau_j', 'is_over'], {}), '(tau_j, is_over)\n', (10859, 10875), True, 'import numpy as np\n'), ((10917, 10948), 'numpy.delete', 'np.delete', (['reject_rate', 'is_over'], {}), '(reject_rate, is_over)\n', (10926, 10948), True, 'import numpy as np\n'), ((10949, 10981), 'numpy.delete', 'np.delete', (['Count_accept', 'is_over'], {}), '(Count_accept, is_over)\n', (10958, 10981), True, 'import numpy as np\n'), ((13468, 13486), 'numpy.ones', 'np.ones', (['nb_system'], {}), 
'(nb_system)\n', (13475, 13486), True, 'import numpy as np\n'), ((18609, 18625), 'numpy.sqrt', 'np.sqrt', (['Var_est'], {}), '(Var_est)\n', (18616, 18625), True, 'import numpy as np\n'), ((5049, 5059), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (5056, 5059), True, 'import numpy as np\n'), ((5144, 5154), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (5151, 5154), True, 'import numpy as np\n'), ((13080, 13090), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (13087, 13090), True, 'import numpy as np\n'), ((5069, 5082), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (5075, 5082), True, 'import numpy as np\n'), ((5164, 5177), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (5170, 5177), True, 'import numpy as np\n'), ((13187, 13197), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (13194, 13197), True, 'import numpy as np\n'), ((13100, 13113), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (13106, 13113), True, 'import numpy as np\n'), ((13207, 13220), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (13213, 13220), True, 'import numpy as np\n')]
|
"""
this is a stripped down version of the SWHear class.
It's designed to hold only a single audio sample in memory.
check my github for a more complete version:
http://github.com/swharden
"""
import serial, os, pty
import time
import numpy as np
from threading import Thread
import random
class SpoofSerial:
"""
Creates some sine function for testing the GUI in variables *x* and *y*
"""
def __init__(self, freq=1, time_interval=2.0, period=0.016, generate_noise=False, snr=0.1):
self.freq = freq
self.time_interval = time_interval
self.period = period
self.generate_noise = generate_noise
self.snr = snr
self.x = np.arange(0, time_interval, period)
self.y = np.sin(2 * np.pi * self.x * freq)
self.dont_touch_me = False
self.paused = False
self.t = Thread(target=self.run_stream)
def start(self):
"""Starts running a stream on a new thread"""
self.t.start()
def run_stream(self):
"""Begins streaming a sine in *x* and *y*"""
while not self.paused:
time.sleep(self.period)
self.dont_touch_me = True
new_x_val = self.x[-1] + 1.0 * self.period
self.x = np.append(self.x, [new_x_val])
new_y_val = np.sin(2 * np.pi * new_x_val * self.freq)
if self.generate_noise:
new_y_val *= 1 + random.uniform(-self.snr, self.snr)
self.y = np.append(self.y, [new_y_val])
self.dont_touch_me = False
def pause(self):
"""Temporarily stops updating the sine, but the values are still kept"""
self.paused = True
def unpause(self):
"""Continue updating the sine"""
self.paused = False
if __name__ == "__main__":
print("Hi")
try:
thing = SpoofSerial(time_interval=1, period=0.5)
print("hi")
thing.start()
while True:
print(thing.x, thing.y)
except KeyboardInterrupt:
print("Exiting...")
exit()
|
[
"random.uniform",
"time.sleep",
"numpy.append",
"numpy.sin",
"threading.Thread",
"numpy.arange"
] |
[((714, 749), 'numpy.arange', 'np.arange', (['(0)', 'time_interval', 'period'], {}), '(0, time_interval, period)\n', (723, 749), True, 'import numpy as np\n'), ((768, 801), 'numpy.sin', 'np.sin', (['(2 * np.pi * self.x * freq)'], {}), '(2 * np.pi * self.x * freq)\n', (774, 801), True, 'import numpy as np\n'), ((889, 919), 'threading.Thread', 'Thread', ([], {'target': 'self.run_stream'}), '(target=self.run_stream)\n', (895, 919), False, 'from threading import Thread\n'), ((1151, 1174), 'time.sleep', 'time.sleep', (['self.period'], {}), '(self.period)\n', (1161, 1174), False, 'import time\n'), ((1296, 1326), 'numpy.append', 'np.append', (['self.x', '[new_x_val]'], {}), '(self.x, [new_x_val])\n', (1305, 1326), True, 'import numpy as np\n'), ((1354, 1395), 'numpy.sin', 'np.sin', (['(2 * np.pi * new_x_val * self.freq)'], {}), '(2 * np.pi * new_x_val * self.freq)\n', (1360, 1395), True, 'import numpy as np\n'), ((1527, 1557), 'numpy.append', 'np.append', (['self.y', '[new_y_val]'], {}), '(self.y, [new_y_val])\n', (1536, 1557), True, 'import numpy as np\n'), ((1467, 1502), 'random.uniform', 'random.uniform', (['(-self.snr)', 'self.snr'], {}), '(-self.snr, self.snr)\n', (1481, 1502), False, 'import random\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
# thermal conductivity
c = 1.0
# define the discretization grid
xmin = -5.0 # left/bottom bound
xmax = 5.0 # right/top bound
dx = 0.1 # space increment (default 0.1)
nx = int((xmax-xmin)/dx) # number of points on xy grid
# compute timestep such that the scheme is stable
dt = 0.002
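# For the 2D explicit scheme, stability requires c*dt/dx**2 <= 0.25;
# here c*dt/dx**2 = 1.0*0.002/0.01 = 0.2, so the chosen dt is safe.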
# set initial condition
u0 = np.zeros( (nx,nx) )
# advance the heat equation ten explicit time steps per animation frame
def step_wave(t):
if t == 0:
print( 'stability:', c*dt/(dx**2) )
for i in range(0,10):
un = u0.copy()
        # compute the 2D Laplacian with the 5-point central-difference stencil
L = (
u0[1:nx-1,0:nx-2] +
u0[2:nx,1:nx-1] - 4*u0[1:nx-1,1:nx-1] + u0[0:nx-2,1:nx-1] +
u0[1:nx-1,2:nx]
)
        # explicit forward-Euler update in time
un[1:nx-1,1:nx-1] = u0[1:nx-1,1:nx-1] + c*dt/(dx**2) * L
# apply boundary conditions
un[0,0:nx+1] = 0
un[nx-1,0:nx+1] = 0
un[0:nx+1,0] = 0
un[0:nx+1,nx-1] = 0
# heater
un[40:-40,50:80] = 1.0
u0[:] = un
img.set_array(u0)
return img,
fig = plt.figure()
img = plt.imshow( u0,
vmax=1.0,
vmin=0.0,
extent=[xmin, xmax, xmin, xmax],
cmap=cm.YlOrRd )
anim = animation.FuncAnimation( fig, step_wave, 10000,
interval=1,
repeat=False,
blit=True)
plt.title( "2D Heat Equation" )
plt.xlim( xmin, xmax )
plt.ylim( xmin, xmax )
plt.show()
|
[
"matplotlib.pyplot.imshow",
"matplotlib.animation.FuncAnimation",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
] |
[((440, 458), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {}), '((nx, nx))\n', (448, 458), True, 'import numpy as np\n'), ((1263, 1275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1273, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1370), 'matplotlib.pyplot.imshow', 'plt.imshow', (['u0'], {'vmax': '(1.0)', 'vmin': '(0.0)', 'extent': '[xmin, xmax, xmin, xmax]', 'cmap': 'cm.YlOrRd'}), '(u0, vmax=1.0, vmin=0.0, extent=[xmin, xmax, xmin, xmax], cmap=cm\n .YlOrRd)\n', (1292, 1370), True, 'import matplotlib.pyplot as plt\n'), ((1452, 1539), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'step_wave', '(10000)'], {'interval': '(1)', 'repeat': '(False)', 'blit': '(True)'}), '(fig, step_wave, 10000, interval=1, repeat=False,\n blit=True)\n', (1475, 1539), False, 'from matplotlib import animation\n'), ((1637, 1666), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Heat Equation"""'], {}), "('2D Heat Equation')\n", (1646, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1689), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1677, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1712), 'matplotlib.pyplot.ylim', 'plt.ylim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1700, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1723, 1725), True, 'import matplotlib.pyplot as plt\n')]
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import better.marketdata.globaldatamatrix as gdm
import numpy as np
import pandas as pd
import logging
from better.tools.configprocess import parse_time
from better.tools.data import get_volume_forward, get_type_list
import better.marketdata.replaybuffer as rb
MIN_NUM_PERIOD = 3
class DataMatrices:
def __init__(self, start, end, period, batch_size=50, volume_average_days=30, buffur_bias_ratio=0,
market="poloniex", coin_filter=1, window_size=50, feature_number=3, test_portion=0.15,
portion_reversed=False, online=False, is_permed=False):
"""
        :param start: Unix time
        :param end: Unix time
        :param period: the data access period of the global price matrix
        :param batch_size: size of the mini-batches drawn from the replay buffer
        :param volume_average_days: number of days over which trading volume is averaged when selecting coins
        :param buffur_bias_ratio: sample bias of the replay buffer
        :param market: name of the market the data is pulled from
        :param coin_filter: number of coins that would be selected
        :param window_size: periods of input data
        :param feature_number: number of price features kept per coin
        :param test_portion: portion of test set
        :param portion_reversed: if False, the order of the sets is [train, test],
                                 else the order is [test, train]
        :param online: if True, fetch the latest data online
        :param is_permed: if False, the sample inside a mini-batch is in order
"""
start = int(start)
self.__end = int(end)
# assert window_size >= MIN_NUM_PERIOD
self.__coin_no = coin_filter
type_list = get_type_list(feature_number)
self.__features = type_list
self.feature_number = feature_number
volume_forward = get_volume_forward(self.__end-start, test_portion, portion_reversed)
self.__history_manager = gdm.HistoryManager(coin_number=coin_filter, end=self.__end,
volume_average_days=volume_average_days,
volume_forward=volume_forward, online=online)
if market == "poloniex":
self.__global_data = self.__history_manager.get_global_panel(start,
self.__end,
period=period,
features=type_list)
else:
raise ValueError("market {} is not valid".format(market))
self.__period_length = period
# portfolio vector memory, [time, assets]
self.__PVM = pd.DataFrame(index=self.__global_data.minor_axis,
columns=self.__global_data.major_axis)
self.__PVM = self.__PVM.fillna(1.0 / self.__coin_no)
self._window_size = window_size
self._num_periods = len(self.__global_data.minor_axis)
self.__divide_data(test_portion, portion_reversed)
self._portion_reversed = portion_reversed
self.__is_permed = is_permed
self.__batch_size = batch_size
self.__replay_buffer = None
self.__delta = 0 # the count of global increased
end_index = self._train_ind[-1]
self.__replay_buffer = rb.ReplayBuffer(start_index=self._train_ind[0],
end_index=end_index,
sample_bias=buffur_bias_ratio,
batch_size=self.__batch_size,
coin_number=self.__coin_no,
is_permed=self.__is_permed)
logging.info("the number of training examples is %s"
", of test examples is %s" % (self._num_train_samples, self._num_test_samples))
logging.debug("the training set is from %s to %s" % (min(self._train_ind), max(self._train_ind)))
logging.debug("the test set is from %s to %s" % (min(self._test_ind), max(self._test_ind)))
@property
def global_weights(self):
return self.__PVM
@staticmethod
def create_from_config(config):
"""main method to create the DataMatrices in this project
@:param config: config dictionary
@:return: a DataMatrices object
"""
config = config.copy()
input_config = config["input"]
train_config = config["training"]
start = parse_time(input_config["start_date"])
end = parse_time(input_config["end_date"])
return DataMatrices(start=start,
end=end,
market=input_config["market"],
feature_number=input_config["feature_number"],
window_size=input_config["window_size"],
online=input_config["online"],
period=input_config["global_period"],
coin_filter=input_config["coin_number"],
is_permed=input_config["is_permed"],
buffur_bias_ratio=train_config["buffer_biased"],
batch_size=train_config["batch_size"],
volume_average_days=input_config["volume_average_days"],
test_portion=input_config["test_portion"],
portion_reversed=input_config["portion_reversed"],
)
@property
def global_matrix(self):
return self.__global_data
@property
def coin_list(self):
return self.__history_manager.coins
@property
def num_train_samples(self):
return self._num_train_samples
@property
def test_indices(self):
return self._test_ind[:-(self._window_size+1):]
@property
def num_test_samples(self):
return self._num_test_samples
def append_experience(self, online_w=None):
"""
:param online_w: (number of assets + 1, ) numpy array
Let it be None if in the backtest case.
"""
self.__delta += 1
self._train_ind.append(self._train_ind[-1]+1)
appended_index = self._train_ind[-1]
self.__replay_buffer.append_experience(appended_index)
def get_test_set(self):
return self.__pack_samples(self.test_indices)
def get_training_set(self):
return self.__pack_samples(self._train_ind[:-self._window_size])
def next_batch(self):
"""
@:return: the next batch of training sample. The sample is a dictionary
with key "X"(input data); "y"(future relative price); "last_w" a numpy array
with shape [batch_size, assets]; "w" a list of numpy arrays list length is
batch_size
"""
batch = self.__pack_samples([exp.state_index for exp in self.__replay_buffer.next_experience_batch()])
return batch
def __pack_samples(self, indexs):
indexs = np.array(indexs)
last_w = self.__PVM.values[indexs-1, :]
def setw(w):
self.__PVM.iloc[indexs, :] = w
M = [self.get_submatrix(index) for index in indexs]
M = np.array(M)
X = M[:, :, :, :-1]
y = M[:, :, :, -1] / M[:, 0, None, :, -2]
return {"X": X, "y": y, "last_w": last_w, "setw": setw}
# volume in y is the volume in next access period
def get_submatrix(self, ind):
return self.__global_data.values[:, :, ind:ind+self._window_size+1]
def __divide_data(self, test_portion, portion_reversed):
train_portion = 1 - test_portion
s = float(train_portion + test_portion)
if portion_reversed:
portions = np.array([test_portion]) / s
portion_split = (portions * self._num_periods).astype(int)
indices = np.arange(self._num_periods)
self._test_ind, self._train_ind = np.split(indices, portion_split)
else:
portions = np.array([train_portion]) / s
portion_split = (portions * self._num_periods).astype(int)
indices = np.arange(self._num_periods)
self._train_ind, self._test_ind = np.split(indices, portion_split)
self._train_ind = self._train_ind[:-(self._window_size + 1)]
# NOTE(zhengyao): change the logic here in order to fit both
# reversed and normal version
self._train_ind = list(self._train_ind)
self._num_train_samples = len(self._train_ind)
self._num_test_samples = len(self.test_indices)
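# A minimal construction sketch (not part of the original module): the config keys
# below mirror exactly what create_from_config() reads; the concrete values and the
# date format are illustrative assumptions only, and actually running this requires
# the poloniex data backend to be available.
if __name__ == "__main__":
    _example_config = {
        "input": {
            "start_date": "2017/01/01",
            "end_date": "2017/06/30",
            "market": "poloniex",
            "feature_number": 3,
            "window_size": 50,
            "online": False,
            "global_period": 1800,
            "coin_number": 11,
            "is_permed": False,
            "volume_average_days": 30,
            "test_portion": 0.15,
            "portion_reversed": False,
        },
        "training": {
            "buffer_biased": 5e-5,
            "batch_size": 50,
        },
    }
    _matrices = DataMatrices.create_from_config(_example_config)
    print(_matrices.num_train_samples, _matrices.num_test_samples)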
|
[
"better.tools.data.get_volume_forward",
"better.tools.data.get_type_list",
"numpy.arange",
"numpy.array",
"numpy.split",
"better.marketdata.replaybuffer.ReplayBuffer",
"pandas.DataFrame",
"better.marketdata.globaldatamatrix.HistoryManager",
"logging.info",
"better.tools.configprocess.parse_time"
] |
[((1773, 1802), 'better.tools.data.get_type_list', 'get_type_list', (['feature_number'], {}), '(feature_number)\n', (1786, 1802), False, 'from better.tools.data import get_volume_forward, get_type_list\n'), ((1909, 1979), 'better.tools.data.get_volume_forward', 'get_volume_forward', (['(self.__end - start)', 'test_portion', 'portion_reversed'], {}), '(self.__end - start, test_portion, portion_reversed)\n', (1927, 1979), False, 'from better.tools.data import get_volume_forward, get_type_list\n'), ((2011, 2165), 'better.marketdata.globaldatamatrix.HistoryManager', 'gdm.HistoryManager', ([], {'coin_number': 'coin_filter', 'end': 'self.__end', 'volume_average_days': 'volume_average_days', 'volume_forward': 'volume_forward', 'online': 'online'}), '(coin_number=coin_filter, end=self.__end,\n volume_average_days=volume_average_days, volume_forward=volume_forward,\n online=online)\n', (2029, 2165), True, 'import better.marketdata.globaldatamatrix as gdm\n'), ((2834, 2927), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.__global_data.minor_axis', 'columns': 'self.__global_data.major_axis'}), '(index=self.__global_data.minor_axis, columns=self.\n __global_data.major_axis)\n', (2846, 2927), True, 'import pandas as pd\n'), ((3474, 3667), 'better.marketdata.replaybuffer.ReplayBuffer', 'rb.ReplayBuffer', ([], {'start_index': 'self._train_ind[0]', 'end_index': 'end_index', 'sample_bias': 'buffur_bias_ratio', 'batch_size': 'self.__batch_size', 'coin_number': 'self.__coin_no', 'is_permed': 'self.__is_permed'}), '(start_index=self._train_ind[0], end_index=end_index,\n sample_bias=buffur_bias_ratio, batch_size=self.__batch_size,\n coin_number=self.__coin_no, is_permed=self.__is_permed)\n', (3489, 3667), True, 'import better.marketdata.replaybuffer as rb\n'), ((3904, 4043), 'logging.info', 'logging.info', (["('the number of training examples is %s, of test examples is %s' % (self.\n _num_train_samples, self._num_test_samples))"], {}), "(\n 'the number of training examples is %s, of test examples is %s' % (self\n ._num_train_samples, self._num_test_samples))\n", (3916, 4043), False, 'import logging\n'), ((4678, 4716), 'better.tools.configprocess.parse_time', 'parse_time', (["input_config['start_date']"], {}), "(input_config['start_date'])\n", (4688, 4716), False, 'from better.tools.configprocess import parse_time\n'), ((4731, 4767), 'better.tools.configprocess.parse_time', 'parse_time', (["input_config['end_date']"], {}), "(input_config['end_date'])\n", (4741, 4767), False, 'from better.tools.configprocess import parse_time\n'), ((7216, 7232), 'numpy.array', 'np.array', (['indexs'], {}), '(indexs)\n', (7224, 7232), True, 'import numpy as np\n'), ((7418, 7429), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (7426, 7429), True, 'import numpy as np\n'), ((8062, 8090), 'numpy.arange', 'np.arange', (['self._num_periods'], {}), '(self._num_periods)\n', (8071, 8090), True, 'import numpy as np\n'), ((8137, 8169), 'numpy.split', 'np.split', (['indices', 'portion_split'], {}), '(indices, portion_split)\n', (8145, 8169), True, 'import numpy as np\n'), ((8330, 8358), 'numpy.arange', 'np.arange', (['self._num_periods'], {}), '(self._num_periods)\n', (8339, 8358), True, 'import numpy as np\n'), ((8405, 8437), 'numpy.split', 'np.split', (['indices', 'portion_split'], {}), '(indices, portion_split)\n', (8413, 8437), True, 'import numpy as np\n'), ((7940, 7964), 'numpy.array', 'np.array', (['[test_portion]'], {}), '([test_portion])\n', (7948, 7964), True, 'import numpy as np\n'), ((8207, 8232), 'numpy.array', 
'np.array', (['[train_portion]'], {}), '([train_portion])\n', (8215, 8232), True, 'import numpy as np\n')]
|
# How to Do Linear Regression using Gradient Descent - Live session from 3/29/17
# https://www.youtube.com/watch?v=XdM6ER7zTLk
# https://github.com/llSourcell/linear_regression_live
# My modification uses NumPy throughout, which can be faster.
import numpy as np
def computeErrorForGivenPoints(m, b, points):
x, y = points[:, 0], points[:, 1]
squareDiff = np.square(y - (m*x + b))
totalError = squareDiff.mean()
return totalError
def step_gradient(mCurrent, bCurrent, points, learningRate):
""" gradient descent """
x, y = points[:, 0], points[:, 1]
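    # residual d = (m*x + b) - y; the mean-squared-error gradients are
    #   dE/db = 2 * mean(d)   and   dE/dm = 2 * mean(x * d),
    # which is exactly what the next four lines compute.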
bGradient = (mCurrent*x + bCurrent) - y
mGradient = x*bGradient
mGradient = 2.*mGradient.mean()
bGradient = 2.*bGradient.mean()
newM = mCurrent - learningRate*mGradient
newB = bCurrent - learningRate*bGradient
return newM, newB
def gradient_descent_runner(points, startingM, startingB, learningRate, numIterations):
m = startingM
b = startingB
for i in range(numIterations):
m, b = step_gradient(m, b, points, learningRate)
return m, b
def run():
points = np.genfromtxt('data.csv', delimiter=',')
# hyperparameter(s)
learningRate = .0001
# y = mx + b (slope formula)
initialM = 0.
initialB = 0.
numIterations = 1000
print('Starting gradient descent at m = {}, b = {}, error = {}'.format(initialM, initialB, computeErrorForGivenPoints(initialM, initialB, points))) # error = 5565.1078
print('Running...')
m, b = gradient_descent_runner(points, initialM, initialB, learningRate, numIterations)
print('After {} iterations:'.format(numIterations))
print('m =', m) # 1.4777
print('b =', b) # 0.0889
print('error = ', computeErrorForGivenPoints(m, b, points)) # 112.6148
if __name__ == "__main__":
run()
|
[
"numpy.genfromtxt",
"numpy.square"
] |
[((391, 417), 'numpy.square', 'np.square', (['(y - (m * x + b))'], {}), '(y - (m * x + b))\n', (400, 417), True, 'import numpy as np\n'), ((1139, 1179), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.csv"""'], {'delimiter': '""","""'}), "('data.csv', delimiter=',')\n", (1152, 1179), True, 'import numpy as np\n')]
|
import numpy as np
from pyNN.random import NumpyRNG
from scipy import signal
from sklearn.linear_model import LinearRegression
import math
def gaussian_convolution(spikes,dt):
#---- takes a spiketrain and the simulation time constant
# and computes the smoothed spike rate
#-----works only after the simulation has run; not online!!!!!!!!
kernel_size = 10
gaussian_kernel = signal.gaussian(kernel_size, std=2)
scaling_factor = 1/np.sum(gaussian_kernel)*1/dt
gauss_rate = np.convolve(spikes,gaussian_kernel,mode='same')*scaling_factor
mean_rate = np.mean(gauss_rate)
return mean_rate
def spike_mean_rate(spikes, sim_time):
return len(spikes) / sim_time
def generate_testImage(direction):
potential = 100
if direction=="left":
return [potential,0,0,potential,0,0,potential,0,0]
elif direction=='middle':
return [0,potential,0,0,potential,0,0,potential,0]
elif direction=='right':
return [0,0,potential,0,0,potential,0,0,potential]
else:
return [0,0,0,0,0,0,0,0,0]
# Labeled image has the form (image, label)
# Label is a list [on1, on2], on# being the correct value for
# the output neurons
def generate_labeledImages(nr):
labeledImages = []
    for i in range(nr // 3):
labeledImages.append((generate_testImage("right"), [0,10]))
labeledImages.append((generate_testImage("middle"), [0,0]))
labeledImages.append((generate_testImage("left"), [10,0]))
return labeledImages
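# Minimal usage note (not part of the original experiment): each entry is an
# (image, [target_rate_left, target_rate_right]) pair, so generate_labeledImages(3)
# yields one right-, one middle- and one left-moving image with their target rates.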
# title: title of result
# strains: spiking trains
def print_mean_spike_rate(strains):
mean_left = spike_mean_rate(strains[0], param.simulation_time)
mean_right = spike_mean_rate(strains[1], param.simulation_time)
print('Mean rate readout neurons (left, right)')
print('(' + str(mean_left) + ',' + str(mean_right) + ')')
return (mean_left, mean_right)
def compute_linear_weights(X, rout_left, rout_right):
print('size of X',np.size(X))
regr1 = LinearRegression()
regr1.fit(X,rout_left)
#print('Coefficients: \n', regr1.coef_)
w1 = regr1.coef_
regr2 = LinearRegression()
regr2.fit(X,rout_right)
#print('Coefficients: \n', regr2.coef_)
w2 = regr2.coef_
w = []
for i in range(param.reservoir_nr):
w.append(w1[i])
w.append(w2[i])
return w
def compute_weights(X, rout_left, rout_right):
######### Fit weights to each output neuron with linear regression ###########
w1 = np.linalg.lstsq(X.T.dot(X) + 0.1*np.identity(param.reservoir_nr), X.T.dot(rout_left))[0].tolist()
# The coefficients
print('Weights w1 reservoir - readout neuron left')
print(w1)
w2 = np.linalg.lstsq(X.T.dot(X) + 0.1*np.identity(param.reservoir_nr), X.T.dot(rout_right))[0].tolist()
print('Weights w2 reservoir - readout neuron right')
print(w2)
# Connection['r2rout'] looks like
# [ [r0, rout0, value], [r0, rout1, v], [r1, rout0, v] ... ]
w = []
for i in range(param.reservoir_nr):
w.append(w1[i])
w.append(w2[i])
return w
def compute_weights_exc_inh(X, rout_left, rout_right):
######### Fit weights to each output neuron with linear regression ###########
w1 = np.linalg.lstsq(X.T.dot(X) + 0.1*np.identity(param.res_exc_nr+param.res_inh_nr), X.T.dot(rout_left))[0].tolist()
# The coefficients
print('Weights w1 reservoir - readout neuron left')
print(w1)
w2 = np.linalg.lstsq(X.T.dot(X) + 0.1*np.identity(param.res_exc_nr+param.res_inh_nr), X.T.dot(rout_right))[0].tolist()
print('Weights w2 reservoir - readout neuron right')
print(w2)
# Connection['r2rout'] looks like
# [ [r0, rout0, value], [r0, rout1, v], [r1, rout0, v] ... ]
w = []
for i in range(param.res_exc_nr+param.res_inh_nr):
w.append(w1[i])
w.append(w2[i])
w_exc = []
for i in range(param.res_exc_nr):
w_exc.append(w1[i])
w_exc.append(w2[i])
w_inh = []
for i in range(param.res_inh_nr):
w_inh.append(w1[param.res_exc_nr + i])
w_inh.append(w2[param.res_exc_nr + i])
return (w_exc, w_inh)
class param:
seed = 8658764 # Seed for reproduction of random number
    rng = NumpyRNG(seed=seed)  # Use seed to reproduce random numbers
input_nr = 9 # Number of input neurons
readout_nr = 2 # Number of readout neurons
reservoir_nr = 50 # Number of reservour neurons
simulation_time = 19.0 # Simulation time for each input
dt = 1 # Timestep in simulation
res_pconn = 0.1 # sparse connection probability for reservoir
images_train_nr = 9 # Number of training images to train with,
# Must be a factor of 3
images_test_nr = 9 # Number of test images
images_train = generate_labeledImages(images_train_nr)
images_test = generate_labeledImages(images_test_nr)
# If network uses excitatory and inhibatory neurons
res_exc_nr = int(math.ceil(reservoir_nr*0.8)) # Number of excitatory neurons
res_inh_nr = int(math.floor(reservoir_nr*0.2)) # Number of inhibitory neurons
print('exc:', res_exc_nr)
|
[
"numpy.identity",
"numpy.mean",
"pyNN.random.NumpyRNG",
"numpy.convolve",
"math.ceil",
"math.floor",
"numpy.size",
"numpy.sum",
"sklearn.linear_model.LinearRegression"
] |
[((558, 577), 'numpy.mean', 'np.mean', (['gauss_rate'], {}), '(gauss_rate)\n', (565, 577), True, 'import numpy as np\n'), ((1862, 1880), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1878, 1880), False, 'from sklearn.linear_model import LinearRegression\n'), ((1975, 1993), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1991, 1993), False, 'from sklearn.linear_model import LinearRegression\n'), ((3902, 3912), 'pyNN.random.NumpyRNG', 'NumpyRNG', ([], {}), '()\n', (3910, 3912), False, 'from pyNN.random import NumpyRNG\n'), ((479, 528), 'numpy.convolve', 'np.convolve', (['spikes', 'gaussian_kernel'], {'mode': '"""same"""'}), "(spikes, gaussian_kernel, mode='same')\n", (490, 528), True, 'import numpy as np\n'), ((1841, 1851), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (1848, 1851), True, 'import numpy as np\n'), ((4561, 4590), 'math.ceil', 'math.ceil', (['(reservoir_nr * 0.8)'], {}), '(reservoir_nr * 0.8)\n', (4570, 4590), False, 'import math\n'), ((4640, 4670), 'math.floor', 'math.floor', (['(reservoir_nr * 0.2)'], {}), '(reservoir_nr * 0.2)\n', (4650, 4670), False, 'import math\n'), ((433, 456), 'numpy.sum', 'np.sum', (['gaussian_kernel'], {}), '(gaussian_kernel)\n', (439, 456), True, 'import numpy as np\n'), ((2341, 2372), 'numpy.identity', 'np.identity', (['param.reservoir_nr'], {}), '(param.reservoir_nr)\n', (2352, 2372), True, 'import numpy as np\n'), ((2531, 2562), 'numpy.identity', 'np.identity', (['param.reservoir_nr'], {}), '(param.reservoir_nr)\n', (2542, 2562), True, 'import numpy as np\n'), ((3029, 3077), 'numpy.identity', 'np.identity', (['(param.res_exc_nr + param.res_inh_nr)'], {}), '(param.res_exc_nr + param.res_inh_nr)\n', (3040, 3077), True, 'import numpy as np\n'), ((3234, 3282), 'numpy.identity', 'np.identity', (['(param.res_exc_nr + param.res_inh_nr)'], {}), '(param.res_exc_nr + param.res_inh_nr)\n', (3245, 3282), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter, sobel
from scipy.ndimage.filters import laplace
def calc_gradients_test(test_dir):
for i in range(24):
calc_gradients(test_dir + '/test{}'.format(i))
def calc_gradients(dir):
g_noisy_dir = dir + '/g_noisy.png'
p_noisy_dir = dir + '/p_noisy.png'
g_noisy = Image.open(g_noisy_dir)
g_noisy = np.asarray(g_noisy)
p_noisy = Image.open(p_noisy_dir)
p_noisy = np.asarray(p_noisy)
g_noisy_grad = gradients(g_noisy)
p_noisy_grad = gradients(p_noisy)
Image.fromarray(g_noisy_grad).save(dir + '/g_noisy_grad.png')
Image.fromarray(p_noisy_grad).save(dir + '/p_noisy_grad.png')
def gradients(img):
"""Compute the xy derivatives of the input buffer. This helper is used in the _preprocess_<base_model>(...) functions
Args:
buf(np.array)[h, w, c]: input image-like tensor.
Returns:
(np.array)[h, w, 2*c]: horizontal and vertical gradients of buf.
"""
# dx = img[:, 1:, ...] - img[:, :-1, ...]
# dy = img[1:, ...] - img[:-1, ...]
# dx = np.pad(dx, [[0, 0], [1, 0], [0, 0]], mode="constant") # zero padding o the left
# dy = np.pad(dy, [[1, 0], [0, 0], [0, 0]], mode='constant') # zero padding to the up
# dx = sobel(gaussian_filter(img, 31), axis=0, mode='nearest')
# dy = sobel(gaussian_filter(img, 31), axis=1, mode='nearest')
dx = laplace(gaussian_filter(img, 10))
return dx
# calc_gradients('test/kpcn_decomp_mask_2/test5')
calc_gradients_test('test/kpcn_decomp_mask_2')
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.asarray",
"scipy.ndimage.gaussian_filter"
] |
[((365, 388), 'PIL.Image.open', 'Image.open', (['g_noisy_dir'], {}), '(g_noisy_dir)\n', (375, 388), False, 'from PIL import Image\n'), ((403, 422), 'numpy.asarray', 'np.asarray', (['g_noisy'], {}), '(g_noisy)\n', (413, 422), True, 'import numpy as np\n'), ((437, 460), 'PIL.Image.open', 'Image.open', (['p_noisy_dir'], {}), '(p_noisy_dir)\n', (447, 460), False, 'from PIL import Image\n'), ((475, 494), 'numpy.asarray', 'np.asarray', (['p_noisy'], {}), '(p_noisy)\n', (485, 494), True, 'import numpy as np\n'), ((1427, 1451), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['img', '(10)'], {}), '(img, 10)\n', (1442, 1451), False, 'from scipy.ndimage import gaussian_filter, sobel\n'), ((575, 604), 'PIL.Image.fromarray', 'Image.fromarray', (['g_noisy_grad'], {}), '(g_noisy_grad)\n', (590, 604), False, 'from PIL import Image\n'), ((641, 670), 'PIL.Image.fromarray', 'Image.fromarray', (['p_noisy_grad'], {}), '(p_noisy_grad)\n', (656, 670), False, 'from PIL import Image\n')]
|
'''
Created on Dec 5, 2016
@author: wjadams
'''
import numpy as np
class AhpNode(object):
def __init__(self, parent_tree, name, nalts, pw=None):
self.children = []
self.name = name
self.alt_scores = np.zeros([nalts])
self.nalts = nalts
self.parent_tree = parent_tree
self.pw = pw
        if pw is not None:
self.add_children_pw(pw)
def add_children_pw(self, pw):
for alt_name in pw.alt_names:
self.add_child(alt_name)
def add_child(self, alt_name):
self.children.append(AhpNode(self.parent_tree, alt_name, self.nalts))
def add_alt(self):
self.alt_scores = np.append(self.alt_scores, 0)
self.nalts += 1
for child in self.children:
child.add_alt()
def set_alt_scores_old(self, new_scores):
if (len(new_scores)!=self.nalts):
raise NameError("Wrong length for new alt scores")
        self.alt_scores = np.array(new_scores)
def set_pw(self, pw):
if pw.nalts() != self.nchildren():
raise NameError("Wrong number of children in Pairwise")
self.pw = pw
def nchildren(self):
return len(self.children)
def has_children(self):
return len(self.children) != 0
def set_alt_scores(self, vals):
        nvals = np.array(vals, dtype=float)  # float dtype so the in-place normalisation below also works for int inputs
s = np.max(nvals)
if s != 0:
nvals /= s
self.alt_scores = nvals
def synthesize(self, user = None):
if not self.has_children():
return(self.alt_scores)
#This node has children
rval = np.zeros([self.nalts])
if (self.pw is not None) and (user is not None):
coeffs = self.pw.single_stats(user)
else:
coeffs = np.array([0 for i in self.children])
#print(rval)
count = 0
i = 0
for kid in self.children:
kid_vals = kid.synthesize(user)
if np.max(kid_vals) > 0:
count+=1
rval += coeffs[i] * kid_vals
i += 1
if count > 0:
rval /= (count+0.0)
return(rval)
def get_child(self, node_path_list):
if len(node_path_list) <= 0:
return(self)
for child in self.children:
if child.name == node_path_list[0]:
return(child.get_child(node_path_list[1:]))
#If we make it here, we could not find a child
raise NameError("Could not find child `"+node_path_list[0]+"'")
class AhpTree(object):
def __init__(self, alt_names=None, pw=None):
self.usernames = []
        if alt_names is None:
alt_names = []
self.nalts = len(alt_names)
self.alt_names = alt_names
self.root = AhpNode(self, "root", self.nalts, pw)
def add_alt(self, alt_name):
self.alt_names.append(alt_name)
self.root.add_alt()
def synthesize(self, user=None):
return self.root.synthesize(user)
def get_node(self, node_path_list):
return self.root.get_child(node_path_list)
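# Minimal usage sketch (toy numbers, not from the original author): with no
# criteria children the tree simply returns the root's max-normalized alt scores.
if __name__ == '__main__':
    tree = AhpTree(alt_names=['alt1', 'alt2', 'alt3'])
    tree.get_node([]).set_alt_scores([1.0, 2.0, 4.0])  # normalized to [0.25, 0.5, 1.0]
    print(tree.synthesize())                           # -> [0.25 0.5  1.  ]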
|
[
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.max"
] |
[((234, 251), 'numpy.zeros', 'np.zeros', (['[nalts]'], {}), '([nalts])\n', (242, 251), True, 'import numpy as np\n'), ((702, 731), 'numpy.append', 'np.append', (['self.alt_scores', '(0)'], {}), '(self.alt_scores, 0)\n', (711, 731), True, 'import numpy as np\n'), ((1010, 1030), 'numpy.array', 'np.array', (['new_scores'], {}), '(new_scores)\n', (1018, 1030), True, 'import numpy as np\n'), ((1441, 1455), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (1449, 1455), True, 'import numpy as np\n'), ((1468, 1481), 'numpy.max', 'np.max', (['nvals'], {}), '(nvals)\n', (1474, 1481), True, 'import numpy as np\n'), ((1719, 1741), 'numpy.zeros', 'np.zeros', (['[self.nalts]'], {}), '([self.nalts])\n', (1727, 1741), True, 'import numpy as np\n'), ((1882, 1920), 'numpy.array', 'np.array', (['[(0) for i in self.children]'], {}), '([(0) for i in self.children])\n', (1890, 1920), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.max', 'np.max', (['kid_vals'], {}), '(kid_vals)\n', (2071, 2081), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Bevington & Robinson's model of dual exponential decay
References::
[5] Bevington & Robinson (1992).
Data Reduction and Error Analysis for the Physical Sciences,
Second Edition, McGraw-Hill, Inc., New York.
"""
from numpy import exp, sqrt, vstack, array, asarray
def dual_exponential(t, A, B, C, tauA, tauB):
"""
Computes dual exponential decay.
y = A exp(-t/tauA) + B exp(-t/tauB) + C
"""
t = asarray(t)
return C + A*exp(-t/tauA) + B*exp(-t/tauB)
# data from Chapter 8 of [5].
data = array([[15, 775], [30, 479], [45, 380], [60, 302],
[75, 185], [90, 157], [105,137], [120, 119], [135, 110],
[150, 89], [165, 74], [180, 61], [195, 66], [210, 68],
[225, 48], [240, 54], [255, 51], [270, 46], [285, 55],
[300, 29], [315, 28], [330, 37], [345, 49], [360, 26],
[375, 35], [390, 29], [405, 31], [420, 24], [435, 25],
[450, 35], [465, 24], [480, 30], [495, 26], [510, 28],
[525, 21], [540, 18], [555, 20], [570, 27], [585, 17],
[600, 17], [615, 14], [630, 17], [645, 24], [660, 11],
[675, 22], [690, 17], [705, 12], [720, 10], [735, 13],
[750, 16], [765, 9], [780, 9], [795, 14], [810, 21],
[825, 17], [840, 13], [855, 12], [870, 18], [885, 10]])
# Set uncertainty to sqrt(counts)
data = { 'x': data[:, 0], 'y': data[:, 1], 'dy': sqrt(data[:, 1]) }
#coeff = {'A': 1, 'B': 1, 'C': 1, 'tauA': 1, 'tauB': 1}
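# Minimal usage sketch (the parameter values below are illustrative guesses, not
# fitted results): evaluate the model on the measured times and form a chi-square.
if __name__ == '__main__':
    y_model = dual_exponential(data['x'], A=1000., B=150., C=10., tauA=30., tauB=200.)
    chi2 = (((data['y'] - y_model) / data['dy'])**2).sum()
    print("chi-square for the guessed parameters:", chi2)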
|
[
"numpy.exp",
"numpy.array",
"numpy.sqrt",
"numpy.asarray"
] |
[((557, 1255), 'numpy.array', 'array', (['[[15, 775], [30, 479], [45, 380], [60, 302], [75, 185], [90, 157], [105, \n 137], [120, 119], [135, 110], [150, 89], [165, 74], [180, 61], [195, 66\n ], [210, 68], [225, 48], [240, 54], [255, 51], [270, 46], [285, 55], [\n 300, 29], [315, 28], [330, 37], [345, 49], [360, 26], [375, 35], [390, \n 29], [405, 31], [420, 24], [435, 25], [450, 35], [465, 24], [480, 30],\n [495, 26], [510, 28], [525, 21], [540, 18], [555, 20], [570, 27], [585,\n 17], [600, 17], [615, 14], [630, 17], [645, 24], [660, 11], [675, 22],\n [690, 17], [705, 12], [720, 10], [735, 13], [750, 16], [765, 9], [780, \n 9], [795, 14], [810, 21], [825, 17], [840, 13], [855, 12], [870, 18], [\n 885, 10]]'], {}), '([[15, 775], [30, 479], [45, 380], [60, 302], [75, 185], [90, 157], [\n 105, 137], [120, 119], [135, 110], [150, 89], [165, 74], [180, 61], [\n 195, 66], [210, 68], [225, 48], [240, 54], [255, 51], [270, 46], [285, \n 55], [300, 29], [315, 28], [330, 37], [345, 49], [360, 26], [375, 35],\n [390, 29], [405, 31], [420, 24], [435, 25], [450, 35], [465, 24], [480,\n 30], [495, 26], [510, 28], [525, 21], [540, 18], [555, 20], [570, 27],\n [585, 17], [600, 17], [615, 14], [630, 17], [645, 24], [660, 11], [675,\n 22], [690, 17], [705, 12], [720, 10], [735, 13], [750, 16], [765, 9], [\n 780, 9], [795, 14], [810, 21], [825, 17], [840, 13], [855, 12], [870, \n 18], [885, 10]])\n', (562, 1255), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((461, 471), 'numpy.asarray', 'asarray', (['t'], {}), '(t)\n', (468, 471), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((1292, 1305), 'numpy.sqrt', 'sqrt', (['data[1]'], {}), '(data[1])\n', (1296, 1305), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((506, 520), 'numpy.exp', 'exp', (['(-t / tauB)'], {}), '(-t / tauB)\n', (509, 520), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((489, 503), 'numpy.exp', 'exp', (['(-t / tauA)'], {}), '(-t / tauA)\n', (492, 503), False, 'from numpy import exp, sqrt, vstack, array, asarray\n')]
|
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
path = 'ImageAttendance'
images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cl in myList:
curImg = cv2.imread(f'{path}/{cl}')
images.append(curImg)
classNames.append(os.path.splitext(cl)[0])
print(classNames)
def findEncodings(images):
encodelist = []
for img in images:
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
encode = face_recognition.face_encodings(img)[0]
encodelist.append(encode)
return encodelist
def markAttendance(name):
with open('Attendance.csv','r+') as f :
myDataList = f.readlines()
nameList= []
for line in myDataList:
entry = line.split(',')
nameList.append(entry[0])
if name not in nameList:
now = datetime.now()
dtString = now.strftime('%H:%M:%S')
f.writelines(f'\n{name},{dtString}')
markAttendance('Elon')
encodeListKnown = findEncodings(images)
print("Encoding Complete")
cap = cv2.VideoCapture(0)
while True:
success,img = cap.read()
imgS = cv2.resize(img,(0,0),None,fx=0.25, fy=0.25)
imgS = cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)
facesCurFrame = face_recognition.face_locations(imgS)
encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
for encodeFace, faceLoc in zip(encodesCurFrame,facesCurFrame):
matches = face_recognition.compare_faces(encodeListKnown,encodeFace)
faceDis = face_recognition.face_distance(encodeListKnown,encodeFace)
print(faceDis)
matchIndex = np.argmin(faceDis)
        if faceDis[matchIndex] < 0.50:
            name = classNames[matchIndex].upper()
            markAttendance(name)
        else:
            name = 'Unknown'
        # print(name)
        y1, x2, y2, x1 = faceLoc
        y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.rectangle(img, (x1, y2-35), (x2, y2), (0, 255, 0), cv2.FILLED)
        cv2.putText(img, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 1)
cv2.imshow('Webcam',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
|
[
"cv2.rectangle",
"face_recognition.face_locations",
"os.listdir",
"os.path.splitext",
"cv2.imshow",
"cv2.putText",
"datetime.datetime.now",
"face_recognition.face_distance",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"face_recognition.face_encodings",
"face_recognition.compare_faces",
"numpy.argmin",
"cv2.resize",
"cv2.imread"
] |
[((157, 173), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (167, 173), False, 'import os\n'), ((1066, 1085), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1082, 1085), False, 'import cv2\n'), ((220, 246), 'cv2.imread', 'cv2.imread', (['f"""{path}/{cl}"""'], {}), "(f'{path}/{cl}')\n", (230, 246), False, 'import cv2\n'), ((1139, 1186), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)', 'None'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(img, (0, 0), None, fx=0.25, fy=0.25)\n', (1149, 1186), False, 'import cv2\n'), ((1194, 1231), 'cv2.cvtColor', 'cv2.cvtColor', (['imgS', 'cv2.COLOR_BGR2RGB'], {}), '(imgS, cv2.COLOR_BGR2RGB)\n', (1206, 1231), False, 'import cv2\n'), ((1252, 1289), 'face_recognition.face_locations', 'face_recognition.face_locations', (['imgS'], {}), '(imgS)\n', (1283, 1289), False, 'import face_recognition\n'), ((1312, 1364), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['imgS', 'facesCurFrame'], {}), '(imgS, facesCurFrame)\n', (1343, 1364), False, 'import face_recognition\n'), ((2124, 2149), 'cv2.imshow', 'cv2.imshow', (['"""Webcam"""', 'img'], {}), "('Webcam', img)\n", (2134, 2149), False, 'import cv2\n'), ((423, 459), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (435, 459), False, 'import cv2\n'), ((1450, 1509), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1480, 1509), False, 'import face_recognition\n'), ((1527, 1586), 'face_recognition.face_distance', 'face_recognition.face_distance', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1557, 1586), False, 'import face_recognition\n'), ((1630, 1648), 'numpy.argmin', 'np.argmin', (['faceDis'], {}), '(faceDis)\n', (1639, 1648), True, 'import numpy as np\n'), ((1914, 1968), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (1927, 1968), False, 'import cv2\n'), ((1969, 2037), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n', (1982, 2037), False, 'import cv2\n'), ((2036, 2135), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 6, y2 - 6)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(1)', '(255, 255, 255)', '(1)'], {}), '(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,\n (255, 255, 255), 1)\n', (2047, 2135), False, 'import cv2\n'), ((295, 315), 'os.path.splitext', 'os.path.splitext', (['cl'], {}), '(cl)\n', (311, 315), False, 'import os\n'), ((476, 512), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['img'], {}), '(img)\n', (507, 512), False, 'import face_recognition\n'), ((856, 870), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (868, 870), False, 'from datetime import datetime\n'), ((2156, 2170), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2167, 2170), False, 'import cv2\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import os
from federatedscope.core.message import Message
import logging
logger = logging.getLogger(__name__)
def plot_target_loss(loss_list, outdir):
'''
Args:
        loss_list: the list of loss regarding the target data
outdir: the directory to store the loss
'''
target_data_loss = np.vstack(loss_list)
logger.info(target_data_loss.shape)
plt.plot(target_data_loss)
plt.savefig(os.path.join(outdir, 'target_loss.png'))
plt.close()
def sav_target_loss(loss_list, outdir):
target_data_loss = np.vstack(loss_list)
np.savetxt(os.path.join(outdir, 'target_loss.txt'),
target_data_loss.transpose(),
delimiter=',')
def callback_funcs_for_finish(self, message: Message):
logger.info(
"================= receiving Finish Message ============================"
)
    if message.content is not None:
self.trainer.update(message.content)
if self.is_attacker and self._cfg.attack.attack_method.lower(
) == "gradascent":
logger.info(
"================= start attack post-processing ======================="
)
plot_target_loss(self.trainer.ctx.target_data_loss,
self.trainer.ctx.outdir)
sav_target_loss(self.trainer.ctx.target_data_loss,
self.trainer.ctx.outdir)
def add_atk_method_to_Client_GradAscent(client_class):
setattr(client_class, 'callback_funcs_for_finish',
callback_funcs_for_finish)
return client_class
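# Minimal usage sketch with a stand-in class (in practice the real FederatedScope
# Client class would be passed in): the helper simply attaches the finish callback
# as a method of whatever client class it is given.
if __name__ == '__main__':
    class _DummyClient:
        pass
    _Patched = add_atk_method_to_Client_GradAscent(_DummyClient)
    assert hasattr(_Patched, 'callback_funcs_for_finish')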
|
[
"logging.getLogger",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.vstack"
] |
[((134, 161), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (151, 161), False, 'import logging\n'), ((367, 387), 'numpy.vstack', 'np.vstack', (['loss_list'], {}), '(loss_list)\n', (376, 387), True, 'import numpy as np\n'), ((432, 458), 'matplotlib.pyplot.plot', 'plt.plot', (['target_data_loss'], {}), '(target_data_loss)\n', (440, 458), True, 'import matplotlib.pyplot as plt\n'), ((520, 531), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (529, 531), True, 'import matplotlib.pyplot as plt\n'), ((597, 617), 'numpy.vstack', 'np.vstack', (['loss_list'], {}), '(loss_list)\n', (606, 617), True, 'import numpy as np\n'), ((475, 514), 'os.path.join', 'os.path.join', (['outdir', '"""target_loss.png"""'], {}), "(outdir, 'target_loss.png')\n", (487, 514), False, 'import os\n'), ((633, 672), 'os.path.join', 'os.path.join', (['outdir', '"""target_loss.txt"""'], {}), "(outdir, 'target_loss.txt')\n", (645, 672), False, 'import os\n')]
|
import numpy as np
import os, sys, re
import mpi4py
import time
from mpi4py import MPI
# Paths
MACHINE_NAME = 'tmp'
TUNER_NAME = 'tmp'
ROOTDIR = os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir, os.pardir))
EXPDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre-driver/exp", MACHINE_NAME + '/' + TUNER_NAME))
EXCUDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre/src/test/ij"))
# print(EXPDIR)
# print(EXCUDIR)
max_setup_time = 1000
max_solve_time = 1000
comm = MPI.COMM_WORLD
# nprocs = comm.Get_size()
# print("ENTER HYPRE DRIVER, nprocs=", nprocs)
def execute(params, RUNDIR, niter = 1, max_iter = '1000', tol = '1e-8'):
print("params: ", params)
# extract arguments
Problem = params['problem_name']; solver = params['solver']
coeffs_c = params['coeffs_c']; coeffs_a = params['coeffs_a']
nx = params['nx']; ny = params['ny']; nz = params['nz']
Px = params['Px']; Py = params['Py']; Pz = params['Pz']
strong_threshold = params['strong_threshold']
trunc_factor = params['trunc_factor']
P_max_elmts = params['P_max_elmts']
coarsen_type = params['coarsen_type']
relax_type = params['relax_type']
smooth_type = params['smooth_type']
smooth_num_levels = params['smooth_num_levels']
interp_type = params['interp_type']
agg_num_levels = params['agg_num_levels']
nthreads = params['nthreads']
npernode = params['npernode']
# reshape for args
NProc = Px*Py*Pz
Size = "-n %d %d %d " % (nx, ny, nz)
ProcTopo = "-P %d %d %d " % (Px, Py, Pz)
StrThr = f"-th {strong_threshold} "
TrunFac = f"-tr {trunc_factor} "
PMax = "-Pmx %d " % P_max_elmts
RelType = "-rlx %d " % relax_type
SmooType = "-smtype %d " % smooth_type
SmooLev = "-smlv %d " % smooth_num_levels
InterType = "-interptype %d " % interp_type
AggLev = "-agg_nl %d " % agg_num_levels
CoarsTypes = {0:"-cljp", 1:"-ruge", 2:"-ruge2b", 3:"-ruge2b", 4:"-ruge3c", 6:"-falgout", 8:"-pmis", 10:"-hmis"}
CoarsType = CoarsTypes[coarsen_type]
outputfilename = os.path.abspath(os.path.join(RUNDIR,f"ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}"))
myargs = Problem + Size + coeffs_c + coeffs_a + f"-solver {solver} " + ProcTopo + StrThr + TrunFac + PMax + RelType + SmooType + SmooLev + InterType + AggLev + CoarsType
myargslist = [Problem, '-n', f'{nx}', f'{ny}', f'{nz}', coeffs_c, coeffs_a, '-solver', f'{solver}', '-P', f'{Px}', f'{Py}', f'{Pz}', '-th', f'{strong_threshold}', '-tr', f'{trunc_factor}',
'-Pmx', f'{P_max_elmts}', '-rlx', f'{relax_type}', '-smtype', f'{smooth_type}', '-smlv', f'{smooth_num_levels}', '-interptype', f'{interp_type}', '-agg_nl', f'{agg_num_levels}', CoarsType, '-logfile', outputfilename, '-max_iter', max_iter, '-tol', tol]
# default params
# myargslist = [Problem, '-n', f'{nx}', f'{ny}', f'{nz}', coeffs_c, coeffs_a, '-solver', f'{solver}',
# '-logfile', outputfilename, '-max_iter', max_iter, '-tol', tol]
# print(f"myargslist: ", myargslist)
def read_output(outputfilename):
setup_time = max_setup_time
solve_time = max_solve_time
with open(outputfilename,'r') as outputfile:
while True:
line = outputfile.readline()
if not line:
break
if 'ERROR' in line:
break
if 'Setup phase times' in line:
outputfile.readline()
outputfile.readline()
setup_wallclocktime_str = outputfile.readline()
                    time_str = re.findall(r"\d+\.\d+", setup_wallclocktime_str)
if time_str:
setup_time = float(time_str[0])
if 'Solve phase times' in line:
outputfile.readline()
outputfile.readline()
solve_wallclocktime_str = outputfile.readline()
                    time_str = re.findall(r"\d+\.\d+", solve_wallclocktime_str)
if time_str:
solve_time = float(time_str[0])
runtime = setup_time + solve_time
print("[----- runtime = %f -----]\n" % runtime)
return runtime
def v_parallel():
info = MPI.Info.Create()
info.Set('env', 'OMP_NUM_THREADS=%d\n' %(nthreads))
info.Set('npernode','%d'%(npernode)) # YL: npernode is deprecated in openmpi 4.0, but no other parameter (e.g. 'map-by') works
print('exec ', EXCUDIR, 'args: ', myargslist, 'nproc', NProc)
runtimes = []
for i in range(niter):
# os.system("rm -rf %s"%(outputfilename))
comm = MPI.COMM_SELF.Spawn(EXCUDIR, args=myargslist, maxprocs=NProc,info=info)
comm.Disconnect()
time.sleep(2.0) # this gives new MPI_spawn more time to find the resource
runtime = read_output(outputfilename)
runtimes.append(runtime)
return np.mean(runtimes)
runtime = v_parallel()
return runtime
def hypredriver(params, niter=1, JOBID: int=-1, max_iter = '1000', tol = '1e-8', budget=None):
global EXPDIR
global ROOTDIR
MACHINE_NAME = os.environ['MACHINE_NAME']
TUNER_NAME = os.environ['TUNER_NAME']
EXPDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre-driver/exp", MACHINE_NAME + '/' + TUNER_NAME))
# map budget to tol or max_iter, if budget is given
    if budget is not None:
assert budget<=10, "Max_budget = 10"
assert budget>=1, "Min_budget = 1"
tol = str(10**(-14*budget/15 + 4/3))
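        # e.g. budget = 10 maps to tol = 1e-8 (the tightest tolerance); smaller
        # budgets loosen tol according to 10**(-14*budget/15 + 4/3).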
max_iter = '1000'
# print(f"Hypredriver received budget, budget={budget}, tol={tol}")
if (JOBID==-1): # -1 is the default value if jobid is not set from command line
JOBID = os.getpid()
RUNDIR = os.path.abspath(os.path.join(EXPDIR, str(JOBID)))
os.makedirs("%s"%(RUNDIR), exist_ok=True)
dtype = [("nx", int), ("ny", int), ("nz", int), ("coeffs_a", 'U10'), ("coeffs_c", 'U10'), ("problem_name", 'U10'), ("solver", int),
("Px", int), ("Py", int), ("Pz", int), ("strong_threshold", float),
("trunc_factor", float), ("P_max_elmts", int), ("coarsen_type", int), ("relax_type", int),
("smooth_type", int), ("smooth_num_levels", int), ("interp_type", int), ("agg_num_levels", int), ("nthreads", int), ("npernode", int)]
params = np.array(params, dtype=dtype)
times = []
for param in params:
print(f"Current param {param}, tol={tol}")
time_cur = execute(param, RUNDIR, niter=niter, max_iter = max_iter, tol = tol)
times.append(time_cur)
os.system('rm -fr %s'%(RUNDIR))
return times
if __name__ == "__main__":
os.environ['MACHINE_NAME'] = 'cori'
os.environ['TUNER_NAME'] = 'GPTune'
params = [(60, 50, 80, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 2, 2, 2, 0.25, 0, 4, 10, 8, 6, 0, 6, 0, 1, 1),\
(60, 50, 80, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 2, 2, 2, 0.3, 0.2, 5, 10, 8, 6, 1, 6, 1, 1, 1)
]
times = hypredriver(params, niter=1)
print(times)
|
[
"numpy.mean",
"os.makedirs",
"os.path.join",
"time.sleep",
"mpi4py.MPI.Info.Create",
"os.path.realpath",
"numpy.array",
"mpi4py.MPI.COMM_SELF.Spawn",
"os.getpid",
"os.system",
"re.findall"
] |
[((251, 325), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre-driver/exp"""', "(MACHINE_NAME + '/' + TUNER_NAME)"], {}), "(ROOTDIR, 'hypre-driver/exp', MACHINE_NAME + '/' + TUNER_NAME)\n", (263, 325), False, 'import os, sys, re\n'), ((353, 395), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre/src/test/ij"""'], {}), "(ROOTDIR, 'hypre/src/test/ij')\n", (365, 395), False, 'import os, sys, re\n'), ((6044, 6085), 'os.makedirs', 'os.makedirs', (["('%s' % RUNDIR)"], {'exist_ok': '(True)'}), "('%s' % RUNDIR, exist_ok=True)\n", (6055, 6085), False, 'import os, sys, re\n'), ((6567, 6596), 'numpy.array', 'np.array', (['params'], {'dtype': 'dtype'}), '(params, dtype=dtype)\n', (6575, 6596), True, 'import numpy as np\n'), ((6810, 6841), 'os.system', 'os.system', (["('rm -fr %s' % RUNDIR)"], {}), "('rm -fr %s' % RUNDIR)\n", (6819, 6841), False, 'import os, sys, re\n'), ((175, 201), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'import os, sys, re\n'), ((2071, 2284), 'os.path.join', 'os.path.join', (['RUNDIR', 'f"""ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}"""'], {}), "(RUNDIR,\n f'ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}'\n )\n", (2083, 2284), False, 'import os, sys, re\n'), ((4420, 4437), 'mpi4py.MPI.Info.Create', 'MPI.Info.Create', ([], {}), '()\n', (4435, 4437), False, 'from mpi4py import MPI\n'), ((5130, 5147), 'numpy.mean', 'np.mean', (['runtimes'], {}), '(runtimes)\n', (5137, 5147), True, 'import numpy as np\n'), ((5464, 5538), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre-driver/exp"""', "(MACHINE_NAME + '/' + TUNER_NAME)"], {}), "(ROOTDIR, 'hypre-driver/exp', MACHINE_NAME + '/' + TUNER_NAME)\n", (5476, 5538), False, 'import os, sys, re\n'), ((5965, 5976), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5974, 5976), False, 'import os, sys, re\n'), ((4840, 4912), 'mpi4py.MPI.COMM_SELF.Spawn', 'MPI.COMM_SELF.Spawn', (['EXCUDIR'], {'args': 'myargslist', 'maxprocs': 'NProc', 'info': 'info'}), '(EXCUDIR, args=myargslist, maxprocs=NProc, info=info)\n', (4859, 4912), False, 'from mpi4py import MPI\n'), ((4954, 4969), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (4964, 4969), False, 'import time\n'), ((3755, 3805), 're.findall', 're.findall', (['"""\\\\d+\\\\.\\\\d+"""', 'setup_wallclocktime_str'], {}), "('\\\\d+\\\\.\\\\d+', setup_wallclocktime_str)\n", (3765, 3805), False, 'import os, sys, re\n'), ((4123, 4173), 're.findall', 're.findall', (['"""\\\\d+\\\\.\\\\d+"""', 'solve_wallclocktime_str'], {}), "('\\\\d+\\\\.\\\\d+', solve_wallclocktime_str)\n", (4133, 4173), False, 'import os, sys, re\n')]
|
import math
import numpy as np
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 96)
self.fc2 = nn.Linear(96, 96)
self.fc3 = nn.Linear(96, 96)
self.fc4 = nn.Linear(96, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
        x = self.fc4(x)  # final layer is linear; F.linear would require an explicit weight argument
return x
class RlConfig:
def __init__(self,
pods_min,
pods_max,
resource_cost,
violation_cost,
autoscaling_period,
learning_rate,
discount_factor,
epsilon):
self.pods_min = pods_min
self.pods_max = pods_max
self.resource_cost = resource_cost
self.violation_cost = violation_cost
self.autoscaling_period = autoscaling_period
self.alpha = learning_rate
self.gamma = discount_factor
self.epsilon = epsilon
class HPA_Q_Learning:
def __init__(self, rl_config):
self.pods_min = rl_config.pods_min
self.pods_max = rl_config.pods_max
self.a_history = []
self.s_history = []
self.r_history = []
# (utilization, # of pods, actions)
#(2,10)
self.Q = Net()
self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.Q.parameters(), lr=0.01)
self.action_space = list(range(self.pods_min, self.pods_max+1))
self.alpha = rl_config.alpha
self.gamma = rl_config.gamma
self.epsilon = rl_config.epsilon
# m4.xlarge => 4 vCPU => 0.2 USD / hour
# 1 vCPU => 0.05 USD / hour
# pod => 0.2 core => 0.01 USD
# error => 0.0005 USD
self.resource_cost = rl_config.resource_cost
self.violation_cost = rl_config.violation_cost
self.autoscaling_period = rl_config.autoscaling_period
def convert_obs(self, obs):
u = int(float(obs.U) // 0.1)
c = int(obs.C[-1])
c_avg = sum(obs.C) / len(obs.C)
e = sum(obs.E)
reward = -1 * self.resource_cost * c_avg * self.autoscaling_period + -1 * self.violation_cost * e
state = (u, c)
self.s_history.append(state)
self.r_history.append(reward)
return state, reward
def epsilon_decay(self):
self.epsilon = self.epsilon * 0.9
def get_action(self, state):
max_q = float('-inf')
max_a = []
if np.random.rand() < self.epsilon:
return random.choice(self.action_space)
for i in range(self.pods_min, self.pods_max+1):
if max_q < self.Q[state[0], state[1], i]:
max_q = self.Q[state[0], state[1], i]
max_a = [i]
elif max_q == self.Q[state[0], state[1], i]:
max_a.append(i)
desired_c = random.choice(max_a)
self.a_history.append(desired_c)
return desired_c
def update(self, s, a, s_next, r_next):
self.Q[s[0], s[1], a] = self.Q[s[0], s[1], a] + self.alpha * (r_next + self.gamma * np.nanmax(self.Q[s_next[0], s_next[1],: ]) - self.Q[s[0], s[1], a])
|
[
"random.choice",
"numpy.random.rand",
"torch.nn.CrossEntropyLoss",
"numpy.nanmax",
"torch.nn.Linear"
] |
[((278, 294), 'torch.nn.Linear', 'nn.Linear', (['(2)', '(96)'], {}), '(2, 96)\n', (287, 294), True, 'import torch.nn as nn\n'), ((314, 331), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(96)'], {}), '(96, 96)\n', (323, 331), True, 'import torch.nn as nn\n'), ((351, 368), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(96)'], {}), '(96, 96)\n', (360, 368), True, 'import torch.nn as nn\n'), ((388, 405), 'torch.nn.Linear', 'nn.Linear', (['(96)', '(10)'], {}), '(96, 10)\n', (397, 405), True, 'import torch.nn as nn\n'), ((1452, 1473), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1471, 1473), True, 'import torch.nn as nn\n'), ((3004, 3024), 'random.choice', 'random.choice', (['max_a'], {}), '(max_a)\n', (3017, 3024), False, 'import random\n'), ((2608, 2624), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2622, 2624), True, 'import numpy as np\n'), ((2660, 2692), 'random.choice', 'random.choice', (['self.action_space'], {}), '(self.action_space)\n', (2673, 2692), False, 'import random\n'), ((3229, 3271), 'numpy.nanmax', 'np.nanmax', (['self.Q[s_next[0], s_next[1], :]'], {}), '(self.Q[s_next[0], s_next[1], :])\n', (3238, 3271), True, 'import numpy as np\n')]
|
import numpy as np
import cv2 as cv
from abc import ABC
class ins_pos_kalman_filter(ABC):
def __init__(self, F, Q, H, R, initial_state_mean, initial_state_covariance):
"""
abstract initialization of kalman filter for INS data fusion for position estimation
Matrix notation matches that provided by https://en.wikipedia.org/wiki/Kalman_filter
:param F: state transition model matrix
:param Q: process noise covariance matrix
:param H: observation model matrix
:param R: observation noise covariance matrix
"""
        if type(F) is not np.ndarray:
            raise TypeError('F matrix must be np.ndarray')
        if type(Q) is not np.ndarray:
            raise TypeError('Q matrix must be np.ndarray')
        if type(H) is not np.ndarray:
            raise TypeError('H matrix must be np.ndarray')
        if type(R) is not np.ndarray:
            raise TypeError('R matrix must be np.ndarray')
if F.shape[1] != H.shape[1]:
raise RuntimeError('F and H must have same number of columns')
if Q.shape[1] != R.shape[1]:
raise RuntimeError('Q and R must have same number of columns')
self._kf = cv.KalmanFilter(F.shape[1], Q.shape[1])
self._kf.transitionMatrix = F
self._kf.processNoiseCov = Q
self._kf.measurementMatrix = H
self._kf.measurementNoiseCov = R
self._kf.statePost = initial_state_mean
self._kf.errorCovPost = initial_state_covariance
def estimate(self, measurement):
"""
        incorporates a measurement into the Kalman filter, updates the estimate, and returns the current estimate
        provided by the Kalman filter
        :param measurement: the measurement from the sensors
        :return: the estimated state
        :return: the estimated state covariance
"""
self._kf.predict()
self._kf.correct(measurement)
return self._kf.statePost, self._kf.errorCovPost
class linear_gpsimu_pos_kalman_filter(ins_pos_kalman_filter):
def __init__(self, T, x0_mean, x0_cov):
"""
initializes linear kalman filter that fuses GPS and IMU sensors with linear transition matrices
:param T: time step in between estimations
"""
if type(T) not in [int, float]:
raise TypeError('T must be a number')
I3 = np.eye(3)
O3 = np.zeros((3, 3))
B = np.array([[1.], [1.], [1.]])
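        # Constant-acceleration kinematics: the 9-element state stacks position, velocity and acceleration,
        # so F propagates position and velocity over one time step of length T.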
F = np.block([[I3, T * I3, T ** 2 / 2 * I3],
[O3, I3, T * I3],
[O3, O3, I3]])
Q = np.diag(np.hstack([T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T]).flatten())
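        # Observation model: only position (first block row) and acceleration (last block row) are measured;
        # velocity is not observed directly.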
H = np.block([[I3, O3, O3],
[O3, O3, O3],
[O3, O3, I3]])
R = np.eye(9) # THIS IS A PLACE HOLDER, REPLACE WITH NOISE COV OF GPS AND IMU SENSORS
super().__init__(F, Q, H, R, x0_mean, x0_cov)
|
[
"numpy.block",
"numpy.eye",
"numpy.hstack",
"numpy.array",
"numpy.zeros",
"cv2.KalmanFilter"
] |
[((1212, 1251), 'cv2.KalmanFilter', 'cv.KalmanFilter', (['F.shape[1]', 'Q.shape[1]'], {}), '(F.shape[1], Q.shape[1])\n', (1227, 1251), True, 'import cv2 as cv\n'), ((2369, 2378), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2375, 2378), True, 'import numpy as np\n'), ((2392, 2408), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2400, 2408), True, 'import numpy as np\n'), ((2421, 2452), 'numpy.array', 'np.array', (['[[1.0], [1.0], [1.0]]'], {}), '([[1.0], [1.0], [1.0]])\n', (2429, 2452), True, 'import numpy as np\n'), ((2463, 2536), 'numpy.block', 'np.block', (['[[I3, T * I3, T ** 2 / 2 * I3], [O3, I3, T * I3], [O3, O3, I3]]'], {}), '([[I3, T * I3, T ** 2 / 2 * I3], [O3, I3, T * I3], [O3, O3, I3]])\n', (2471, 2536), True, 'import numpy as np\n'), ((2681, 2733), 'numpy.block', 'np.block', (['[[I3, O3, O3], [O3, O3, O3], [O3, O3, I3]]'], {}), '([[I3, O3, O3], [O3, O3, O3], [O3, O3, I3]])\n', (2689, 2733), True, 'import numpy as np\n'), ((2790, 2799), 'numpy.eye', 'np.eye', (['(9)'], {}), '(9)\n', (2796, 2799), True, 'import numpy as np\n'), ((2601, 2657), 'numpy.hstack', 'np.hstack', (['[T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T]'], {}), '([T ** 3 / 6 * B.T, T ** 2 / 2 * B.T, T * B.T])\n', (2610, 2657), True, 'import numpy as np\n')]
|
import code.book_plots as bp
import code.gh_internal as gh
import matplotlib.pyplot as plt
import numpy as np
from filterpy.discrete_bayes import normalize
def scaled_update (hall, belief, z, prob):
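    # Scale the belief at positions matching the hallway feature by the likelihood ratio prob/(1-prob),
    # then renormalize so the belief sums to 1.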
scale_ = prob/(1-prob)
belief[hall==1] *=scale_
normalize(belief)
belief = np.array([0.1]*10)
hallway = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])
reading = 1
scaled_update(hallway, belief, reading, prob=0.75)
belief /= sum(belief)
print("belief:", belief)
print ("sum = ", sum(belief))
plt.figure()
bp.bar_plot(belief)
plt.show()
|
[
"filterpy.discrete_bayes.normalize",
"numpy.array",
"matplotlib.pyplot.figure",
"code.book_plots.bar_plot"
] |
[((332, 352), 'numpy.array', 'np.array', (['([0.1] * 10)'], {}), '([0.1] * 10)\n', (340, 352), True, 'import numpy as np\n'), ((361, 401), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 0, 0, 1, 0]'], {}), '([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])\n', (369, 401), True, 'import numpy as np\n'), ((545, 557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (555, 557), True, 'import matplotlib.pyplot as plt\n'), ((295, 312), 'filterpy.discrete_bayes.normalize', 'normalize', (['belief'], {}), '(belief)\n', (304, 312), False, 'from filterpy.discrete_bayes import normalize\n'), ((558, 577), 'code.book_plots.bar_plot', 'bp.bar_plot', (['belief'], {}), '(belief)\n', (569, 577), True, 'import code.book_plots as bp\n')]
|
# mfield / mfield.py
import numpy as np
try:
import matlab
import matlab.engine
except ImportError:
pass
import time
import io
import os
class MField(object):
'''
Implementation of FIELD II using the MATLAB engine for python.
'''
def __init__(self, path=None):
# set default path to location of m-files (where this module is)
if path is None:
path = os.path.dirname(os.path.abspath(__file__))
# try, for at most 1 minute, to start MATLAB engine
for i in range(6):
try:
self._mateng = matlab.engine.start_matlab()
break
except (matlab.engine.EngineError, TypeError):
time.sleep(10)
# set MATLAB engine path to location of m-files
self._mateng.cd(str(os.path.normpath(path)), nargout=0)
def __del__(self):
# self.field_end() # end FIELD II
self._mateng.quit() # shutdown MATLAB engine
def _numpy_to_mat(self, array, orient='row'):
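        # Convert a NumPy array to matlab.double; 1-D arrays are given an explicit row or column shape
        # depending on `orient`.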
if array.ndim == 1:
if orient.lower() == 'row':
sz = (1, array.size)
elif orient.lower() in ('col', 'column'):
sz = (array.size, 1)
else:
sz = None
ret = matlab.double(initializer=array.tolist(), size=sz)
return ret
def _mat_to_numpy(self, array):
return np.array(array).squeeze()
## FIELD FUNCTIONS ##
def field_init(self, suppress=-1):
self._mateng.field_init(suppress, nargout=0)
def field_end(self):
self._mateng.field_end(nargout=0, stdout=io.StringIO())
def set_field(self, option_name, value):
self._mateng.set_field(option_name, value, nargout=0)
def field_info(self):
self._mateng.field_info(nargout=0)
## CALC FUNCTIONS ##
def calc_scat(self, Th1, Th2, points, amplitudes):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat(Th1, Th2, points_mat, amplitudes_mat,
nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_scat_all(self, Th1, Th2, points, amplitudes, dec_factor):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat_all(Th1, Th2, points_mat, amplitudes_mat,
dec_factor, nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_scat_multi(self, Th1, Th2, points, amplitudes):
points_mat = self._numpy_to_mat(points, orient='row')
amplitudes_mat = self._numpy_to_mat(amplitudes, orient='col')
ret = self._mateng.calc_scat_multi(Th1, Th2, points_mat, amplitudes_mat,
nargout=2, stdout=io.StringIO())
scat = self._mat_to_numpy(ret[0])
t0 = ret[1]
return scat, t0
def calc_h(self, Th, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_h(Th, points_mat, nargout=2, stdout=io.StringIO())
h = self._mat_to_numpy(ret[0])
t0 = ret[1]
return h, t0
def calc_hp(self, Th, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_hp(Th, points_mat, nargout=2, stdout=io.StringIO())
hp = self._mat_to_numpy(ret[0])
t0 = ret[1]
return hp, t0
def calc_hhp(self, Th1, Th2, points):
points_mat = self._numpy_to_mat(points, orient='row')
ret = self._mateng.calc_hhp(Th1, Th2, points_mat, nargout=2, stdout=io.StringIO())
hhp = self._mat_to_numpy(ret[0])
t0 = ret[1]
return hhp, t0
## XDC FUNCTIONS ##
def xdc_impulse(self, Th, pulse):
pulse_mat = self._numpy_to_mat(pulse, orient='row')
self._mateng.xdc_impulse(Th, pulse_mat, nargout=0)
def xdc_excitation(self, Th, pulse):
pulse_mat = self._numpy_to_mat(pulse, orient='row')
self._mateng.xdc_excitation(Th, pulse_mat, nargout=0)
def xdc_linear_array(self, no_elements, width, height, kerf, no_sub_x,
no_sub_y, focus):
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_linear_array(no_elements, width, height, kerf,
no_sub_x, no_sub_y, focus_mat, nargout=1)
return ret
def xdc_show(self, Th, info_type='all'):
self._mateng.xdc_show(Th, info_type, nargout=0)
def xdc_focus(self, Th, times, points):
times_mat = self._numpy_to_mat(times, orient='col')
points_mat = self._numpy_to_mat(points, orient='row')
self._mateng.xdc_focus(Th, times_mat, points_mat, nargout=0)
def xdc_focus_times(self, Th, times, delays):
times_mat = self._numpy_to_mat(times, orient='col')
delays_mat = self._numpy_to_mat(delays, orient='row')
self._mateng.xdc_focus_times(Th, times_mat, delays_mat, nargout=0)
def xdc_free(self, Th):
self._mateng.xdc_free(Th, nargout=0)
def xdc_get(self, Th, info_type='rect'):
ret = self._mat_to_numpy(self._mateng.xdc_get(Th, info_type, nargout=1))
return ret
def xdc_rectangles(self, rect, center, focus):
rect_mat = self._numpy_to_mat(rect, orient='row')
center_mat = self._numpy_to_mat(center, orient='row')
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_rectangles(rect_mat, center_mat, focus_mat,
nargout=1)
return ret
def xdc_focused_array(self, no_elements, width, height, kerf, rfocus, no_sub_x, no_sub_y, focus):
focus_mat = self._numpy_to_mat(focus, orient='row')
ret = self._mateng.xdc_focused_array(no_elements, width, height, kerf,
rfocus, no_sub_x, no_sub_y, focus_mat, nargout=1)
return ret
def xdc_piston(self, radius, ele_size):
ret = self._mateng.xdc_piston(radius, ele_size)
return ret
def xdc_apodization(self, Th, times, values):
times_mat = self._numpy_to_mat(times, orient='col')
values_mat = self._numpy_to_mat(values, orient='row')
self._mateng.xdc_apodization(Th, times_mat, values_mat, nargout=0)
def xdc_quantization(self, Th, value):
self._mateng.xdc_quantization(Th, value, nargout=0)
def xdc_2d_array(self):
raise NotImplementedError
def xdc_concave(self, radius, focal_radius, ele_size):
ret = self._mateng.xdc_concave(radius, focal_radius, ele_size)
return ret
def xdc_convex_array(self):
raise NotImplementedError
def xdc_convex_focused_array(self):
raise NotImplementedError
## ELE FUNCTIONS ##
def ele_apodization(self, Th, element_no, apo):
element_no_mat = self._numpy_to_mat(element_no, orient='col')
apo_mat = self._numpy_to_mat(apo, orient='row')
self._mateng.ele_apodization(Th, element_no_mat, apo_mat, nargout=0)
def ele_delay(self, Th, element_no, delays):
element_no_mat = self._numpy_to_mat(element_no, orient='col')
delays_mat = self._numpy_to_mat(delays, orient='row')
self._mateng.ele_delay(Th, element_no_mat, delays_mat, nargout=0)
## TEST ##
if __name__ == '__main__':
# from scipy.signal import gausspulse
from .. simulations import sim_functions as sim
field = MField()
field.field_init()
field.set_field('c', 1500)
field.set_field('fs', 100e6)
field.set_field('att', 0)
field.set_field('freq_att', 10e6)
field.set_field('att_f0', 0)
field.set_field('use_att', 1)
fc = 10e6
fbw = 1.0
fs = 100e6
pulse, t = sim.gausspulse(fc, fbw, fs)
tx = field.xdc_linear_array(64, 0.0002, 0.001, 300e-6, 1, 2, np.array([0, 0, 0.03]))
field.xdc_impulse(tx, pulse)
field.xdc_excitation(tx, np.array([1]))
field.field_info()
# field.xdc_show(tx)
scat, t0 = field.calc_scat_multi(tx, tx, np.array([0, 0, 0.03]), np.array([1]))
field.field_end()
|
[
"matlab.engine.start_matlab",
"time.sleep",
"os.path.normpath",
"numpy.array",
"os.path.abspath",
"io.StringIO"
] |
[((7976, 7998), 'numpy.array', 'np.array', (['[0, 0, 0.03]'], {}), '([0, 0, 0.03])\n', (7984, 7998), True, 'import numpy as np\n'), ((8062, 8075), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8070, 8075), True, 'import numpy as np\n'), ((8172, 8194), 'numpy.array', 'np.array', (['[0, 0, 0.03]'], {}), '([0, 0, 0.03])\n', (8180, 8194), True, 'import numpy as np\n'), ((8196, 8209), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8204, 8209), True, 'import numpy as np\n'), ((428, 453), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (443, 453), False, 'import os\n'), ((591, 619), 'matlab.engine.start_matlab', 'matlab.engine.start_matlab', ([], {}), '()\n', (617, 619), False, 'import matlab\n'), ((817, 839), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (833, 839), False, 'import os\n'), ((1395, 1410), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (1403, 1410), True, 'import numpy as np\n'), ((1617, 1630), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1628, 1630), False, 'import io\n'), ((2131, 2144), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2142, 2144), False, 'import io\n'), ((2561, 2574), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2572, 2574), False, 'import io\n'), ((2971, 2984), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2982, 2984), False, 'import io\n'), ((3241, 3254), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3252, 3254), False, 'import io\n'), ((3507, 3520), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3518, 3520), False, 'import io\n'), ((3789, 3802), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3800, 3802), False, 'import io\n'), ((717, 731), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (727, 731), False, 'import time\n')]
|
import librosa
from numba import jit
import numpy as np
@jit(nopython=True, cache=True)
def __C_to_DE(C: np.ndarray = None,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
dw: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
sub_sequence: bool = False) -> (np.ndarray, np.ndarray):
"""This function computes the accumulated cost matrix D and the step index
matrix E.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
dw : np.ndarray (np.float64) [shape=(1, S)]
        Double array defining the weight of each step, default: [1.0, 1.0, 1.0]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix of type double
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix.
        E[n, m] holds the index of the step taken to determine the value of D[n, m].
If E[n, m] is zero, no valid step was possible.
NaNs in the cost matrix are preserved, invalid fields in the cost matrix are NaNs.
"""
if C is None:
raise ValueError('C must be a 2D numpy array.')
N, M = C.shape
S = dn.size
if S != dm.size or S != dw.size:
raise ValueError('The parameters dn,dm, and dw must be of equal length.')
# calc bounding box size of steps
sbbn = np.max(dn)
sbbm = np.max(dm)
# initialize E
E = np.zeros((N, M), np.int64) - 1
# initialize extended D matrix
D = np.ones((sbbn + N, sbbm + M), np.float64) * np.inf
if sub_sequence:
for m in range(M):
D[sbbn, sbbm + m] = C[0, m]
else:
D[sbbn, sbbm] = C[0, 0]
# accumulate
for m in range(sbbm, M + sbbm):
for n in range(sbbn, N + sbbn):
for s in range(S):
cost = D[n - dn[s], m - dm[s]] + C[n - sbbn, m - sbbm] * dw[s]
if cost < D[n, m]:
D[n, m] = cost
E[n - sbbn, m - sbbm] = s
D = D[sbbn: N + sbbn, sbbm: M + sbbm]
return D, E
@jit(nopython=True, cache=True)
def __E_to_warping_path(E: np.ndarray,
dn: np.ndarray = np.array([1, 1, 0], np.int64),
dm: np.ndarray = np.array([1, 0, 1], np.int64),
sub_sequence: bool = False,
end_index: int = -1) -> np.ndarray:
"""This function computes a warping path based on the provided matrix E
and the allowed steps.
Parameters
----------
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
dn : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (N direction of C), default: [1, 1, 0]
dm : np.ndarray (np.int64) [shape=(1, S)]
Integer array defining valid steps (M direction of C), default: [1, 0, 1]
sub_sequence : bool
Set `True` for SubSequence DTW, default: False
end_index : int
        Index in the longer sequence at which the warping path ends (only relevant for SubSequence DTW)
Returns
-------
warping_path : np.ndarray (np.int64) [shape=(2, M)]
Resulting optimal warping path
"""
N, M = E.shape
if not sub_sequence and end_index == -1:
end_index = M - 1
m = end_index
n = N - 1
warping_path = np.zeros((2, n + m + 1))
index = 0
def _loop(m, n, index):
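        # Record the current cell in the warping path, then follow the step stored in E backwards.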
warping_path[:, index] = np.array([n, m])
step_index = E[n, m]
m -= dm[step_index]
n -= dn[step_index]
index += 1
return m, n, index
if sub_sequence:
while n > 0:
m, n, index = _loop(m, n, index)
else:
while m > 0 or n > 0:
m, n, index = _loop(m, n, index)
warping_path[:, index] = np.array([n, m])
warping_path = warping_path[:, index::-1]
return warping_path
def compute_warping_path(C: np.ndarray,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int64),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
implementation: str = 'synctoolbox'):
"""Applies DTW on cost matrix C.
Parameters
----------
C : np.ndarray (np.float32 / np.float64) [shape=(N, M)]
Cost matrix
    step_sizes : np.ndarray (np.int64) [shape=(S, 2)]
        Array of step sizes, one (dn, dm) step per row
    step_weights : np.ndarray (np.float64) [shape=(S,)]
        Array of step weights
implementation: str
Choose among ``synctoolbox`` and ``librosa``. (default: ``synctoolbox``)
Returns
-------
D : np.ndarray (np.float64) [shape=(N, M)]
Accumulated cost matrix
E : np.ndarray (np.int64) [shape=(N, M)]
Step index matrix
wp : np.ndarray (np.int64) [shape=(2, M)]
Warping path
"""
if implementation == 'librosa':
D, wp, E = librosa.sequence.dtw(C=C,
step_sizes_sigma=step_sizes,
weights_add=np.array([0, 0, 0]),
weights_mul=step_weights,
return_steps=True,
subseq=False)
wp = wp[::-1].T
elif implementation == 'synctoolbox':
dn = step_sizes[:, 0]
dm = step_sizes[:, 1]
D, E = __C_to_DE(C,
dn=dn,
dm=dm,
dw=step_weights,
sub_sequence=False)
wp = __E_to_warping_path(E=E,
dn=dn,
dm=dm,
sub_sequence=False)
else:
raise NotImplementedError(f'No implementation found called {implementation}')
return D, E, wp
|
[
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numba.jit"
] |
[((59, 89), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (62, 89), False, 'from numba import jit\n'), ((2478, 2508), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (2481, 2508), False, 'from numba import jit\n'), ((157, 186), 'numpy.array', 'np.array', (['[1, 1, 0]', 'np.int64'], {}), '([1, 1, 0], np.int64)\n', (165, 186), True, 'import numpy as np\n'), ((219, 248), 'numpy.array', 'np.array', (['[1, 0, 1]', 'np.int64'], {}), '([1, 0, 1], np.int64)\n', (227, 248), True, 'import numpy as np\n'), ((281, 318), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]', 'np.float64'], {}), '([1.0, 1.0, 1.0], np.float64)\n', (289, 318), True, 'import numpy as np\n'), ((1777, 1787), 'numpy.max', 'np.max', (['dn'], {}), '(dn)\n', (1783, 1787), True, 'import numpy as np\n'), ((1799, 1809), 'numpy.max', 'np.max', (['dm'], {}), '(dm)\n', (1805, 1809), True, 'import numpy as np\n'), ((2589, 2618), 'numpy.array', 'np.array', (['[1, 1, 0]', 'np.int64'], {}), '([1, 1, 0], np.int64)\n', (2597, 2618), True, 'import numpy as np\n'), ((2661, 2690), 'numpy.array', 'np.array', (['[1, 0, 1]', 'np.int64'], {}), '([1, 0, 1], np.int64)\n', (2669, 2690), True, 'import numpy as np\n'), ((3676, 3700), 'numpy.zeros', 'np.zeros', (['(2, n + m + 1)'], {}), '((2, n + m + 1))\n', (3684, 3700), True, 'import numpy as np\n'), ((4129, 4145), 'numpy.array', 'np.array', (['[n, m]'], {}), '([n, m])\n', (4137, 4145), True, 'import numpy as np\n'), ((4309, 4353), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]', 'np.int64'], {}), '([[1, 0], [0, 1], [1, 1]], np.int64)\n', (4317, 4353), True, 'import numpy as np\n'), ((4407, 4444), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]', 'np.float64'], {}), '([1.0, 1.0, 1.0], np.float64)\n', (4415, 4444), True, 'import numpy as np\n'), ((1838, 1864), 'numpy.zeros', 'np.zeros', (['(N, M)', 'np.int64'], {}), '((N, M), np.int64)\n', (1846, 1864), True, 'import numpy as np\n'), ((1913, 1954), 'numpy.ones', 'np.ones', (['(sbbn + N, sbbm + M)', 'np.float64'], {}), '((sbbn + N, sbbm + M), np.float64)\n', (1920, 1954), True, 'import numpy as np\n'), ((3778, 3794), 'numpy.array', 'np.array', (['[n, m]'], {}), '([n, m])\n', (3786, 3794), True, 'import numpy as np\n'), ((5389, 5408), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5397, 5408), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from projections import *
# -----------------------------------------------------------------------------
def polar_frame(ax, title=None, legend=False, zoom=False, labels=True):
""" Draw a polar frame """
for rho in [0, 2,5,10,20,40,60,80,90]:
lw, color, alpha = 1, '0.00', 0.25
if rho == 90 and not zoom:
color, lw, alpha = '0.00', 2, 1
n = 500
R = np.ones(n)*rho/90.0
T = np.linspace(-np.pi/2,np.pi/2,n)
X,Y = polar_to_cartesian(R,T)
ax.plot(X, Y-1/2, color=color, lw=lw, alpha=alpha)
if not zoom and rho in [0,10,20,40,80] and labels:
ax.text(X[-1]*1.0-0.075, Y[-1],u'%d°' % rho, color='k', # size=15,
horizontalalignment='center', verticalalignment='center')
for theta in [-90,-60,-30,0,+30,+60,+90]:
lw, color, alpha = 1, '0.00', 0.25
if theta in[-90,+90] and not zoom:
color, lw, alpha = '0.00', 2, 1
angle = theta/90.0*np.pi/2
n = 500
R = np.linspace(0,1,n)
T = np.ones(n)*angle
X,Y = polar_to_cartesian(R,T)
ax.plot(X, Y, color=color, lw=lw, alpha=alpha)
if not zoom and theta in [-90,-60,-30,+30,+60,+90] and labels:
ax.text(X[-1]*1.05, Y[-1]*1.05,u'%d°' % theta, color='k', # size=15,
horizontalalignment='left', verticalalignment='center')
d = 0.01
ax.set_xlim( 0.0-d, 1.0+d)
ax.set_ylim(-1.0-d, 1.0+d)
ax.set_xticks([])
ax.set_yticks([])
if legend:
ax.set_frame_on(True)
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',-1.2))
ax.set_xticks([])
ax.text(0.0,-1.1, "$\longleftarrow$ Foveal",
verticalalignment='top', horizontalalignment='left', size=12)
ax.text(1.0,-1.1, "Peripheral $\longrightarrow$",
verticalalignment='top', horizontalalignment='right', size=12)
else:
ax.set_frame_on(False)
if title:
        ax.set_title(title)
# -----------------------------------------------------------------------------
def logpolar_frame(ax, title=None, legend=False, labels=True):
""" Draw a log polar frame """
for rho in [2,5,10,20,40,60,80,90]:
lw, color, alpha = 1, '0.00', 0.25
if rho == 90:
color, lw, alpha = '0.00', 2, 1
n = 500
R = np.ones(n)*rho/90.0
T = np.linspace(-np.pi/2,np.pi/2,n)
X,Y = polar_to_logpolar(R,T)
X,Y = X*2, 2*Y-1
ax.plot(X, Y, color=color, lw=lw, alpha=alpha)
if labels and rho in [2,5,10,20,40,80]:
ax.text(X[-1], Y[-1]+0.05, u'%d°' % rho, color='k', # size=15,
horizontalalignment='right', verticalalignment='bottom')
for theta in [-90,-60,-30, 0, +30,+60,+90]:
lw, color, alpha = 1, '0.00', 0.25
if theta in[-90,+90]:
color, lw, alpha = '0.00', 2, 1
angle = theta/90.0*np.pi/2
n = 500
R = np.linspace(0,1,n)
T = np.ones(n)*angle
X,Y = polar_to_logpolar(R,T)
X,Y = X*2, 2*Y-1
ax.plot(X,Y, color=color, lw=lw, alpha=alpha)
if labels:
ax.text(X[-1]*1.0+.05, Y[-1]*1.0,u'%d°' % theta, color='k', # size=15,
horizontalalignment='left', verticalalignment='center')
d = 0.01
ax.set_xlim( 0.0-d, 2.0+d)
ax.set_ylim(-1.0-d, 1.0+d)
ax.set_xticks([])
ax.set_yticks([])
if legend:
ax.set_frame_on(True)
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',-1.2))
ax.set_xticks([0,2])
ax.set_xticklabels(['0', '4.8 (mm)'])
ax.text(0.0,-1.1, "$\longleftarrow$ Rostral",
verticalalignment='top', horizontalalignment='left', size=12)
ax.text(2,-1.1, "Caudal $\longrightarrow$",
verticalalignment='top', horizontalalignment='right', size=12)
else:
ax.set_frame_on(False)
if title:
        ax.set_title(title)
# -----------------------------------------------------------------------------
def polar_imshow(axis, Z, *args, **kwargs):
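    # Draw Z over the polar frame extent [0, 1] x [-1, 1], defaulting to nearest-neighbour interpolation
    # and a reversed gray colormap.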
kwargs['interpolation'] = kwargs.get('interpolation', 'nearest')
kwargs['cmap'] = kwargs.get('cmap', plt.cm.gray_r)
#kwargs['vmin'] = kwargs.get('vmin', Z.min())
#kwargs['vmax'] = kwargs.get('vmax', Z.max())
kwargs['vmin'] = kwargs.get('vmin', 0)
kwargs['vmax'] = kwargs.get('vmax', 1)
kwargs['origin'] = kwargs.get('origin', 'lower')
axis.imshow(Z, extent=[0,1,-1, 1], *args, **kwargs)
# -----------------------------------------------------------------------------
def logpolar_imshow(axis, Z, *args, **kwargs):
kwargs['interpolation'] = kwargs.get('interpolation', 'nearest')
kwargs['cmap'] = kwargs.get('cmap', plt.cm.gray_r)
#kwargs['vmin'] = kwargs.get('vmin', Z.min())
#kwargs['vmax'] = kwargs.get('vmax', Z.max())
kwargs['vmin'] = kwargs.get('vmin', 0)
kwargs['vmax'] = kwargs.get('vmax', 1)
kwargs['origin'] = kwargs.get('origin', 'lower')
im = axis.imshow(Z, extent=[0,2,-1, 1], *args, **kwargs)
# axins = inset_axes(axis, width='25%', height='5%', loc=3)
# vmin, vmax = Z.min(), Z.max()
# plt.colorbar(im, cax=axins, orientation='horizontal', ticks=[vmin,vmax], format = '%.2f')
# axins.xaxis.set_ticks_position('bottom')
|
[
"numpy.linspace",
"numpy.ones"
] |
[((2374, 2411), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi / 2)', 'n'], {}), '(-np.pi / 2, np.pi / 2, n)\n', (2385, 2411), True, 'import numpy as np\n'), ((2961, 2981), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2972, 2981), True, 'import numpy as np\n'), ((4499, 4536), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi / 2)', 'n'], {}), '(-np.pi / 2, np.pi / 2, n)\n', (4510, 4536), True, 'import numpy as np\n'), ((5081, 5101), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (5092, 5101), True, 'import numpy as np\n'), ((2992, 3002), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2999, 3002), True, 'import numpy as np\n'), ((5112, 5122), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (5119, 5122), True, 'import numpy as np\n'), ((2342, 2352), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2349, 2352), True, 'import numpy as np\n'), ((4467, 4477), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (4474, 4477), True, 'import numpy as np\n')]
|
"""Unit tests for radar_statistics.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import radar_statistics as radar_stats
TOLERANCE = 1e-6
FAKE_STATISTIC_NAME = 'foo'
FAKE_PERCENTILE_LEVEL = -9999.
# The following constants are used to test
# radar_field_and_statistic_to_column_name,
# radar_field_and_percentile_to_column_name, and
# _column_name_to_statistic_params.
RADAR_FIELD_NAME = 'reflectivity_dbz'
RADAR_HEIGHT_M_ASL = 250
STATISTIC_NAME = 'kurtosis'
COLUMN_NAME_FOR_NON_PERCENTILE = 'reflectivity_dbz_250metres_kurtosis'
PERCENTILE_LEVEL_UNROUNDED = 75.12
PERCENTILE_LEVEL_ROUNDED = 75.1
COLUMN_NAME_FOR_PERCENTILE = 'reflectivity_dbz_250metres_percentile075.1'
INVALID_COLUMN_NAME = 'foo'
# The following constants are used to test extract_radar_grid_points.
RADAR_FIELD_MATRIX = numpy.array([
[-1, -1, 10, 20, 30, 40],
[-1, 5, 15, 25, 35, 50],
[5, 10, 25, 40, 55, 70],
[10, 30, 50, 70, 75, -1]
], dtype=float)
RADAR_FIELD_MATRIX[RADAR_FIELD_MATRIX < 0] = numpy.nan
ROW_INDICES_FOR_1D_ARRAY = numpy.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)
COLUMN_INDICES_FOR_1D_ARRAY = numpy.array([0, 5, 1, 4, 2, 3, 0, 5], dtype=int)
RADAR_FIELD_1D_ARRAY = numpy.array([
numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan
])
# The following constants are used to test get_spatial_statistics.
RADAR_FIELD_FOR_STATS = numpy.array([
[-1, 0, 20],
[20, 50, 60]
], dtype=float)
RADAR_FIELD_FOR_STATS[RADAR_FIELD_FOR_STATS < 0] = numpy.nan
STATISTIC_NAMES = [
radar_stats.AVERAGE_NAME, radar_stats.STANDARD_DEVIATION_NAME,
radar_stats.SKEWNESS_NAME, radar_stats.KURTOSIS_NAME
]
STATISTIC_VALUES = numpy.array([30, 24.494897, 0.170103, -1.75])
PERCENTILE_LEVELS = numpy.array([0, 5, 25, 50, 75, 95, 100], dtype=float)
PERCENTILE_VALUES = numpy.array([0, 4, 20, 20, 50, 58, 60], dtype=float)
class RadarStatisticsTests(unittest.TestCase):
"""Each method is a unit test for radar_statistics.py."""
def test_radar_field_and_statistic_to_column_name(self):
"""Ensures correctness of radar_field_and_statistic_to_column_name."""
this_column_name = radar_stats.radar_field_and_statistic_to_column_name(
radar_field_name=RADAR_FIELD_NAME,
radar_height_m_asl=RADAR_HEIGHT_M_ASL,
statistic_name=STATISTIC_NAME)
self.assertTrue(this_column_name == COLUMN_NAME_FOR_NON_PERCENTILE)
def test_radar_field_and_percentile_to_column_name_reflectivity(self):
"""Ensures correctness of radar_field_and_percentile_to_column_name."""
this_column_name = (
radar_stats.radar_field_and_percentile_to_column_name(
radar_field_name=RADAR_FIELD_NAME,
radar_height_m_asl=RADAR_HEIGHT_M_ASL,
percentile_level=PERCENTILE_LEVEL_UNROUNDED)
)
self.assertTrue(this_column_name == COLUMN_NAME_FOR_PERCENTILE)
def test_column_name_to_statistic_params_percentile(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, statistic is a percentile.
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
COLUMN_NAME_FOR_PERCENTILE)
self.assertFalse(
this_parameter_dict[radar_stats.IS_GRIDRAD_STATISTIC_KEY]
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_FIELD_NAME_KEY] ==
RADAR_FIELD_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_HEIGHT_KEY] ==
RADAR_HEIGHT_M_ASL
)
self.assertTrue(
this_parameter_dict[radar_stats.STATISTIC_NAME_KEY] is None
)
self.assertTrue(
this_parameter_dict[radar_stats.PERCENTILE_LEVEL_KEY] ==
PERCENTILE_LEVEL_ROUNDED
)
def test_column_name_to_statistic_params_non_percentile(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, statistic is *not* a percentile.
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
COLUMN_NAME_FOR_NON_PERCENTILE)
self.assertFalse(
this_parameter_dict[radar_stats.IS_GRIDRAD_STATISTIC_KEY]
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_FIELD_NAME_KEY] ==
RADAR_FIELD_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.RADAR_HEIGHT_KEY] ==
RADAR_HEIGHT_M_ASL
)
self.assertTrue(
this_parameter_dict[radar_stats.STATISTIC_NAME_KEY] ==
STATISTIC_NAME
)
self.assertTrue(
this_parameter_dict[radar_stats.PERCENTILE_LEVEL_KEY] is None
)
def test_column_name_to_statistic_params_invalid(self):
"""Ensures correct output from _column_name_to_statistic_params.
In this case, column name is invalid (does not correspond to a radar
statistic).
"""
this_parameter_dict = radar_stats._column_name_to_statistic_params(
INVALID_COLUMN_NAME)
self.assertTrue(this_parameter_dict is None)
def test_check_statistic_params_all_good(self):
"""Ensures correct output from _check_statistic_params.
In this case, all inputs are valid.
"""
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES, radar_stats.DEFAULT_PERCENTILE_LEVELS)
def test_check_statistic_params_bad_string(self):
"""Ensures correct output from _check_statistic_params.
In this case, one statistic name is invalid.
"""
with self.assertRaises(ValueError):
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES + [FAKE_STATISTIC_NAME],
radar_stats.DEFAULT_PERCENTILE_LEVELS
)
def test_check_statistic_params_bad_percentile(self):
"""Ensures correct output from _check_statistic_params.
In this case, one percentile level is invalid.
"""
these_percentile_levels = numpy.concatenate((
radar_stats.DEFAULT_PERCENTILE_LEVELS,
numpy.array([FAKE_PERCENTILE_LEVEL])
))
with self.assertRaises(ValueError):
radar_stats._check_statistic_params(
radar_stats.STATISTIC_NAMES, these_percentile_levels)
def test_extract_radar_grid_points(self):
"""Ensures correct output from extract_radar_grid_points."""
this_field_1d_array = radar_stats.extract_radar_grid_points(
RADAR_FIELD_MATRIX, row_indices=ROW_INDICES_FOR_1D_ARRAY,
column_indices=COLUMN_INDICES_FOR_1D_ARRAY)
self.assertTrue(numpy.allclose(
this_field_1d_array, RADAR_FIELD_1D_ARRAY, equal_nan=True,
atol=TOLERANCE
))
def test_get_spatial_statistics(self):
"""Ensures correct output from get_spatial_statistics."""
these_statistic_values, these_percentile_values = (
radar_stats.get_spatial_statistics(
RADAR_FIELD_FOR_STATS, statistic_names=STATISTIC_NAMES,
percentile_levels=PERCENTILE_LEVELS)
)
self.assertTrue(numpy.allclose(
these_statistic_values, STATISTIC_VALUES, atol=TOLERANCE
))
self.assertTrue(numpy.allclose(
these_percentile_values, PERCENTILE_VALUES, atol=TOLERANCE
))
if __name__ == '__main__':
unittest.main()
|
[
"gewittergefahr.gg_utils.radar_statistics._check_statistic_params",
"gewittergefahr.gg_utils.radar_statistics.get_spatial_statistics",
"numpy.allclose",
"gewittergefahr.gg_utils.radar_statistics.radar_field_and_percentile_to_column_name",
"numpy.array",
"gewittergefahr.gg_utils.radar_statistics.radar_field_and_statistic_to_column_name",
"unittest.main",
"gewittergefahr.gg_utils.radar_statistics.extract_radar_grid_points",
"gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params"
] |
[((815, 947), 'numpy.array', 'numpy.array', (['[[-1, -1, 10, 20, 30, 40], [-1, 5, 15, 25, 35, 50], [5, 10, 25, 40, 55, 70],\n [10, 30, 50, 70, 75, -1]]'], {'dtype': 'float'}), '([[-1, -1, 10, 20, 30, 40], [-1, 5, 15, 25, 35, 50], [5, 10, 25,\n 40, 55, 70], [10, 30, 50, 70, 75, -1]], dtype=float)\n', (826, 947), False, 'import numpy\n'), ((1046, 1094), 'numpy.array', 'numpy.array', (['[0, 0, 1, 1, 2, 2, 3, 3]'], {'dtype': 'int'}), '([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)\n', (1057, 1094), False, 'import numpy\n'), ((1125, 1173), 'numpy.array', 'numpy.array', (['[0, 5, 1, 4, 2, 3, 0, 5]'], {'dtype': 'int'}), '([0, 5, 1, 4, 2, 3, 0, 5], dtype=int)\n', (1136, 1173), False, 'import numpy\n'), ((1197, 1255), 'numpy.array', 'numpy.array', (['[numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan]'], {}), '([numpy.nan, 40, 5, 35, 25, 40, 10, numpy.nan])\n', (1208, 1255), False, 'import numpy\n'), ((1354, 1407), 'numpy.array', 'numpy.array', (['[[-1, 0, 20], [20, 50, 60]]'], {'dtype': 'float'}), '([[-1, 0, 20], [20, 50, 60]], dtype=float)\n', (1365, 1407), False, 'import numpy\n'), ((1647, 1692), 'numpy.array', 'numpy.array', (['[30, 24.494897, 0.170103, -1.75]'], {}), '([30, 24.494897, 0.170103, -1.75])\n', (1658, 1692), False, 'import numpy\n'), ((1713, 1766), 'numpy.array', 'numpy.array', (['[0, 5, 25, 50, 75, 95, 100]'], {'dtype': 'float'}), '([0, 5, 25, 50, 75, 95, 100], dtype=float)\n', (1724, 1766), False, 'import numpy\n'), ((1787, 1839), 'numpy.array', 'numpy.array', (['[0, 4, 20, 20, 50, 58, 60]'], {'dtype': 'float'}), '([0, 4, 20, 20, 50, 58, 60], dtype=float)\n', (1798, 1839), False, 'import numpy\n'), ((7506, 7521), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7519, 7521), False, 'import unittest\n'), ((2120, 2287), 'gewittergefahr.gg_utils.radar_statistics.radar_field_and_statistic_to_column_name', 'radar_stats.radar_field_and_statistic_to_column_name', ([], {'radar_field_name': 'RADAR_FIELD_NAME', 'radar_height_m_asl': 'RADAR_HEIGHT_M_ASL', 'statistic_name': 'STATISTIC_NAME'}), '(radar_field_name=\n RADAR_FIELD_NAME, radar_height_m_asl=RADAR_HEIGHT_M_ASL, statistic_name\n =STATISTIC_NAME)\n', (2172, 2287), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((2590, 2771), 'gewittergefahr.gg_utils.radar_statistics.radar_field_and_percentile_to_column_name', 'radar_stats.radar_field_and_percentile_to_column_name', ([], {'radar_field_name': 'RADAR_FIELD_NAME', 'radar_height_m_asl': 'RADAR_HEIGHT_M_ASL', 'percentile_level': 'PERCENTILE_LEVEL_UNROUNDED'}), '(radar_field_name=\n RADAR_FIELD_NAME, radar_height_m_asl=RADAR_HEIGHT_M_ASL,\n percentile_level=PERCENTILE_LEVEL_UNROUNDED)\n', (2643, 2771), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((3125, 3197), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['COLUMN_NAME_FOR_PERCENTILE'], {}), '(COLUMN_NAME_FOR_PERCENTILE)\n', (3169, 3197), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((4070, 4146), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['COLUMN_NAME_FOR_NON_PERCENTILE'], {}), '(COLUMN_NAME_FOR_NON_PERCENTILE)\n', (4114, 4146), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5044, 5109), 'gewittergefahr.gg_utils.radar_statistics._column_name_to_statistic_params', 'radar_stats._column_name_to_statistic_params', (['INVALID_COLUMN_NAME'], {}), 
'(INVALID_COLUMN_NAME)\n', (5088, 5109), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5360, 5467), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['radar_stats.STATISTIC_NAMES', 'radar_stats.DEFAULT_PERCENTILE_LEVELS'], {}), '(radar_stats.STATISTIC_NAMES,\n radar_stats.DEFAULT_PERCENTILE_LEVELS)\n', (5395, 5467), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6561, 6705), 'gewittergefahr.gg_utils.radar_statistics.extract_radar_grid_points', 'radar_stats.extract_radar_grid_points', (['RADAR_FIELD_MATRIX'], {'row_indices': 'ROW_INDICES_FOR_1D_ARRAY', 'column_indices': 'COLUMN_INDICES_FOR_1D_ARRAY'}), '(RADAR_FIELD_MATRIX, row_indices=\n ROW_INDICES_FOR_1D_ARRAY, column_indices=COLUMN_INDICES_FOR_1D_ARRAY)\n', (6598, 6705), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((7059, 7191), 'gewittergefahr.gg_utils.radar_statistics.get_spatial_statistics', 'radar_stats.get_spatial_statistics', (['RADAR_FIELD_FOR_STATS'], {'statistic_names': 'STATISTIC_NAMES', 'percentile_levels': 'PERCENTILE_LEVELS'}), '(RADAR_FIELD_FOR_STATS, statistic_names=\n STATISTIC_NAMES, percentile_levels=PERCENTILE_LEVELS)\n', (7093, 7191), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((5719, 5851), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['(radar_stats.STATISTIC_NAMES + [FAKE_STATISTIC_NAME])', 'radar_stats.DEFAULT_PERCENTILE_LEVELS'], {}), '(radar_stats.STATISTIC_NAMES + [\n FAKE_STATISTIC_NAME], radar_stats.DEFAULT_PERCENTILE_LEVELS)\n', (5754, 5851), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6307, 6400), 'gewittergefahr.gg_utils.radar_statistics._check_statistic_params', 'radar_stats._check_statistic_params', (['radar_stats.STATISTIC_NAMES', 'these_percentile_levels'], {}), '(radar_stats.STATISTIC_NAMES,\n these_percentile_levels)\n', (6342, 6400), True, 'from gewittergefahr.gg_utils import radar_statistics as radar_stats\n'), ((6751, 6844), 'numpy.allclose', 'numpy.allclose', (['this_field_1d_array', 'RADAR_FIELD_1D_ARRAY'], {'equal_nan': '(True)', 'atol': 'TOLERANCE'}), '(this_field_1d_array, RADAR_FIELD_1D_ARRAY, equal_nan=True,\n atol=TOLERANCE)\n', (6765, 6844), False, 'import numpy\n'), ((7255, 7327), 'numpy.allclose', 'numpy.allclose', (['these_statistic_values', 'STATISTIC_VALUES'], {'atol': 'TOLERANCE'}), '(these_statistic_values, STATISTIC_VALUES, atol=TOLERANCE)\n', (7269, 7327), False, 'import numpy\n'), ((7375, 7449), 'numpy.allclose', 'numpy.allclose', (['these_percentile_values', 'PERCENTILE_VALUES'], {'atol': 'TOLERANCE'}), '(these_percentile_values, PERCENTILE_VALUES, atol=TOLERANCE)\n', (7389, 7449), False, 'import numpy\n'), ((6202, 6238), 'numpy.array', 'numpy.array', (['[FAKE_PERCENTILE_LEVEL]'], {}), '([FAKE_PERCENTILE_LEVEL])\n', (6213, 6238), False, 'import numpy\n')]
|
from sklearn.decomposition import NMF
from nltk.tokenize import sent_tokenize
import numpy as np
class NonNegativeFactorization():
def __init__(self, A, r, feature_names, num_top_words, num_top_documents, corpus):
self.A = A
self.r = r
self.features_names = feature_names
self.corpus = corpus
self.num_top_words = num_top_words
self.num_top_documents = num_top_documents
def decomposition(self):
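        # Factorize the document-term matrix A into W (document-topic weights) and H (topic-term weights)
        # using r components, and record the reconstruction error and iteration count.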
nmf_model = NMF(n_components=self.r, init='nndsvdar', solver='mu', beta_loss='frobenius', tol=0.1,
random_state=1)
self.W = nmf_model.fit_transform(self.A)
self.H = nmf_model.components_
self.frobenius_norm = nmf_model.reconstruction_err_
self.iter = nmf_model.n_iter_
self.WH = self.W.dot(self.H)
def display_summary(self):
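        # For each topic, collect its top words and the indices of the documents that load most heavily on it,
        # then build the summary from the top documents of the first topic.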
self.data = []
self.index_data = []
for topic_index, topic in enumerate(self.H):
self.data.append([self.features_names[i] for i in topic.argsort()[:-self.num_top_words - 1:-1]])
top_doc_indices = np.argsort(self.W[:, topic_index])[::-1][0:self.num_top_documents]
self.index_data.append(top_doc_indices)
summary_list = []
for index in self.index_data[0]:
summary_list.append(self.corpus[index])
self.summary = summary_list
data = {
'top_words': self.data[0],
'summary_result': self.summary
}
return data
|
[
"numpy.argsort",
"sklearn.decomposition.NMF"
] |
[((475, 582), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'self.r', 'init': '"""nndsvdar"""', 'solver': '"""mu"""', 'beta_loss': '"""frobenius"""', 'tol': '(0.1)', 'random_state': '(1)'}), "(n_components=self.r, init='nndsvdar', solver='mu', beta_loss=\n 'frobenius', tol=0.1, random_state=1)\n", (478, 582), False, 'from sklearn.decomposition import NMF\n'), ((1103, 1137), 'numpy.argsort', 'np.argsort', (['self.W[:, topic_index]'], {}), '(self.W[:, topic_index])\n', (1113, 1137), True, 'import numpy as np\n')]
|
from ..utils import entropy_gaussian
from ..core import cmutinf, centropy, ncmutinf
from ..metrics import (AlphaAngleTransferEntropy, ContactTransferEntropy,
DihedralTransferEntropy)
from msmbuilder.example_datasets import FsPeptide
import numpy as np
from numpy.testing import assert_almost_equal as eq, assert_allclose as close
rs = np.random.RandomState(42)
n, d = 50000, 3
P = np.array([[1, .5, .25], [.5, 1, 0], [.25, 0, 1]])
COV = np.dot(P, P.T)
Y = rs.randn(d, n)
a, b, c = np.dot(P, Y)
a, b, c = np.atleast_2d(a).T, np.atleast_2d(b).T, np.atleast_2d(c).T
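# Closed-form Gaussian reference values: I(a;b|c) = H(a,c) + H(b,c) - H(a,b,c) - H(c),
# normalized below by the conditional entropy H(a|c) = H(a,c) - H(c).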
true_cmutinf = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) +
entropy_gaussian(COV[[[1, 1], [1, 2]], [[1, 2], [2, 2]]]) -
entropy_gaussian(COV) - entropy_gaussian(COV[2, 2]))
true_cond_ent = (entropy_gaussian(COV[[[0, 0], [0, 2]], [[0, 2], [2, 2]]]) -
entropy_gaussian(COV[2, 2]))
TRUE_NCMUTINF = true_cmutinf / true_cond_ent
def test_ncmutinf_kde():
close(ncmutinf(3, a, b, c, method='kde'), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf_knn():
close(ncmutinf(3, a, b, c, method='knn'), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf_chaowangjost():
close(ncmutinf(8, a, b, c, method='chaowangjost'), TRUE_NCMUTINF, atol=.05,
rtol=.2)
def test_ncmutinf_grassberger():
close(ncmutinf(8, a, b, c, method='grassberger'), TRUE_NCMUTINF, atol=.05,
rtol=.2)
def test_ncmutinf_doanes_rule():
close(ncmutinf(None, a, b, c, method='grassberger'), TRUE_NCMUTINF,
atol=.05, rtol=.4)
def test_ncmutinf_naive():
close(ncmutinf(8, a, b, c, method=None), TRUE_NCMUTINF, atol=.05, rtol=.2)
def test_ncmutinf():
a = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
b = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
c = rs.uniform(low=0, high=360, size=1000).reshape(-1, 1)
NCMI_REF = (cmutinf(10, a, b, c) /
centropy(10, a, c))
NCMI = ncmutinf(10, a, b, c)
eq(NCMI, NCMI_REF, 5)
def test_fs_tent():
traj1, traj2 = FsPeptide().get().trajectories[:2]
idx = [at.index for at in traj1.topology.atoms
if at.residue.index in [3, 4, 5, 6, 7, 8]]
traj1 = traj1.atom_slice(atom_indices=idx)[::100]
traj2 = traj2.atom_slice(atom_indices=idx)[::100]
traj = (traj1, traj2)
yield _test_tent_alpha, traj
yield _test_tent_contact, traj
yield _test_tent_dihedral, traj
def _test_tent_alpha(traj):
tent = AlphaAngleTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
def _test_tent_contact(traj):
tent = ContactTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
def _test_tent_dihedral(traj):
tent = DihedralTransferEntropy()
T = tent.partial_transform(traj)
assert T is not None
_test_tent_shuffle(tent, traj)
def _test_tent_shuffle(tent, traj):
T = tent.partial_transform(traj, shuffle=0)
TS = tent.partial_transform(traj, shuffle=1)
assert T is not None
assert TS is not None
|
[
"numpy.atleast_2d",
"msmbuilder.example_datasets.FsPeptide",
"numpy.array",
"numpy.dot",
"numpy.testing.assert_almost_equal",
"numpy.random.RandomState"
] |
[((362, 387), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (383, 387), True, 'import numpy as np\n'), ((409, 462), 'numpy.array', 'np.array', (['[[1, 0.5, 0.25], [0.5, 1, 0], [0.25, 0, 1]]'], {}), '([[1, 0.5, 0.25], [0.5, 1, 0], [0.25, 0, 1]])\n', (417, 462), True, 'import numpy as np\n'), ((465, 479), 'numpy.dot', 'np.dot', (['P', 'P.T'], {}), '(P, P.T)\n', (471, 479), True, 'import numpy as np\n'), ((509, 521), 'numpy.dot', 'np.dot', (['P', 'Y'], {}), '(P, Y)\n', (515, 521), True, 'import numpy as np\n'), ((2031, 2052), 'numpy.testing.assert_almost_equal', 'eq', (['NCMI', 'NCMI_REF', '(5)'], {}), '(NCMI, NCMI_REF, 5)\n', (2033, 2052), True, 'from numpy.testing import assert_almost_equal as eq, assert_allclose as close\n'), ((532, 548), 'numpy.atleast_2d', 'np.atleast_2d', (['a'], {}), '(a)\n', (545, 548), True, 'import numpy as np\n'), ((552, 568), 'numpy.atleast_2d', 'np.atleast_2d', (['b'], {}), '(b)\n', (565, 568), True, 'import numpy as np\n'), ((572, 588), 'numpy.atleast_2d', 'np.atleast_2d', (['c'], {}), '(c)\n', (585, 588), True, 'import numpy as np\n'), ((2095, 2106), 'msmbuilder.example_datasets.FsPeptide', 'FsPeptide', ([], {}), '()\n', (2104, 2106), False, 'from msmbuilder.example_datasets import FsPeptide\n')]
|
import tensorflow as tf
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from dl_utils.tf.plot_weights import plot_weights
# CUDA GPU
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
def get_stats(X,Y):
print("X : shape : (%d,%d)" % (X.shape), end='')
print(",min : %f, max : %f" % (np.min(X), np.max(X)))
print("Y : shape : (%d,%d)" % (Y.shape), end='')
print(", min : %f, max : %f" % (np.min(Y), np.max(Y)))
def load_data(one_hot=False, nb_classes=10):
from tensorflow.examples.tutorials.mnist import input_data
# load data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=one_hot)
x_train, y_train = mnist.train.images, mnist.train.labels
x_test, y_test = mnist.test.images, mnist.test.labels
x_validation, y_validation = mnist.validation.images, mnist.validation.labels
if not(one_hot):
y_train = tf.keras.utils.to_categorical(y_train, num_classes=nb_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=nb_classes)
y_validation = tf.keras.utils.to_categorical(y_validation, num_classes=nb_classes)
# print stats
print("train : ")
get_stats(x_train, y_train)
print("test : ")
get_stats(x_test, y_test)
print("validation : ")
get_stats(x_validation, y_validation)
return mnist, x_train, y_train, x_test, y_test, x_validation, y_validation
def build_model(use_softmax=False):
model = Sequential()
model.add(Dense(256, input_shape=(None, 784), activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(10))
# softmax
if use_softmax:
model.add(Activation('softmax'))
return model
def main():
print('In main...')
# 1. load the data
data, x_train, y_train, x_test, y_test, x_validation, y_validation = load_data()
# 2. create model
model = build_model()
#3. get logits
x = tf.placeholder(dtype=tf.float32, shape=(None, 784))
y = tf.placeholder(dtype=tf.float32, shape=(None, 10))
logits = model(x)
model.summary()
# 4. get loss
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)
cost = tf.reduce_sum(cross_entropy)
# 5. Optimization
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
# 6. Performance checks
y_pred = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(y_pred, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# 7. session run
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
nb_epochs = 100
nb_batches = 256
for epoch in range(nb_epochs):
avg_cost = 0
# shuffle
x_train, y_train = shuffle(x_train, y_train)
for j in range(0, int(x_train.shape[0]/nb_batches)):
start = j*nb_batches
end = (j+1)*nb_batches
if end > x_train.shape[0]:
end = x_train.shape[0]
x_batch, y_batch = x_train[start:end,:], y_train[start:end,:]
# run optimization on this batch
_, c = sess.run([optimizer,cost], feed_dict={x:x_batch, y:y_batch})
avg_cost += c/nb_batches
# Display results
if epoch % 10 == 0:
acc = sess.run(accuracy, feed_dict={x:x_validation, y:y_validation})
print("Epoch:", '%04d' % (epoch+1),
"cost={:.9f}".format(avg_cost),
"accuracy=", acc)
#layer_weights = model.layers[2].get_weights()[0]
#plot_weights(layer_weights, (10,10), idx=epoch)
print("Optimization finished...")
## 8. Test accuracy
#acc = sess.run(accuracy, feed_dict={x:x_test, y:y_test})
#print("Test accuracy = ", acc)
if __name__ == '__main__' :
main()
|
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.Session",
"sklearn.utils.shuffle",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.argmax",
"tensorflow.keras.layers.Dense",
"tensorflow.global_variables_initializer",
"tensorflow.nn.softmax",
"numpy.min",
"tensorflow.keras.layers.Activation",
"tensorflow.cast",
"tensorflow.keras.models.Sequential"
] |
[((750, 807), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': 'one_hot'}), "('MNIST_data/', one_hot=one_hot)\n", (775, 807), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1606, 1618), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1616, 1618), False, 'from tensorflow.keras.models import Sequential\n'), ((2076, 2127), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 784)'}), '(dtype=tf.float32, shape=(None, 784))\n', (2090, 2127), True, 'import tensorflow as tf\n'), ((2136, 2186), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 10)'}), '(dtype=tf.float32, shape=(None, 10))\n', (2150, 2186), True, 'import tensorflow as tf\n'), ((2268, 2335), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'y'}), '(logits=logits, labels=y)\n', (2310, 2335), True, 'import tensorflow as tf\n'), ((2347, 2375), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_entropy'], {}), '(cross_entropy)\n', (2360, 2375), True, 'import tensorflow as tf\n'), ((2527, 2548), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2540, 2548), True, 'import tensorflow as tf\n'), ((1050, 1112), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_train'], {'num_classes': 'nb_classes'}), '(y_train, num_classes=nb_classes)\n', (1079, 1112), True, 'import tensorflow as tf\n'), ((1130, 1191), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_test'], {'num_classes': 'nb_classes'}), '(y_test, num_classes=nb_classes)\n', (1159, 1191), True, 'import tensorflow as tf\n'), ((1215, 1282), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_validation'], {'num_classes': 'nb_classes'}), '(y_validation, num_classes=nb_classes)\n', (1244, 1282), True, 'import tensorflow as tf\n'), ((1633, 1687), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'input_shape': '(None, 784)', 'activation': '"""relu"""'}), "(256, input_shape=(None, 784), activation='relu')\n", (1638, 1687), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((1703, 1732), 'tensorflow.keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (1708, 1732), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((1748, 1757), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (1753, 1757), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((2583, 2608), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2592, 2608), True, 'import tensorflow as tf\n'), ((2610, 2630), 'tensorflow.argmax', 'tf.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2619, 2630), True, 'import tensorflow as tf\n'), ((2662, 2701), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2669, 2701), True, 'import tensorflow as tf\n'), ((2734, 2746), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2744, 2746), True, 'import tensorflow as tf\n'), ((1812, 1833), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1822, 1833), False, 'from tensorflow.keras.layers import Dense, Activation\n'), ((2415, 2469), 'tensorflow.train.GradientDescentOptimizer', 
'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2448, 2469), True, 'import tensorflow as tf\n'), ((2773, 2806), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2804, 2806), True, 'import tensorflow as tf\n'), ((2975, 3000), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (2982, 3000), False, 'from sklearn.utils import shuffle\n'), ((476, 485), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (482, 485), True, 'import numpy as np\n'), ((487, 496), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (493, 496), True, 'import numpy as np\n'), ((588, 597), 'numpy.min', 'np.min', (['Y'], {}), '(Y)\n', (594, 597), True, 'import numpy as np\n'), ((599, 608), 'numpy.max', 'np.max', (['Y'], {}), '(Y)\n', (605, 608), True, 'import numpy as np\n')]
|
#!/usr/local/bin/python
import pybullet
import time
import pybullet_data
import math, random
import sys
import numpy
import OpenGL
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import ctypes
from OpenGL.GL import shaders
import render.cubeRender as cubeRender
import render.worldRender as worldRender
import render.renderLoop as renderLoop
import world.worldGen as worldGen
import gui.textRender as textRender
import gui.invRender as invRender
import gui.inventory as inventory
# TERRAIN VBO ARRAYS
chunksize = 16
basez = -9
world = {}
view_range = 1
chunk_view_adjustment = 4.0 # Multiplier applied to world coordinates when rendering so the terrain doesn't look too small as the player walks over it.
for x in range(-5, 5):
for y in range(-5, 5):
chunk = worldGen.worldGen(chunksize)
world[(x,y)] = chunk
print(world.keys())
terrain_vbo = numpy.array([], numpy.float32)
color_vbo = numpy.array([], numpy.float32)
stos = [] # Static Terrain Objects, which appear with the terrain
# Cubes, non-terrain object arrays. Using VBOs for moving objects is laggy.
cubes = []
vertex_array = numpy.array([], numpy.float32)
color_array = numpy.array([], numpy.float32)
# Temporary line to test world rendering.
display = (1200, 720)
def init_libs():
"""Initialize Pybullet and Pygame. Turn on GL's depth test and make the sky blue."""
physicsClient = pybullet.connect(pybullet.DIRECT)
pybullet.setGravity(0,0,-40)
pygame.init()
pygame.display.set_mode(display, HWSURFACE|OPENGL|DOUBLEBUF)
pygame.key.set_repeat(1, 2)
glEnable(GL_DEPTH_TEST)
    glClearColor(0.5, 0.6, 1.0, 0.0)
glViewport(0, 0, display[0], display[1])
def setup_world(world, player_chunk_position):
"""Sets up the basic debug world."""
plane = pybullet.createCollisionShape(pybullet.GEOM_PLANE)
pybullet.createMultiBody(0,plane,-1,[0,0,-9])
# Later on my plan is to just generate a world. For now, we need some debug cubes.
cubes.append(cubeRender.createCube([0,12,0], 1, [45,45,45]))
cubes.append(cubeRender.createCube([4,-4,6], 1, [0,0,0]))
cubes.append(cubeRender.createCube([4,5.9,9], 2, [45,30,10]))
addSTO([18,3], 1, [0.6, 0.2, 0.1])
boxestodelete = worldGen.resetWorldBoxes(chunksize, -9, player_chunk_position, world) # We run this once to initiate the first collision boxes.
return boxestodelete
def reset_camera():
"""Resets the camera to the start position. Returns Yaw and Camera Position."""
# These numbers have no significance other than just being near where the cubes and terrain are rendered. (At the Origin)
yaw = 0.0
pitch = 0.0
camerax = -3
cameray = 1
cameraz = -2
# gluLookAt takes 9 arguments, the camera position, the lookat position and the up vector.
# (Just set the up vector to all zeroes except for a 1 for the axis that is upwards)
# gluLookAt also multiplies the "current vector" rather than changing the camera vector because PyOpenGL is stupid.
# Use glLoadIdentity() to stop this.
glLoadIdentity()
gluPerspective(45, (float(display[0])/float(display[1])), 0.1, 100.0)
gluLookAt(camerax,cameray,cameraz, camerax+(math.cos(yaw)*math.cos(pitch)),cameray+(math.sin(yaw)*math.cos(pitch)),(-4)+math.cos(pitch), 0,0,1)
return yaw, pitch, camerax, cameray, cameraz
def create_program():
VERTEX_SHADER = """
attribute vec3 a_Position;
attribute vec3 a_Color;
varying vec4 v_Color;
void main()
{
v_Color = vec4(a_Color, 1.0);
gl_Position = gl_ModelViewMatrix * vec4(a_Position, 1.0);
}
"""
FRAGMENT_SHADER = """
varying vec4 v_Color;
void main()
{
gl_FragColor = v_Color;
}
"""
vertshader = shaders.compileShader(VERTEX_SHADER, GL_VERTEX_SHADER)
fragshader = shaders.compileShader(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
program = glCreateProgram()
glAttachShader(program, vertshader)
glAttachShader(program, fragshader)
glLinkProgram(program)
return program
def create_gui_program():
VERTEX_SHADER = """
attribute vec3 a_Position;
attribute vec3 a_Color;
varying vec4 v_Color;
void main()
{
v_Color = vec4(a_Color, 1.0);
gl_Position = vec4(a_Position, 1.0);
}
"""
FRAGMENT_SHADER = """
varying vec4 v_Color;
void main()
{
gl_FragColor = v_Color;
}
"""
vertshader = shaders.compileShader(VERTEX_SHADER, GL_VERTEX_SHADER)
fragshader = shaders.compileShader(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
program = glCreateProgram()
glAttachShader(program, vertshader)
glAttachShader(program, fragshader)
glLinkProgram(program)
return program
def addVBOVertex(vertex, color):
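    """Append a vertex position and its color to the global terrain VBO arrays."""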
global terrain_vbo
global color_vbo
terrain_vbo = numpy.append(terrain_vbo, [vertex[0],vertex[1],vertex[2]])
color_vbo = numpy.append(color_vbo, [color[0],color[1],color[2]])
def addSTO(position2d, size, color):
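    """Place a Static Terrain Object of the given size and color on top of the terrain column at position2d."""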
chunk_x = int(math.floor(position2d[0]/chunksize))
chunk_y = int(math.floor(position2d[1]/chunksize))
chunk_position = (chunk_x, chunk_y)
x = int(position2d[0] - (chunk_x*chunksize))
y = int(position2d[1] - (chunk_y*chunksize))
z = len(world[chunk_position][x][y]) + basez + size
stos.append([(position2d[0],position2d[1],z), size, color])
def recalculate_vbos(buffers, player_chunk_position, view_range):
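    """Rebuild the terrain vertex and color arrays around the player's chunk and upload them to the GPU buffers."""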
global terrain_vbo
global color_vbo
terrain_vbo = numpy.array([], numpy.float32)
color_vbo = numpy.array([], numpy.float32)
groundpoints, topsoil = worldRender.groundVertices(chunksize, basez, world, player_chunk_position, view_range, chunk_view_adjustment)
for i in range(0,len(groundpoints)):
if topsoil[i] == 0:
addVBOVertex(groundpoints[i],(0.7,0.5,0.2))
elif topsoil[i] == 1:
addVBOVertex(groundpoints[i],(0.3,0.7,0.3))
elif topsoil[i] == 2:
addVBOVertex(groundpoints[i],(0.6,0.6,0.3))
glBindBuffer(GL_ARRAY_BUFFER, buffers[0])
glBufferData(GL_ARRAY_BUFFER, len(terrain_vbo)*4, (ctypes.c_float*len(terrain_vbo))(*terrain_vbo), GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, buffers[1])
glBufferData(GL_ARRAY_BUFFER, len(color_vbo)*4, (ctypes.c_float*len(color_vbo))(*color_vbo), GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
init_libs()
player_chunk_position = (round(-3/chunksize), round(1/chunksize)) # -3 and 1 are the camera's default position; reset_camera() must run after the world is set up, so the chunk position is seeded here.
last_player_chunk_position = player_chunk_position
boxestodelete = setup_world(world, player_chunk_position)
yaw, pitch, camerax, cameray, cameraz = reset_camera()
program = create_program()
gui_program = create_gui_program()
grab_mouse = False
gui_active = False
buffers = glGenBuffers(2)
recalculate_vbos(buffers, player_chunk_position, view_range)
walkspeed = 0.5
sensitivity = 400.0
text_collection = textRender.TextCollection(display, "gui/textures/")
text_collection.add_text("PyOpenGL Sandbox", 30.0, 0.0, 0.8, True)
prev_pressed = pygame.key.get_pressed()
no_key_timer = 0
gui_v, gui_c = invRender.create_inventory(2,4, display, [])
player_inventory = inventory.create_inv(2,4)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif event.type == pygame.KEYDOWN:
pressed_keys = pygame.key.get_pressed()
if pressed_keys[pygame.K_m] and (no_key_timer > 5 or not prev_pressed[pygame.K_m]):
if grab_mouse:
grab_mouse = False
pygame.mouse.set_visible(True)
else:
grab_mouse = True
pygame.mouse.set_visible(False)
if pressed_keys[pygame.K_w] and not gui_active:
camerax += math.cos(yaw) * walkspeed
cameray += math.sin(yaw) * walkspeed
elif pressed_keys[pygame.K_s] and not gui_active:
camerax -= math.cos(yaw) * walkspeed
cameray -= math.sin(yaw) * walkspeed
if pressed_keys[pygame.K_a] and not gui_active:
camerax += math.cos(yaw+(math.pi/2.0)) * walkspeed
cameray += math.sin(yaw+(math.pi/2.0)) * walkspeed
if pressed_keys[pygame.K_d] and not gui_active:
camerax += math.cos(yaw-(math.pi/2.0)) * walkspeed
cameray += math.sin(yaw-(math.pi/2.0)) * walkspeed
if pressed_keys[pygame.K_SPACE] and not gui_active:
yaw, pitch, camerax, cameray, cameraz = reset_camera()
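            # Q key: dig out the top block of the targeted terrain column, add the matching material (dirt or sand) to the inventory, then rebuild collision boxes and terrain VBOs.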
if pressed_keys[pygame.K_q]:
digx = int(float(camerax)/chunk_view_adjustment)
digy = int(float(cameray)/chunk_view_adjustment)
chunk = world[player_chunk_position]
if digx < len(chunk) -1:
if digy < len(chunk[digx]) -1:
if len(world[player_chunk_position][digx][digy]) != 1:
if world[player_chunk_position][digx][digy][-1] == 1 or world[player_chunk_position][digx][digy][-1] == 0:
inventory.add_to_inv(player_inventory, "dirt")
elif world[player_chunk_position][digx][digy][-1] == 2:
inventory.add_to_inv(player_inventory, "sand")
del world[player_chunk_position][digx][digy][-1]
else:
world[player_chunk_position][digx][digy][-1] = 2
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
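            # E key: place a block from the inventory (dirt first, then sand) on the targeted terrain column, then rebuild collision boxes and terrain VBOs.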
if pressed_keys[pygame.K_e]:
digx = int(float(camerax)/chunk_view_adjustment) - player_chunk_position[0]*chunksize
digy = int(float(cameray)/chunk_view_adjustment) - player_chunk_position[1]*chunksize
chunk = world[player_chunk_position]
if digx < len(chunk) -1:
if digy < len(chunk[digx]) -1:
if inventory.inv_contains(player_inventory, "dirt"):
world[player_chunk_position][digx][digy].append(0)
inventory.remove_from_inv(player_inventory, "dirt")
elif inventory.inv_contains(player_inventory, "sand"):
world[player_chunk_position][digx][digy].append(2)
inventory.remove_from_inv(player_inventory, "sand")
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
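            # F key: apply an upward external force to every dynamic cube.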
if pressed_keys[pygame.K_f] and not gui_active:
for cube in cubes:
pybullet.applyExternalForce(cube[0], -1, [0,0,100],[0,0,0],pybullet.LINK_FRAME)
if pressed_keys[pygame.K_i] and (no_key_timer > 5 or not prev_pressed[pygame.K_i]):
gui_v, gui_c = invRender.create_inventory(2,4, display, player_inventory)
                gui_active = not gui_active
no_key_timer = 0
prev_pressed = pressed_keys
elif event.type == pygame.MOUSEMOTION and grab_mouse and not gui_active:
mousemove = pygame.mouse.get_pos()
dyaw = mousemove[0] - (display[0]/2)
dpitch = mousemove[1] - (display[1]/2)
newpitch = pitch - dpitch/float(sensitivity)
yaw -= dyaw/float(sensitivity)
if newpitch > -1.45 and newpitch < 1.45:
pitch = newpitch
pygame.mouse.set_pos((display[0]/2),(display[1]/2))
# Step Physics Simulation
pybullet.stepSimulation()
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
player_chunk_position = (round(camerax/(chunksize*chunk_view_adjustment)), round(cameray/(chunksize*chunk_view_adjustment)))
if player_chunk_position != last_player_chunk_position:
boxestodelete = worldGen.resetWorldBoxes(chunksize, basez, player_chunk_position, world, boxestodelete)
recalculate_vbos(buffers, player_chunk_position, view_range)
last_player_chunk_position = player_chunk_position
glLoadIdentity()
gluPerspective(45, (float(display[0])/float(display[1])), 0.1, 100.0)
gluLookAt(camerax,cameray,cameraz, camerax+(math.cos(yaw)*math.cos(pitch)),cameray+(math.sin(yaw)*math.cos(pitch)),cameraz+math.sin(pitch), 0,0,1)
renderLoop.vbo_render(program, buffers, len(terrain_vbo)/3)
renderLoop.render_loop(program, cubes)
for sto in stos:
renderLoop.static_render_loop(program, sto[0], sto[1], sto[2])
#text_collection.render() #Laggy and problematic
if gui_active:
renderLoop.gui_render(gui_program, gui_v, gui_c)
pygame.display.flip()
pygame.time.wait(10)
no_key_timer += 1
|
[
"pygame.init",
"pygame.quit",
"math.floor",
"pybullet.setGravity",
"math.cos",
"numpy.array",
"gui.inventory.inv_contains",
"pybullet.createCollisionShape",
"gui.inventory.add_to_inv",
"render.renderLoop.gui_render",
"gui.inventory.remove_from_inv",
"pygame.mouse.set_pos",
"pygame.display.set_mode",
"pybullet.connect",
"pygame.display.flip",
"pygame.mouse.get_pos",
"gui.inventory.create_inv",
"gui.textRender.TextCollection",
"render.worldRender.groundVertices",
"render.renderLoop.render_loop",
"render.cubeRender.createCube",
"render.renderLoop.static_render_loop",
"gui.invRender.create_inventory",
"OpenGL.GL.shaders.compileShader",
"pygame.key.set_repeat",
"world.worldGen.worldGen",
"pybullet.createMultiBody",
"pygame.event.get",
"world.worldGen.resetWorldBoxes",
"pygame.time.wait",
"numpy.append",
"pygame.key.get_pressed",
"pygame.mouse.set_visible",
"pybullet.stepSimulation",
"math.sin",
"pybullet.applyExternalForce"
] |
[((889, 919), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (900, 919), False, 'import numpy\n'), ((932, 962), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (943, 962), False, 'import numpy\n'), ((1135, 1165), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (1146, 1165), False, 'import numpy\n'), ((1180, 1210), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (1191, 1210), False, 'import numpy\n'), ((6666, 6717), 'gui.textRender.TextCollection', 'textRender.TextCollection', (['display', '"""gui/textures/"""'], {}), "(display, 'gui/textures/')\n", (6691, 6717), True, 'import gui.textRender as textRender\n'), ((6801, 6825), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (6823, 6825), False, 'import pygame\n'), ((6859, 6904), 'gui.invRender.create_inventory', 'invRender.create_inventory', (['(2)', '(4)', 'display', '[]'], {}), '(2, 4, display, [])\n', (6885, 6904), True, 'import gui.invRender as invRender\n'), ((6923, 6949), 'gui.inventory.create_inv', 'inventory.create_inv', (['(2)', '(4)'], {}), '(2, 4)\n', (6943, 6949), True, 'import gui.inventory as inventory\n'), ((1397, 1430), 'pybullet.connect', 'pybullet.connect', (['pybullet.DIRECT'], {}), '(pybullet.DIRECT)\n', (1413, 1430), False, 'import pybullet\n'), ((1432, 1462), 'pybullet.setGravity', 'pybullet.setGravity', (['(0)', '(0)', '(-40)'], {}), '(0, 0, -40)\n', (1451, 1462), False, 'import pybullet\n'), ((1464, 1477), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1475, 1477), False, 'import pygame\n'), ((1479, 1543), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display', '(HWSURFACE | OPENGL | DOUBLEBUF)'], {}), '(display, HWSURFACE | OPENGL | DOUBLEBUF)\n', (1502, 1543), False, 'import pygame\n'), ((1541, 1568), 'pygame.key.set_repeat', 'pygame.key.set_repeat', (['(1)', '(2)'], {}), '(1, 2)\n', (1562, 1568), False, 'import pygame\n'), ((1769, 1819), 'pybullet.createCollisionShape', 'pybullet.createCollisionShape', (['pybullet.GEOM_PLANE'], {}), '(pybullet.GEOM_PLANE)\n', (1798, 1819), False, 'import pybullet\n'), ((1821, 1871), 'pybullet.createMultiBody', 'pybullet.createMultiBody', (['(0)', 'plane', '(-1)', '[0, 0, -9]'], {}), '(0, plane, -1, [0, 0, -9])\n', (1845, 1871), False, 'import pybullet\n'), ((2192, 2261), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', '(-9)', 'player_chunk_position', 'world'], {}), '(chunksize, -9, player_chunk_position, world)\n', (2216, 2261), True, 'import world.worldGen as worldGen\n'), ((3621, 3675), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['VERTEX_SHADER', 'GL_VERTEX_SHADER'], {}), '(VERTEX_SHADER, GL_VERTEX_SHADER)\n', (3642, 3675), False, 'from OpenGL.GL import shaders\n'), ((3690, 3748), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['FRAGMENT_SHADER', 'GL_FRAGMENT_SHADER'], {}), '(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)\n', (3711, 3748), False, 'from OpenGL.GL import shaders\n'), ((4248, 4302), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['VERTEX_SHADER', 'GL_VERTEX_SHADER'], {}), '(VERTEX_SHADER, GL_VERTEX_SHADER)\n', (4269, 4302), False, 'from OpenGL.GL import shaders\n'), ((4317, 4375), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['FRAGMENT_SHADER', 'GL_FRAGMENT_SHADER'], {}), '(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)\n', (4338, 4375), False, 'from OpenGL.GL import shaders\n'), ((4613, 4673), 'numpy.append', 
'numpy.append', (['terrain_vbo', '[vertex[0], vertex[1], vertex[2]]'], {}), '(terrain_vbo, [vertex[0], vertex[1], vertex[2]])\n', (4625, 4673), False, 'import numpy\n'), ((4685, 4740), 'numpy.append', 'numpy.append', (['color_vbo', '[color[0], color[1], color[2]]'], {}), '(color_vbo, [color[0], color[1], color[2]])\n', (4697, 4740), False, 'import numpy\n'), ((5255, 5285), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (5266, 5285), False, 'import numpy\n'), ((5299, 5329), 'numpy.array', 'numpy.array', (['[]', 'numpy.float32'], {}), '([], numpy.float32)\n', (5310, 5329), False, 'import numpy\n'), ((5357, 5470), 'render.worldRender.groundVertices', 'worldRender.groundVertices', (['chunksize', 'basez', 'world', 'player_chunk_position', 'view_range', 'chunk_view_adjustment'], {}), '(chunksize, basez, world, player_chunk_position,\n view_range, chunk_view_adjustment)\n', (5383, 5470), True, 'import render.worldRender as worldRender\n'), ((6976, 6994), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6992, 6994), False, 'import pygame\n'), ((10736, 10761), 'pybullet.stepSimulation', 'pybullet.stepSimulation', ([], {}), '()\n', (10759, 10761), False, 'import pybullet\n'), ((11527, 11565), 'render.renderLoop.render_loop', 'renderLoop.render_loop', (['program', 'cubes'], {}), '(program, cubes)\n', (11549, 11565), True, 'import render.renderLoop as renderLoop\n'), ((11769, 11790), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (11788, 11790), False, 'import pygame\n'), ((11792, 11812), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (11808, 11812), False, 'import pygame\n'), ((801, 829), 'world.worldGen.worldGen', 'worldGen.worldGen', (['chunksize'], {}), '(chunksize)\n', (818, 829), True, 'import world.worldGen as worldGen\n'), ((1966, 2016), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[0, 12, 0]', '(1)', '[45, 45, 45]'], {}), '([0, 12, 0], 1, [45, 45, 45])\n', (1987, 2016), True, 'import render.cubeRender as cubeRender\n'), ((2028, 2075), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[4, -4, 6]', '(1)', '[0, 0, 0]'], {}), '([4, -4, 6], 1, [0, 0, 0])\n', (2049, 2075), True, 'import render.cubeRender as cubeRender\n'), ((2087, 2138), 'render.cubeRender.createCube', 'cubeRender.createCube', (['[4, 5.9, 9]', '(2)', '[45, 30, 10]'], {}), '([4, 5.9, 9], 2, [45, 30, 10])\n', (2108, 2138), True, 'import render.cubeRender as cubeRender\n'), ((4793, 4830), 'math.floor', 'math.floor', (['(position2d[0] / chunksize)'], {}), '(position2d[0] / chunksize)\n', (4803, 4830), False, 'import math, random\n'), ((4845, 4882), 'math.floor', 'math.floor', (['(position2d[1] / chunksize)'], {}), '(position2d[1] / chunksize)\n', (4855, 4882), False, 'import math, random\n'), ((11019, 11110), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (11043, 11110), True, 'import world.worldGen as worldGen\n'), ((11586, 11648), 'render.renderLoop.static_render_loop', 'renderLoop.static_render_loop', (['program', 'sto[0]', 'sto[1]', 'sto[2]'], {}), '(program, sto[0], sto[1], sto[2])\n', (11615, 11648), True, 'import render.renderLoop as renderLoop\n'), ((11717, 11765), 'render.renderLoop.gui_render', 'renderLoop.gui_render', (['gui_program', 'gui_v', 'gui_c'], {}), '(gui_program, gui_v, gui_c)\n', (11738, 11765), True, 'import render.renderLoop as 
renderLoop\n'), ((3183, 3198), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3191, 3198), False, 'import math, random\n'), ((7031, 7044), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7042, 7044), False, 'import pygame\n'), ((11439, 11454), 'math.sin', 'math.sin', (['pitch'], {}), '(pitch)\n', (11447, 11454), False, 'import math, random\n'), ((3107, 3120), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (3115, 3120), False, 'import math, random\n'), ((3121, 3136), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3129, 3136), False, 'import math, random\n'), ((3147, 3160), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (3155, 3160), False, 'import math, random\n'), ((3161, 3176), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (3169, 3176), False, 'import math, random\n'), ((7110, 7134), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (7132, 7134), False, 'import pygame\n'), ((11360, 11373), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (11368, 11373), False, 'import math, random\n'), ((11374, 11389), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (11382, 11389), False, 'import math, random\n'), ((11400, 11413), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (11408, 11413), False, 'import math, random\n'), ((11414, 11429), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (11422, 11429), False, 'import math, random\n'), ((8830, 8921), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (8854, 8921), True, 'import world.worldGen as worldGen\n'), ((9680, 9771), 'world.worldGen.resetWorldBoxes', 'worldGen.resetWorldBoxes', (['chunksize', 'basez', 'player_chunk_position', 'world', 'boxestodelete'], {}), '(chunksize, basez, player_chunk_position, world,\n boxestodelete)\n', (9704, 9771), True, 'import world.worldGen as worldGen\n'), ((10098, 10157), 'gui.invRender.create_inventory', 'invRender.create_inventory', (['(2)', '(4)', 'display', 'player_inventory'], {}), '(2, 4, display, player_inventory)\n', (10124, 10157), True, 'import gui.invRender as invRender\n'), ((10382, 10404), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (10402, 10404), False, 'import pygame\n'), ((10649, 10701), 'pygame.mouse.set_pos', 'pygame.mouse.set_pos', (['(display[0] / 2)', '(display[1] / 2)'], {}), '(display[0] / 2, display[1] / 2)\n', (10669, 10701), False, 'import pygame\n'), ((7274, 7304), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(True)'], {}), '(True)\n', (7298, 7304), False, 'import pygame\n'), ((7343, 7374), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (7367, 7374), False, 'import pygame\n'), ((7441, 7454), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (7449, 7454), False, 'import math, random\n'), ((7483, 7496), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (7491, 7496), False, 'import math, random\n'), ((7710, 7739), 'math.cos', 'math.cos', (['(yaw + math.pi / 2.0)'], {}), '(yaw + math.pi / 2.0)\n', (7718, 7739), False, 'import math, random\n'), ((7766, 7795), 'math.sin', 'math.sin', (['(yaw + math.pi / 2.0)'], {}), '(yaw + math.pi / 2.0)\n', (7774, 7795), False, 'import math, random\n'), ((7872, 7901), 'math.cos', 'math.cos', (['(yaw - math.pi / 2.0)'], {}), '(yaw - math.pi / 2.0)\n', (7880, 7901), False, 'import math, random\n'), ((7928, 7957), 'math.sin', 'math.sin', (['(yaw - math.pi / 
2.0)'], {}), '(yaw - math.pi / 2.0)\n', (7936, 7957), False, 'import math, random\n'), ((9912, 10002), 'pybullet.applyExternalForce', 'pybullet.applyExternalForce', (['cube[0]', '(-1)', '[0, 0, 100]', '[0, 0, 0]', 'pybullet.LINK_FRAME'], {}), '(cube[0], -1, [0, 0, 100], [0, 0, 0], pybullet.\n LINK_FRAME)\n', (9939, 10002), False, 'import pybullet\n'), ((7577, 7590), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (7585, 7590), False, 'import math, random\n'), ((7618, 7631), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (7626, 7631), False, 'import math, random\n'), ((9310, 9358), 'gui.inventory.inv_contains', 'inventory.inv_contains', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (9332, 9358), True, 'import gui.inventory as inventory\n'), ((9425, 9476), 'gui.inventory.remove_from_inv', 'inventory.remove_from_inv', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (9450, 9476), True, 'import gui.inventory as inventory\n'), ((9488, 9536), 'gui.inventory.inv_contains', 'inventory.inv_contains', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (9510, 9536), True, 'import gui.inventory as inventory\n'), ((8509, 8555), 'gui.inventory.add_to_inv', 'inventory.add_to_inv', (['player_inventory', '"""dirt"""'], {}), "(player_inventory, 'dirt')\n", (8529, 8555), True, 'import gui.inventory as inventory\n'), ((9603, 9654), 'gui.inventory.remove_from_inv', 'inventory.remove_from_inv', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (9628, 9654), True, 'import gui.inventory as inventory\n'), ((8627, 8673), 'gui.inventory.add_to_inv', 'inventory.add_to_inv', (['player_inventory', '"""sand"""'], {}), "(player_inventory, 'sand')\n", (8647, 8673), True, 'import gui.inventory as inventory\n')]
|
import numpy as np
import tensorflow as tf
from gym import utils
from gym.envs.mujoco import mujoco_env
from meta_mb.meta_envs.base import MetaEnv
class InvertedPendulumEnv(mujoco_env.MujocoEnv, utils.EzPickle, MetaEnv):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, 'inverted_pendulum.xml', 2)
def step(self, a):
# reward = 1.0
reward = self._get_reward()
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
# notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2)
# done = not notdone
done = False
return ob, reward, done, {}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-0.01, high=0.01)
qvel = self.init_qvel + self.np_random.uniform(size=self.model.nv, low=-0.01, high=0.01)
self.set_state(qpos, qvel)
return self._get_obs()
def _get_reward(self):
old_ob = self._get_obs()
reward = -((old_ob[1]) ** 2)
return reward
def _get_obs(self):
return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()
def viewer_setup(self):
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent
def reward(self, obs, acts, next_obs):
assert obs.ndim == 2
assert obs.shape == next_obs.shape
assert obs.shape[0] == acts.shape[0]
return -(obs[:, 1]) ** 2
def tf_reward(self, obs, acts, next_obs):
return - tf.square(obs[:, 1])
if __name__ == "__main__":
env = InvertedPendulumEnv()
env.reset()
for _ in range(1000):
_ = env.render()
ob, rew, done, info = env.step(env.action_space.sample()) # take a random action
|
[
"numpy.concatenate",
"gym.utils.EzPickle.__init__",
"gym.envs.mujoco.mujoco_env.MujocoEnv.__init__",
"tensorflow.square"
] |
[((257, 286), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (280, 286), False, 'from gym import utils\n'), ((295, 358), 'gym.envs.mujoco.mujoco_env.MujocoEnv.__init__', 'mujoco_env.MujocoEnv.__init__', (['self', '"""inverted_pendulum.xml"""', '(2)'], {}), "(self, 'inverted_pendulum.xml', 2)\n", (324, 358), False, 'from gym.envs.mujoco import mujoco_env\n'), ((1574, 1594), 'tensorflow.square', 'tf.square', (['obs[:, 1]'], {}), '(obs[:, 1])\n', (1583, 1594), True, 'import tensorflow as tf\n'), ((1120, 1176), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos, self.sim.data.qvel]'], {}), '([self.sim.data.qpos, self.sim.data.qvel])\n', (1134, 1176), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
"""
Script to train a resnet
to determine if a Stokes-I radio cutout
contains a giant radio galaxy candidate.
Copyright (c) 2022 <NAME>
See LICENSE.md in root directory for full BSD-3 license.
Adapted from
Author: <NAME>
License: BSD
Source: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import WeightedRandomSampler
import numpy as np
import pandas as pd
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import shutil
from collections import Counter
from astropy.coordinates import SkyCoord
import astropy.units as u
from datetime import datetime
import socket
# # Load Data
dataset_name = 'cutouts_res6arcsec_destsize350arcsec_nfields100'
model_name='resnet101'
data_inspection=False
start = time.time()
print('-'*80)
print('Giants Resnet training script')
print('-'*80)
hostname = socket.gethostname()
print("Data- and save-paths set based on host:", hostname)
if hostname.startswith('lgm4'):
base_path = '/data1/mostertrij/data/giants'
elif hostname.endswith('liacs.nl'):
base_path = '/data/mostertrij/data/giants'
elif hostname.startswith('kafka'):
base_path = '/home/rafael/data/mostertrij/data/giants'
else:
print("Edit this script to include the correct paths for your machine:", hostname)
quit()
data_dir = os.path.join(base_path, dataset_name)
trained_dir = os.path.join(base_path, 'trained_models')
os.makedirs(trained_dir,exist_ok=True)
print("Assuming dataset is located at:", data_dir)
print("Saving trained models at:", trained_dir)
# Data augmentation and normalization for training
# Just normalization for validation
print("\nLoad data")
image_dimension_before_rotation = 400
image_dimension = int(np.floor(image_dimension_before_rotation/np.sqrt(2)))
print("Image dimension before and after rotation in pixels:", image_dimension_before_rotation,
image_dimension)
"""
torchvision.transforms.Normalize(mean, std, inplace=False)[source]
Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
Given mean: (mean[1],...,mean[n]) and std: (std[1],..,std[n]) for n channels,
this transform will normalize each channel of the input torch.
*Tensor i.e., output[channel] = (input[channel] - mean[channel]) / std[channel]
"""
data_mean = [0.2460, 0.6437, 0.4650]
data_std = [0.1285, 0.1169, 0.0789]
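# Illustrative sketch (not from the original script): per-channel statistics such as data_mean/data_std
# above could be estimated from the training images roughly as follows. The names stats_dataset,
# stats_loader, channel_sum, channel_sq_sum and n_pixels are hypothetical, and the snippet is left
# commented out so it does not alter the pipeline.
# stats_dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), transforms.ToTensor())
# stats_loader = torch.utils.data.DataLoader(stats_dataset, batch_size=64, num_workers=4)
# channel_sum, channel_sq_sum, n_pixels = torch.zeros(3), torch.zeros(3), 0
# for imgs, _ in stats_loader:                        # imgs has shape [B, C, H, W]
#     n_pixels += imgs.numel() / imgs.shape[1]        # pixels per channel in this batch
#     channel_sum += imgs.sum(dim=[0, 2, 3])
#     channel_sq_sum += (imgs ** 2).sum(dim=[0, 2, 3])
# mean = channel_sum / n_pixels                       # candidate for data_mean
# std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()  # candidate for data_std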
data_transforms = {
'train': transforms.Compose([
transforms.RandomHorizontalFlip(),
# Transforms for the giants
transforms.RandomVerticalFlip(),
transforms.RandomRotation((-180,180),expand=False),
transforms.Resize(image_dimension_before_rotation),
transforms.CenterCrop(image_dimension),
#transforms.RandomResizedCrop(image_dimension),
# #interpolation=<InterpolationMode.NEAREST: 'nearest'>, ),
#transforms.RandomGrayscale(p=1), # For BEES only!
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transforms.Normalize(data_mean, data_std )
]),
'val': transforms.Compose([
transforms.Resize(image_dimension_before_rotation),
transforms.CenterCrop(image_dimension),
#transforms.RandomGrayscale(p=1), # For BEES only!
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transforms.Normalize(data_mean, data_std )
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
# total_num_images = np.sum([len(image_datasets[x])
# for x in ['train', 'val']])
# weights = {x: total_num_images/len(image_datasets[x])
# for x in ['train', 'val']}
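# Balance the classes during training: each sample is drawn with probability inversely
# proportional to the size of its class via a WeightedRandomSampler (with replacement).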
target_list = [t for _, t in image_datasets['train'].samples]
target_dict = Counter(target_list)
print(target_dict)
class_weights = [1/target_dict[t] for t in target_list]
target_list = torch.tensor(target_list)
weighted_sampler = WeightedRandomSampler(
weights=class_weights,
num_samples=len(image_datasets['train']),
replacement=True)
dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=4,
sampler=weighted_sampler,num_workers=4),
'val': torch.utils.data.DataLoader(image_datasets['val'], batch_size=4,
shuffle=True, num_workers=4)}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
inp = data_std * inp + data_mean
inp = np.clip(inp, 0, 1)
plt.figure(figsize=(15,5))
plt.imshow(inp)
if title is not None:
plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
if data_inspection:
print(f"Showing training input examples (data_inspection={data_inspection})")
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
else:
print(f"Not showing training input examples (data_inspection={data_inspection})")
# # Training the model
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
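    """Run the train/validation loop, checkpoint the weights whenever validation accuracy improves, and return the model loaded with the best weights."""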
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
torch.save(model.state_dict(), os.path.join(trained_dir,
f'model_weights_{model_name}_{dataset_name}_{datetime.today().date().isoformat()}.pth'))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
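    """Show the model's predictions on a few validation images."""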
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure(figsize=(8,8))
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
if model_name == 'resnet101':
print("\nCreating a resnet101 model and load pretrained weights")
model_ft = models.resnet101(pretrained=True)
else:
print("\nCreating a resnet18 model and load pretrained weights")
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.5)
# # Train
print("\nTrain model")
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
print("\nSave final model")
torch.save(model_ft.state_dict(), os.path.join(trained_dir,
f'final_model_weights_{model_name}_{dataset_name}_{datetime.now().isoformat()}.pth'))
print(f"Done. Time taken: {time.time()-start:.1f} sec.")
|
[
"numpy.clip",
"numpy.sqrt",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torchvision.models.resnet18",
"torch.cuda.is_available",
"torch.sum",
"datetime.datetime.today",
"torchvision.utils.make_grid",
"matplotlib.pyplot.imshow",
"torch.set_grad_enabled",
"torchvision.transforms.ToTensor",
"socket.gethostname",
"matplotlib.pyplot.savefig",
"torchvision.models.resnet101",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomVerticalFlip",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"matplotlib.pyplot.title",
"time.time",
"torchvision.transforms.CenterCrop",
"os.makedirs",
"torchvision.transforms.RandomRotation",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"collections.Counter",
"torch.tensor",
"matplotlib.pyplot.figure",
"datetime.datetime.now",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad",
"matplotlib.pyplot.subplot"
] |
[((979, 990), 'time.time', 'time.time', ([], {}), '()\n', (988, 990), False, 'import time\n'), ((1070, 1090), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1088, 1090), False, 'import socket\n'), ((1525, 1562), 'os.path.join', 'os.path.join', (['base_path', 'dataset_name'], {}), '(base_path, dataset_name)\n', (1537, 1562), False, 'import os\n'), ((1577, 1618), 'os.path.join', 'os.path.join', (['base_path', '"""trained_models"""'], {}), "(base_path, 'trained_models')\n", (1589, 1618), False, 'import os\n'), ((1619, 1658), 'os.makedirs', 'os.makedirs', (['trained_dir'], {'exist_ok': '(True)'}), '(trained_dir, exist_ok=True)\n', (1630, 1658), False, 'import os\n'), ((4124, 4144), 'collections.Counter', 'Counter', (['target_list'], {}), '(target_list)\n', (4131, 4144), False, 'from collections import Counter\n'), ((4234, 4259), 'torch.tensor', 'torch.tensor', (['target_list'], {}), '(target_list)\n', (4246, 4259), False, 'import torch\n'), ((9502, 9524), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(2)'], {}), '(num_ftrs, 2)\n', (9511, 9524), True, 'import torch.nn as nn\n'), ((9570, 9591), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (9589, 9591), True, 'import torch.nn as nn\n'), ((9780, 9838), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_ft'], {'step_size': '(10)', 'gamma': '(0.5)'}), '(optimizer_ft, step_size=10, gamma=0.5)\n', (9799, 9838), False, 'from torch.optim import lr_scheduler\n'), ((4423, 4535), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['train']"], {'batch_size': '(4)', 'sampler': 'weighted_sampler', 'num_workers': '(4)'}), "(image_datasets['train'], batch_size=4, sampler=\n weighted_sampler, num_workers=4)\n", (4450, 4535), False, 'import torch\n'), ((4571, 4669), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (["image_datasets['val']"], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(4)'}), "(image_datasets['val'], batch_size=4, shuffle=\n True, num_workers=4)\n", (4598, 4669), False, 'import torch\n'), ((5024, 5042), 'numpy.clip', 'np.clip', (['inp', '(0)', '(1)'], {}), '(inp, 0, 1)\n', (5031, 5042), True, 'import numpy as np\n'), ((5047, 5074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (5057, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5093), 'matplotlib.pyplot.imshow', 'plt.imshow', (['inp'], {}), '(inp)\n', (5088, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(0.001)'], {}), '(0.001)\n', (5160, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5443, 5478), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['inputs'], {}), '(inputs)\n', (5470, 5478), False, 'import torchvision\n'), ((5739, 5750), 'time.time', 'time.time', ([], {}), '()\n', (5748, 5750), False, 'import time\n'), ((8301, 8327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (8311, 8327), True, 'import matplotlib.pyplot as plt\n'), ((9164, 9197), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9180, 9197), False, 'from torchvision import datasets, models, transforms\n'), ((9288, 9320), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (9303, 9320), False, 'from torchvision import datasets, models, transforms\n'), ((3680, 3705), 'os.path.join', 'os.path.join', (['data_dir', 
'x'], {}), '(data_dir, x)\n', (3692, 3705), False, 'import os\n'), ((4837, 4862), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4860, 4862), False, 'import torch\n'), ((5128, 5144), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5137, 5144), True, 'import matplotlib.pyplot as plt\n'), ((7898, 7909), 'time.time', 'time.time', ([], {}), '()\n', (7907, 7909), False, 'import time\n'), ((8337, 8352), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8350, 8352), False, 'import torch\n'), ((1967, 1977), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1974, 1977), True, 'import numpy as np\n'), ((2636, 2669), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2667, 2669), False, 'from torchvision import datasets, models, transforms\n'), ((2715, 2746), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (2744, 2746), False, 'from torchvision import datasets, models, transforms\n'), ((2757, 2809), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(-180, 180)'], {'expand': '(False)'}), '((-180, 180), expand=False)\n', (2782, 2809), False, 'from torchvision import datasets, models, transforms\n'), ((2817, 2867), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_dimension_before_rotation'], {}), '(image_dimension_before_rotation)\n', (2834, 2867), False, 'from torchvision import datasets, models, transforms\n'), ((2877, 2915), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_dimension'], {}), '(image_dimension)\n', (2898, 2915), False, 'from torchvision import datasets, models, transforms\n'), ((3111, 3132), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3130, 3132), False, 'from torchvision import datasets, models, transforms\n'), ((3218, 3259), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['data_mean', 'data_std'], {}), '(data_mean, data_std)\n', (3238, 3259), False, 'from torchvision import datasets, models, transforms\n'), ((3310, 3360), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_dimension_before_rotation'], {}), '(image_dimension_before_rotation)\n', (3327, 3360), False, 'from torchvision import datasets, models, transforms\n'), ((3370, 3408), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_dimension'], {}), '(image_dimension)\n', (3391, 3408), False, 'from torchvision import datasets, models, transforms\n'), ((3477, 3498), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3496, 3498), False, 'from torchvision import datasets, models, transforms\n'), ((3584, 3625), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['data_mean', 'data_std'], {}), '(data_mean, data_std)\n', (3604, 3625), False, 'from torchvision import datasets, models, transforms\n'), ((8559, 8580), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (8568, 8580), False, 'import torch\n'), ((7147, 7178), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (7156, 7178), False, 'import torch\n'), ((8684, 8730), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(num_images // 2)', '(2)', 'images_so_far'], {}), '(num_images // 2, 2, images_so_far)\n', (8695, 8730), True, 'import matplotlib.pyplot as plt\n'), ((10195, 10206), 'time.time', 'time.time', ([], {}), '()\n', (10204, 10206), False, 'import time\n'), ((6636, 6676), 'torch.set_grad_enabled', 
'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (6658, 6676), False, 'import torch\n'), ((6753, 6774), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6762, 6774), False, 'import torch\n'), ((10132, 10146), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10144, 10146), False, 'from datetime import datetime\n'), ((7817, 7833), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7831, 7833), False, 'from datetime import datetime\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score, roc_auc_score
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.impute import SimpleImputer
import functools
import pandas as pd
import math
from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS
from src.model.baseline import Baseline, LogisticBaseline
from src.plotutils import method_color, method_marker, set_mpl_default_settings
cols_sel = ['MMSE', 'PTGENDER', 'APOE4', 'AGE', 'PTEDUCAT', 'FDG',
'ABETA', 'TAU', 'PTAU', 'CDRSB', 'ADAS11', 'ADAS13', 'ADASQ4', 'RAVLT_immediate',
'RAVLT_learning', 'RAVLT_forgetting', 'RAVLT_perc_forgetting', 'LDELTOTAL',
'TRABSCOR', 'FAQ', 'MOCA', 'EcogPtMem', 'EcogPtLang', 'EcogPtVisspat', 'EcogPtPlan',
'EcogPtOrgan', 'EcogPtDivatt', 'EcogPtTotal', 'EcogSPMem', 'EcogSPLang', 'EcogSPVisspat',
'EcogSPPlan', 'EcogSPOrgan', 'EcogSPDivatt', 'EcogSPTotal',
'Ventricles', 'Hippocampus', 'WholeBrain', 'Entorhinal', 'Fusiform', 'MidTemp', 'ICV']
cols_categorical = ['PTGENDER', 'APOE4']
def quickADNI(set_path, task, priv_points, nan_threshold = 0.7, seed = 42):
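    """Build the ADNI prediction task from set_path and compare Baseline, LuPTS and (optionally) Stat-LuPTS over a range of training-set sizes using repeated 2-fold splits; returns results[sample_size][model] = [mean score, std score] (AUC for classification tasks, R2 for MMSE)."""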
#Set target based on task
if task == 'MCIAD' or task == 'AD':
target = 'AD'
elif task == 'CNMCI':
target = 'MCI'
elif task == 'MMSE':
target = 'MMSE'
#Data from subjects present at measurements bl, m12, m24, m36, m48
#Selection: What data are to be used for training/evaluating (1/3 privileged points)
data_viscodes = ['bl', 'm12', 'm24', 'm36', 'm48']
if priv_points == 1:
selection_viscodes = ['bl', 'm24', 'm48']
elif priv_points == 3:
selection_viscodes = data_viscodes
else:
raise ValueError('priv_points invalid value: ' + str(priv_points))
#Read data.
D = pd.read_csv(set_path)
D['AD'] = D['DX']=='Dementia'
D['MCI'] = D['DX']=='MCI'
D.loc[D['DX'].isna(), ['AD', 'MCI']] = np.nan
D.loc[:,'ABETA'] = D.loc[:,'ABETA'].replace('>1700', 1700, regex=True) \
.replace('<900', 900, regex=True) \
.replace('<200', 200, regex=True).astype(np.float32)
D.loc[:,'TAU'] = D.loc[:,'TAU'].replace('>1300', 1300, regex=True) \
.replace('<80', 80, regex=True).astype(np.float32)
D.loc[:,'PTAU'] = D.loc[:,'PTAU'].replace('>120', 120, regex=True) \
.replace('<8', 8, regex=True).astype(np.float32)
D = D.loc[:,['VISCODE', 'RID', 'MCI', 'AD'] + cols_sel]
D = pd.get_dummies(D, columns=cols_categorical)
#Drop features with more than nan_threshold% of the observations missing
to_be_removed = []
for code in data_viscodes:
count = len(D[D['VISCODE'] == code])
l = D[D['VISCODE'] == code].isna().sum()
for i, e in enumerate(l):
if nan_threshold < e/count:
if D.columns[i] not in to_be_removed:
to_be_removed += [D.columns[i]]
D = D.drop(to_be_removed, axis=1)
#Start to packet data into X, Y
frames = {}
for code in data_viscodes:
if code == data_viscodes[-1]:
frames[code] = D[D['VISCODE'] == code].dropna(subset=[target])
else:
frames[code] = D[D['VISCODE'] == code]
#Subjects present at all 'data_viscodes' measurements
I = get_rids(frames, task, data_viscodes)
data = {}
for code in selection_viscodes:
data[code] = frames[code][frames[code]['RID'].isin(I)]
print(task)
if task != 'MMSE':
print('Number of subjects: '+str(len(I)))
print('Number of positives at last time step: '+str(len(data[selection_viscodes[-1]][data[selection_viscodes[-1]][target] == 1].index)))
print('Number of negatives at last time step: '+str(len(data[selection_viscodes[-1]][data[selection_viscodes[-1]][target] == 0].index)))
else:
print('Number of subjects: '+str(len(I)))
features = [e for e in D.columns if e not in ['RID', 'VISCODE', 'MCI', 'AD']]
X = np.zeros((len(I), len(selection_viscodes)-1, len(features)))
data[selection_viscodes[-1]] = data[selection_viscodes[-1]].sort_values(by=['RID'])
Y = data[selection_viscodes[-1]][target].values
feature_index = {}
for j, code in enumerate(selection_viscodes[0:len(selection_viscodes)-1]):
data[code] = data[code].sort_values(by=['RID'])
data[code] = data[code].loc[:,features]
for feature in features:
feature_index[feature] = data[code].columns.get_loc(feature)
X[:,j,:] = data[code].values
data_size = len(X)
models = {}
#Set models to based on task regression/classification
if task != 'MMSE':
models['Baseline'] = LogisticBaseline(cv_search=True, folds=5, random_state = seed)
models['LuPTS'] = LogisticLUPTS(cv_search=True, folds=5, random_state = seed)
if priv_points == 3:
models['Stat-LuPTS'] = LogisticStatLUPTS(cv_search=True, folds=5, random_state = seed)
else:
models['Baseline'] = Baseline()
models['LuPTS'] = LUPTS()
if priv_points == 3:
models['Stat-LuPTS'] = StatLUPTS()
step = 20
bottom = 80
top = math.floor(data_size*0.5)
top = top - (top % step)
#Range of training sample sizes
tr_sample_sizes = range(bottom, top, step)
results = {}
np.random.seed(seed)
rkf = RepeatedKFold(n_splits=2, n_repeats=50, random_state=seed)
#Main loop
for sample_size in tr_sample_sizes:
results[sample_size] = {}
tmp_results = {}
for model_key in models.keys():
tmp_results[model_key] = []
#Splits, 2x50
for i, (I_tr, I_ts) in enumerate(rkf.split(X)):
sampled_I_tr = np.random.choice(I_tr, sample_size, replace=False)
training_data = X[sampled_I_tr,:,:].copy()
test_data = X[I_ts,:,:].copy()
#Impute missing values
for ixx, code in enumerate(selection_viscodes[0:len(selection_viscodes)-1]):
for j in range(training_data.shape[2]):
if all(np.isnan(training_data[:,ixx,j])):
                        print('Feature column', j, 'is all NaN at time step', ixx, '- imputing from the previous time step')
training_data[:,ixx,j] = np.mean(training_data[:,ixx-1,j])
imputer = SimpleImputer()
training_data[:,ixx,:] = imputer.fit_transform(training_data[:,ixx,:])
if ixx == 0:
test_data[:,ixx,:] = imputer.transform(test_data[:,ixx,:])
l_training_data = training_data.copy()
l_test_data = test_data.copy()
scaler = RobustScaler()
lupi_scaler = RobustScaler()
#Scale data for baseline
training_data[:,0,:] = scaler.fit_transform(training_data[:,0,:])
test_data[:,0,:] = scaler.transform(test_data[:,0,:])
#Scale data for LuPTS models, using observations over all time points per feature.
l_training_data = lupi_scaler.fit_transform(l_training_data.\
reshape((-1,X.shape[2]))).reshape((len(l_training_data), X.shape[1], X.shape[2]))
l_test_data= lupi_scaler.transform(l_test_data.reshape((-1,X.shape[2])))\
.reshape((len(I_ts), X.shape[1], X.shape[2]))
#Fit and evaluate models
for model_key in models.keys():
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
models[model_key].fit(l_training_data, Y[sampled_I_tr])
else:
models[model_key].fit(training_data, Y[sampled_I_tr])
if task != 'MMSE':
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
tmp_results[model_key] += [roc_auc_score(Y[I_ts], models[model_key].predict_proba(l_test_data)[:,1])]
else:
tmp_results[model_key] += [roc_auc_score(Y[I_ts], models[model_key].predict_proba(test_data)[:,1])]
else:
if (model_key == 'LuPTS') or (model_key == 'Stat-LuPTS'):
tmp_results[model_key] += [r2_score(Y[I_ts], models[model_key].predict(l_test_data))]
else:
tmp_results[model_key] += [r2_score(Y[I_ts], models[model_key].predict(test_data))]
#Record results over iterations
for model_key in models.keys():
results[sample_size][model_key] = [np.mean(tmp_results[model_key]), np.std(tmp_results[model_key])]
return results
def get_rids(frames, task, codes):
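    """Return the subject IDs (RIDs) present at every visit code, after restricting the first and last visits according to the task's diagnosis criteria."""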
if task == 'AD' or task == 'MMSE':
pass
elif task == 'CNMCI':
#Select patients with a negative AD diagnosis at last time step
#Select patients with CN status at baseline.
frames[codes[-1]] = frames[codes[-1]][(frames[codes[-1]]['AD'] == 0)]
frames[codes[0]] = frames[codes[0]][((frames[codes[0]]['MCI'] == 0) & (frames[codes[0]]['AD'] == 0))]
elif task == 'MCIAD':
#Select patients who are NOT CN at last time step.
#Select patients with MCI at baseline.
frames[codes[-1]] = frames[codes[-1]][((frames[codes[-1]]['AD'] == 1) | (frames[codes[-1]]['MCI'] == 1))]
frames[codes[0]] = frames[codes[0]][((frames[codes[0]]['MCI'] == 1) & (frames[codes[0]]['AD'] == 0))]
patient_ID = {}
for code in codes:
patient_ID[code] = frames[code]['RID'].unique()
I = functools.reduce(lambda a, b: np.intersect1d(a, b), [patient_ID[k] for k in patient_ID.keys()])
return I
def plot_result_dict(results, ylabel, title):
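    """Plot mean +/- one standard deviation of each model's score against the number of training samples."""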
set_mpl_default_settings()
fig = plt.figure(figsize=(6,6))
outer_keys = list(results.keys())
model_keys = list(results[outer_keys[0]].keys())
for model in model_keys:
mean = np.array([results[size][model][0] for size in outer_keys])
std = np.array([results[size][model][1] for size in outer_keys])
plt.plot(outer_keys, mean, color=method_color(model), marker=method_marker(model))
plt.fill_between(outer_keys, mean-std, mean+std, color=method_color(model), alpha=0.2)
plt.xlabel('Number of training samples')
plt.ylabel(ylabel)
plt.grid()
plt.title(title)
plt.legend(model_keys)
return fig
|
[
"src.plotutils.set_mpl_default_settings",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"math.floor",
"matplotlib.pyplot.ylabel",
"src.model.lupts.LogisticStatLUPTS",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"src.model.baseline.LogisticBaseline",
"numpy.random.seed",
"sklearn.model_selection.RepeatedKFold",
"numpy.random.choice",
"src.model.lupts.StatLUPTS",
"src.model.lupts.LogisticLUPTS",
"numpy.isnan",
"src.plotutils.method_color",
"src.plotutils.method_marker",
"numpy.std",
"pandas.get_dummies",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.intersect1d",
"src.model.lupts.LUPTS",
"matplotlib.pyplot.figure",
"sklearn.preprocessing.RobustScaler",
"src.model.baseline.Baseline",
"sklearn.impute.SimpleImputer"
] |
[((2145, 2166), 'pandas.read_csv', 'pd.read_csv', (['set_path'], {}), '(set_path)\n', (2156, 2166), True, 'import pandas as pd\n'), ((2870, 2913), 'pandas.get_dummies', 'pd.get_dummies', (['D'], {'columns': 'cols_categorical'}), '(D, columns=cols_categorical)\n', (2884, 2913), True, 'import pandas as pd\n'), ((5554, 5581), 'math.floor', 'math.floor', (['(data_size * 0.5)'], {}), '(data_size * 0.5)\n', (5564, 5581), False, 'import math\n'), ((5716, 5736), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5730, 5736), True, 'import numpy as np\n'), ((5747, 5805), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '(2)', 'n_repeats': '(50)', 'random_state': 'seed'}), '(n_splits=2, n_repeats=50, random_state=seed)\n', (5760, 5805), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((10011, 10037), 'src.plotutils.set_mpl_default_settings', 'set_mpl_default_settings', ([], {}), '()\n', (10035, 10037), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10049, 10075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (10059, 10075), True, 'import matplotlib.pyplot as plt\n'), ((10552, 10592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training samples"""'], {}), "('Number of training samples')\n", (10562, 10592), True, 'import matplotlib.pyplot as plt\n'), ((10597, 10615), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (10607, 10615), True, 'import matplotlib.pyplot as plt\n'), ((10620, 10630), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10628, 10630), True, 'import matplotlib.pyplot as plt\n'), ((10635, 10651), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10644, 10651), True, 'import matplotlib.pyplot as plt\n'), ((10661, 10683), 'matplotlib.pyplot.legend', 'plt.legend', (['model_keys'], {}), '(model_keys)\n', (10671, 10683), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5136), 'src.model.baseline.LogisticBaseline', 'LogisticBaseline', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5092, 5136), False, 'from src.model.baseline import Baseline, LogisticBaseline\n'), ((5165, 5222), 'src.model.lupts.LogisticLUPTS', 'LogisticLUPTS', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5178, 5222), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((5392, 5402), 'src.model.baseline.Baseline', 'Baseline', ([], {}), '()\n', (5400, 5402), False, 'from src.model.baseline import Baseline, LogisticBaseline\n'), ((5429, 5436), 'src.model.lupts.LUPTS', 'LUPTS', ([], {}), '()\n', (5434, 5436), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((10216, 10274), 'numpy.array', 'np.array', (['[results[size][model][0] for size in outer_keys]'], {}), '([results[size][model][0] for size in outer_keys])\n', (10224, 10274), True, 'import numpy as np\n'), ((10289, 10347), 'numpy.array', 'np.array', (['[results[size][model][1] for size in outer_keys]'], {}), '([results[size][model][1] for size in outer_keys])\n', (10297, 10347), True, 'import numpy as np\n'), ((5289, 5350), 'src.model.lupts.LogisticStatLUPTS', 'LogisticStatLUPTS', ([], {'cv_search': '(True)', 'folds': '(5)', 'random_state': 'seed'}), '(cv_search=True, folds=5, random_state=seed)\n', (5306, 5350), False, 
'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((5501, 5512), 'src.model.lupts.StatLUPTS', 'StatLUPTS', ([], {}), '()\n', (5510, 5512), False, 'from src.model.lupts import LUPTS, StatLUPTS, LogisticLUPTS, LogisticStatLUPTS\n'), ((6117, 6167), 'numpy.random.choice', 'np.random.choice', (['I_tr', 'sample_size'], {'replace': '(False)'}), '(I_tr, sample_size, replace=False)\n', (6133, 6167), True, 'import numpy as np\n'), ((6979, 6993), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (6991, 6993), False, 'from sklearn.preprocessing import RobustScaler\n'), ((7020, 7034), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (7032, 7034), False, 'from sklearn.preprocessing import RobustScaler\n'), ((9871, 9891), 'numpy.intersect1d', 'np.intersect1d', (['a', 'b'], {}), '(a, b)\n', (9885, 9891), True, 'import numpy as np\n'), ((6651, 6666), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (6664, 6666), False, 'from sklearn.impute import SimpleImputer\n'), ((8832, 8863), 'numpy.mean', 'np.mean', (['tmp_results[model_key]'], {}), '(tmp_results[model_key])\n', (8839, 8863), True, 'import numpy as np\n'), ((8865, 8895), 'numpy.std', 'np.std', (['tmp_results[model_key]'], {}), '(tmp_results[model_key])\n', (8871, 8895), True, 'import numpy as np\n'), ((10398, 10417), 'src.plotutils.method_color', 'method_color', (['model'], {}), '(model)\n', (10410, 10417), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10426, 10446), 'src.plotutils.method_marker', 'method_marker', (['model'], {}), '(model)\n', (10439, 10446), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((10511, 10530), 'src.plotutils.method_color', 'method_color', (['model'], {}), '(model)\n', (10523, 10530), False, 'from src.plotutils import method_color, method_marker, set_mpl_default_settings\n'), ((6474, 6508), 'numpy.isnan', 'np.isnan', (['training_data[:, ixx, j]'], {}), '(training_data[:, ixx, j])\n', (6482, 6508), True, 'import numpy as np\n'), ((6591, 6628), 'numpy.mean', 'np.mean', (['training_data[:, ixx - 1, j]'], {}), '(training_data[:, ixx - 1, j])\n', (6598, 6628), True, 'import numpy as np\n')]
|
import numpy as np
import itertools
import math
class SubwayFinder:
def find_subways(self, grouped_classifications):
if not all(letter in grouped_classifications for letter in ("S","U","B","W","A","Y")):
print("Can not find all parts of logo")
return []
sorted_classifications = [grouped_classifications[letter] for letter in ("S","U","B","W","A","Y")]
# import pdb; pdb.set_trace()
combinations = list(itertools.product(*sorted_classifications))
result = []
errors = [self._mean_squared_error(combination) for combination in combinations]
combinations_with_errors = sorted(zip(combinations, errors), key=lambda pair: pair[1])
while(len(combinations_with_errors) > 0):
best, _error = self._best_fitted_combination(combinations_with_errors)
            if best is None: break
result.append(best)
combinations_with_errors = self._remove_combination(combinations_with_errors, best)
return result
def _mean_squared_error(self, classifications):
y = [np.mean(classification.segment.box[0]) for classification in classifications]
x = [np.mean(classification.segment.box[1]) for classification in classifications]
a, b = np.polyfit(x, y, 1)
prediction = np.vectorize(lambda x: a*x + b)
return np.mean((prediction(x) - y) ** 2)
def _best_fitted_combination(self, combinations_with_errors):
if len(combinations_with_errors) < 1 or combinations_with_errors[0][1] > 100:
return(None, math.inf)
return combinations_with_errors[0]
def _remove_combination(self, combinations, best):
rest_combinations = []
best_segments = [classification.segment for classification in best]
for combination, error in combinations:
combination_segments = [classification.segment for classification in combination]
if (combination_segments[0] != best_segments[0] and
combination_segments[1] != best_segments[1] and
combination_segments[2] != best_segments[2] and
combination_segments[3] != best_segments[3] and
combination_segments[4] != best_segments[4] and
combination_segments[5] != best_segments[5]):
rest_combinations.append((combination, error))
return rest_combinations
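# --- Illustrative sketch (not part of the original class) ---
# _mean_squared_error scores a candidate letter combination by fitting a line
# through the box centres with np.polyfit and measuring the squared deviation
# from that line. The standalone example below reproduces that scoring on
# plain coordinates; the point values are made up purely for illustration.
if __name__ == "__main__":
    xs = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]   # hypothetical letter centres (x)
    ys = [12.0, 13.1, 13.9, 15.2, 16.0, 17.1]   # hypothetical letter centres (y)
    a, b = np.polyfit(xs, ys, 1)                # least-squares line y = a*x + b
    prediction = np.vectorize(lambda x: a * x + b)
    print("fit error:", np.mean((prediction(xs) - np.array(ys)) ** 2))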
|
[
"numpy.mean",
"itertools.product",
"numpy.vectorize",
"numpy.polyfit"
] |
[((1286, 1305), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (1296, 1305), True, 'import numpy as np\n'), ((1327, 1360), 'numpy.vectorize', 'np.vectorize', (['(lambda x: a * x + b)'], {}), '(lambda x: a * x + b)\n', (1339, 1360), True, 'import numpy as np\n'), ((465, 507), 'itertools.product', 'itertools.product', (['*sorted_classifications'], {}), '(*sorted_classifications)\n', (482, 507), False, 'import itertools\n'), ((1101, 1139), 'numpy.mean', 'np.mean', (['classification.segment.box[0]'], {}), '(classification.segment.box[0])\n', (1108, 1139), True, 'import numpy as np\n'), ((1192, 1230), 'numpy.mean', 'np.mean', (['classification.segment.box[1]'], {}), '(classification.segment.box[1])\n', (1199, 1230), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from autodcf.models._base import AbstractDCF
from datetime import datetime
class DCF(AbstractDCF):
"""Class for flexible DCF.
Note that all _to_sales args take either an iterable or float. If given a float, the DCF will
use this constant across all time periods (ex: if given 0.45 for COGS, COGS will be 45% of sales
for all forecasted periods). If given iterable, the first value will be the value used for the first
year in the forecast and the last value will be the value used in the terminal year.
Args:
company (autodcf.company.Company): Company to do DCF analysis for.
sales_growth (Union[Iterable, float]): Iterable of sales growth numbers to iterate over or constant growth rate.
Values are in order, so first value in iterable applies to next sales period and
last value applies to last sales period in DCF. Note, if you want to have 5% sales growth, use 0.05.
discount_rate (float): Rate at which cash flow should be discounted.
terminal_growth_rate (float): Rate at which sales are estimated to grow after returning to normal profit levels.
window (int): Number of years until company returns to normal profit margins (terminal year).
cogs_to_sales (Union[Iterable, float]): COGS as % of sales.
sga_to_sales (Union[Iterable, float]): SGA as % of sales.
rd_to_sales (Union[Iterable, float]): R&D as % of sales.
da_to_sales (Union[Iterable, float]): Depreciation & amortization as % of sales. Assumes amortization is tax
deductible.
interest_to_sales (Union[Iterable, float]): Interest as % of sales.
tax_rate (float): Tax rate.
capex_to_sales (Union[Iterable, float]): Capex as % of sales.
change_in_nwc_to_change_in_sales (float): Ratio of how much net working capital must change to increase sales by
1 unit.
"""
def __init__(self,
company,
sales_growth,
discount_rate,
terminal_growth_rate,
window,
cogs_to_sales,
sga_to_sales,
rd_to_sales,
da_to_sales,
interest_to_sales,
tax_rate,
capex_to_sales,
change_in_nwc_to_change_in_sales,
terminal_discount_rate=None):
self._company = company
self._sales_growth = sales_growth
self._discount_rate = discount_rate
self._terminal_growth_rate = terminal_growth_rate
self._window = window
self._cogs_to_sales = cogs_to_sales
self._sga_to_sales = sga_to_sales
self._rd_to_sales = rd_to_sales
self._da_to_sales = da_to_sales
self._interest_to_sales = interest_to_sales
self._tax_rate = tax_rate
self._capex_to_sales = capex_to_sales
self._change_in_nwc_to_change_in_sales = change_in_nwc_to_change_in_sales
self._forecast = pd.DataFrame(index=np.arange(-1, self.window + 1))
self._terminal_discount_rate = discount_rate if terminal_discount_rate is None else terminal_discount_rate
@property
def company(self):
"""Company object to do DCF for."""
return self._company
@property
def sales_growth(self):
"""Numpy array of sales growth for each year until end of window."""
return self._sales_growth
@property
def discount_rate(self):
"""Discount rate to discount cash flow at."""
return self._discount_rate
@property
def terminal_discount_rate(self):
"""Discount rate after terminal year."""
return self._terminal_discount_rate
@property
def terminal_growth_rate(self):
"""Rate at which sales are expected to grow perpetually."""
return self._terminal_growth_rate
@property
def window(self):
"""Periods of normal sales growth until terminal growth rate takes over."""
return self._window
@property
def cogs_to_sales(self):
"""Cost of goods sold as a percentage of sales."""
return self._cogs_to_sales
@property
def sga_to_sales(self):
"""Selling, general, and administrative costs as a percentage of sales."""
return self._sga_to_sales
@property
def rd_to_sales(self):
"""Research and development costs as a percentage of sales."""
return self._rd_to_sales
@property
def da_to_sales(self):
"""Depreciation and amortization as a percentage of sales."""
return self._da_to_sales
@property
def interest_to_sales(self):
"""Interest expense as a percentage of sales."""
return self._interest_to_sales
@property
def tax_rate(self):
"""Effective tax rate for company."""
return self._tax_rate
@property
def capex_to_sales(self):
"""Capital expenditures as a percentage of sales."""
return self._capex_to_sales
@property
def change_in_nwc_to_change_in_sales(self):
"""How much net working capital is expected to need to increase for each dollar increase in sales."""
return self._change_in_nwc_to_change_in_sales
def _calculate_sales(self):
"""Calculate sales for window of growth.
Returns:
Numpy array with sales from each period in order.
"""
sales_growth = np.repeat(self.sales_growth, self.window + 1) if isinstance(self.sales_growth,
float) else self.sales_growth
initial_sales = self.company.income_statement.sales
return np.concatenate(([initial_sales], initial_sales * np.cumprod(1 + sales_growth)))
def _multiply_by_sales_percent(self, percent_of_sales):
"""Find values for stat in all periods given percent of sales stat accounts for.
Returns:
Pandas series with statistic multiplied by forecast Sales values.
"""
return self._forecast['Sales'] * percent_of_sales
def _calculate_free_cash_flow(self):
"""Calculate free cash flow for each period.
Returns:
Pandas Series with free cash flow for each period in forecast.
"""
return self._forecast['Net Income'] + self._forecast['D&A'] - self._forecast['Capex'] - self._forecast[
'Change in NWC']
def _discount_cash_flows(self):
"""Discount cash flows at given discount rate."""
discount_factors = np.array([1 / (1 + self.discount_rate) ** i for i in range(self.window + 1)])
return self._forecast.loc[0:, 'FCF'] * discount_factors
def forecast(self):
"""Get pandas dataframe with all info needed to complete forecast.
Returns:
forecast (pd.DataFrame): Pandas data frame with forecasted future income statements and discounted
free cash flows.
"""
self._forecast['Year'] = np.arange(datetime.now().year - 1, datetime.now().year + self.window + 1)
self._forecast['Sales'] = self._calculate_sales()
self._forecast['COGS'] = self._multiply_by_sales_percent(self.cogs_to_sales)
self._forecast['Gross Profit'] = self._forecast['Sales'] - self._forecast['COGS']
self._forecast['SG&A'] = self._multiply_by_sales_percent(self.sga_to_sales)
self._forecast['Operating Profit'] = self._forecast['Gross Profit'] - self._forecast['SG&A']
self._forecast['R&D'] = self._multiply_by_sales_percent(self.rd_to_sales)
self._forecast['EBITDA'] = self._forecast['Operating Profit'] - self._forecast['R&D']
self._forecast['D&A'] = self._multiply_by_sales_percent(self.da_to_sales)
self._forecast['EBIT'] = self._forecast['EBITDA'] - self._forecast['D&A'] # noqa:E501
self._forecast['Interest'] = self._multiply_by_sales_percent(self.interest_to_sales)
self._forecast['EBT'] = self._forecast['EBIT'] - self._forecast['Interest']
self._forecast['Taxes'] = self._forecast['EBT'] * self.tax_rate
self._forecast.loc[-1, 'Taxes'] = self.company.income_statement.tax
self._forecast['Net Income'] = self._forecast['EBT'] - self._forecast['Taxes']
self._forecast['Capex'] = self._multiply_by_sales_percent(self.capex_to_sales)
# ΔSales * ΔNWC/ΔSales = ΔNWC
change_in_sales = np.diff(self._forecast['Sales'])
future_changes_nwc = change_in_sales * self.change_in_nwc_to_change_in_sales
self._forecast['Change in NWC'] = np.concatenate(([0.0], future_changes_nwc))
self._forecast['FCF'] = self._calculate_free_cash_flow()
self._forecast['Discounted FCF'] = self._discount_cash_flows()
return self._forecast
@property
def enterprise_value(self):
"""Enterprise value given by discounted cash flow analysis."""
return self.discounted_window_cash_flow + self.discounted_terminal_cash_flow
@property
def equity_value(self):
"""Returns total equity value of firm."""
return self.enterprise_value - self.company.balance_sheet.net_debt
@property
def equity_value_per_share(self):
"""Equity value divided by total number of shares outstanding."""
return self.equity_value / self.company.fully_diluted_shares
@property
def discounted_terminal_cash_flow(self):
"""Sum of discounted cash flows after window."""
f = self.forecast()
last_fcf = f.loc[self.window, 'Discounted FCF']
terminal_discount_minus_growth = (self.terminal_discount_rate - self.terminal_growth_rate)
tv_discounted_to_window = last_fcf * (1 + self.terminal_growth_rate) / terminal_discount_minus_growth
return tv_discounted_to_window / (1 + self.discount_rate) ** self.window
@property
def discounted_window_cash_flow(self):
"""Add up discounted cash flows from window."""
f = self.forecast()
return f.loc[0:, 'Discounted FCF'].sum()
@property
def absolute_upside_per_share(self):
return self.equity_value_per_share - self.company.price_per_share
@property
def percent_upside_per_share(self):
return self.absolute_upside_per_share / self.company.price_per_share
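# --- Illustrative sketch (not part of the original class) ---
# The valuation above reduces to two pieces: window cash flows discounted by
# 1 / (1 + r)^i, plus a terminal value taken from the last discounted FCF as
# last_fcf * (1 + g) / (r - g), discounted back over the window (mirroring
# discounted_terminal_cash_flow). All numbers below are made up purely to
# illustrate the arithmetic.
if __name__ == "__main__":
    fcf = np.array([100.0, 105.0, 110.0, 116.0, 122.0])  # hypothetical free cash flows, year 0..4
    r, g = 0.10, 0.02                                    # discount rate / terminal growth rate
    window = len(fcf) - 1
    discount_factors = np.array([1 / (1 + r) ** i for i in range(window + 1)])
    discounted_fcf = fcf * discount_factors
    window_value = float(np.sum(discounted_fcf))
    terminal_value = discounted_fcf[-1] * (1 + g) / (r - g) / (1 + r) ** window
    print("enterprise value:", round(window_value + terminal_value, 2))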
|
[
"numpy.repeat",
"numpy.arange",
"numpy.diff",
"datetime.datetime.now",
"numpy.concatenate",
"numpy.cumprod"
] |
[((8472, 8504), 'numpy.diff', 'np.diff', (["self._forecast['Sales']"], {}), "(self._forecast['Sales'])\n", (8479, 8504), True, 'import numpy as np\n'), ((8632, 8675), 'numpy.concatenate', 'np.concatenate', (['([0.0], future_changes_nwc)'], {}), '(([0.0], future_changes_nwc))\n', (8646, 8675), True, 'import numpy as np\n'), ((5489, 5534), 'numpy.repeat', 'np.repeat', (['self.sales_growth', '(self.window + 1)'], {}), '(self.sales_growth, self.window + 1)\n', (5498, 5534), True, 'import numpy as np\n'), ((3074, 3104), 'numpy.arange', 'np.arange', (['(-1)', '(self.window + 1)'], {}), '(-1, self.window + 1)\n', (3083, 3104), True, 'import numpy as np\n'), ((5805, 5833), 'numpy.cumprod', 'np.cumprod', (['(1 + sales_growth)'], {}), '(1 + sales_growth)\n', (5815, 5833), True, 'import numpy as np\n'), ((7074, 7088), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7086, 7088), False, 'from datetime import datetime\n'), ((7099, 7113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7111, 7113), False, 'from datetime import datetime\n')]
|
import numpy as np
import heapq
from typing import Union
class Graph:
def __init__(self, adjacency_mat: Union[np.ndarray, str]):
""" Unlike project 2, this Graph class takes an adjacency matrix as input. `adjacency_mat`
can either be a 2D numpy array of floats or the path to a CSV file containing a 2D numpy array of floats.
In this project, we will assume `adjacency_mat` corresponds to the adjacency matrix of an undirected graph
"""
if type(adjacency_mat) == str:
self.adj_mat = self._load_adjacency_matrix_from_csv(adjacency_mat)
elif type(adjacency_mat) == np.ndarray:
self.adj_mat = adjacency_mat
else:
raise TypeError('Input must be a valid path or an adjacency matrix')
self.mst = None
def _load_adjacency_matrix_from_csv(self, path: str) -> np.ndarray:
with open(path) as f:
return np.loadtxt(f, delimiter=',')
def construct_mst(self):
""" Given `self.adj_mat`, the adjacency matrix of a connected undirected graph, implement Prim's
algorithm to construct an adjacency matrix encoding the minimum spanning tree of `self.adj_mat`.
`self.adj_mat` is a 2D numpy array of floats.
Note that because we assume our input graph is undirected, `self.adj_mat` is symmetric.
Row i and column j represents the edge weight between vertex i and vertex j. An edge weight of zero indicates that no edge exists.
TODO:
This function does not return anything. Instead, store the adjacency matrix
representation of the minimum spanning tree of `self.adj_mat` in `self.mst`.
We highly encourage the use of priority queues in your implementation. See the heapq
module, particularly the `heapify`, `heappop`, and `heappush` functions.
"""
# initiating the visited list and the adjacency matrix object
visited = []
adj_mat = self.adj_mat
# determining how many vertices there are by looking at the shape of the array
vertices = adj_mat.shape[0]
# creating an object to reflect every vertex in the adj_mat
all_vertices = list(range(vertices))
# creating a new matrix for MST to exist
self.mst = np.array([[0 for column in range(vertices)] for row in range(vertices)])
# creating a priority queue to start out with
# it is a list structured as such: [ (edge weight, start node, end node), etc.]
start = 0
queue = []
for i in range(0,vertices):
if adj_mat[start][i] != 0:
element = adj_mat[start][i], start, i
queue.append(tuple(element))
heapq.heapify(queue)
# appending the start node to visited
visited.append(start)
# begin the while statement
while len(visited) != len(all_vertices):
# pop the lowest weight edge from the queue
weight, vertex_start, vertex_end = heapq.heappop(queue)
# if dest vertex not in visited:
# add edge to mst matrix
# add dest vertex to visited list
# add outgoing edges of dest vertex to priority queue
if vertex_end not in visited:
self.mst[vertex_start][vertex_end] = weight
self.mst[vertex_end][vertex_start] = weight
visited.append(vertex_end)
for i in all_vertices:
heapq.heappush(queue, (adj_mat[vertex_end][i], vertex_end, i))
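# --- Illustrative usage sketch (not part of the original class) ---
# A tiny symmetric adjacency matrix (0 = no edge) is enough to exercise
# construct_mst(); the weights below are made up purely for illustration.
if __name__ == "__main__":
    adjacency = np.array([
        [0.0, 1.0, 3.0, 0.0],
        [1.0, 0.0, 1.0, 4.0],
        [3.0, 1.0, 0.0, 2.0],
        [0.0, 4.0, 2.0, 0.0],
    ])
    g = Graph(adjacency)
    g.construct_mst()
    print(g.mst)   # adjacency matrix of the minimum spanning tree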
|
[
"heapq.heappush",
"numpy.loadtxt",
"heapq.heapify",
"heapq.heappop"
] |
[((2752, 2772), 'heapq.heapify', 'heapq.heapify', (['queue'], {}), '(queue)\n', (2765, 2772), False, 'import heapq\n'), ((925, 953), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (935, 953), True, 'import numpy as np\n'), ((3040, 3060), 'heapq.heappop', 'heapq.heappop', (['queue'], {}), '(queue)\n', (3053, 3060), False, 'import heapq\n'), ((3534, 3596), 'heapq.heappush', 'heapq.heappush', (['queue', '(adj_mat[vertex_end][i], vertex_end, i)'], {}), '(queue, (adj_mat[vertex_end][i], vertex_end, i))\n', (3548, 3596), False, 'import heapq\n')]
|
#
# Copyright 2021 <NAME>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import json
from scipy.special import erfinv
def wavelength_filter2D(field, lamb, sigma, hipass=False):
nx = field.shape[0]
measure = nx**2
Lx = 1
mu_init = np.sum(field)**2/measure
sigma_init = np.sqrt(np.sum((field - mu_init)**2)/measure)
print('sigma_init=',sigma_init)
qx = np.arange(0,nx, dtype=np.float64)
qx = np.where(qx <= nx//2, qx/Lx, (nx-qx)/Lx)
qx *= 2 * np.pi
qy = np.arange(0,nx//2 +1, dtype=np.float64)
qy*= 2*np.pi/Lx
q2 = (qx**2).reshape(-1,1) + (qy**2).reshape(1,-1)
filt = np.ones_like(q2)
q_s = 2*np.pi/lamb
if (hipass is True):
filt *= (q2 >= q_s ** 2)
else:
filt *= (q2 <= q_s ** 2)
h_qs = np.fft.irfftn( np.fft.rfftn(field) * filt, field.shape)
mu_filt = np.sum(h_qs)/measure
sigma_filt = np.sqrt(np.sum((h_qs - mu_filt)**2)/measure)
print('sigma_filt=',sigma_filt)
print('mu_filt=',mu_filt)
h_qs *= sigma/sigma_filt
mu_scaled = np.sum(h_qs)/measure
sigma_scaled = np.sqrt(np.sum((h_qs - mu_scaled)**2)/measure)
print('sigma_scaled=',sigma_scaled)
return h_qs
def smoothcutoff2D(field, minimum_val, k=10):
measure = np.array(field.shape).prod()
mu0 = np.sum(field)/measure
print('mu0', mu0)
print('cutval=', minimum_val-mu0)
cutfield = half_sigmoid(field-mu0, minimum_val-mu0, k=k)
mu_cutoff = np.sum(cutfield)/measure
sigma_cutoff = np.sqrt(np.sum((cutfield - mu_cutoff)**2)/measure)
print('sigma_cutoff=',sigma_cutoff)
print('minval_cutoff=',np.amin(cutfield)+mu0)
return cutfield + mu0
def half_sigmoid(f, cutoff, k=10):
x = np.asarray(f)
y = np.asarray(x+0.0)
y[np.asarray(x < 0)] = x[np.asarray(x < 0)]*abs(cutoff)/(
abs(cutoff)**k+np.abs(x[np.asarray(x < 0)])**k)**(1/k)
return y
def threshsymm(field, Vf):
measure = np.array(field.shape).prod()
mu = np.sum(field)/measure
sigma = np.sqrt(np.sum((field-mu)**2/measure))
thresh = 2**0.5*erfinv(2*Vf - 1)
thresh_scaled = thresh*sigma + mu
thresh_field = np.ones_like(field)
thresh_field[field < thresh_scaled] = -1
print(np.sum(thresh_field)/measure)
return thresh_field
def threshmatrix(field, Vf):
measure = np.array(field.shape).prod()
vfl = 0.5-Vf/2
vfu = 0.5+Vf/2
print(vfl, vfu)
threshL = 2**0.5*erfinv(2*vfl - 1)
threshU = 2**0.5*erfinv(2*vfu - 1)
print(threshL, threshU)
mu = np.sum(field)/measure
sigma = np.sqrt(np.sum((field-mu)**2/measure))
thresh_field = np.ones_like(field)
threshscL = threshL*sigma + mu
thresh_field[field < threshscL] = -1
threshscU = threshU*sigma + mu
thresh_field[field > threshscU] = -1
print(np.sum(thresh_field)/measure)
return thresh_field
def ACsmooth2D(field, nits, ACwidth=2**0.5):
a = field+0.0
# f=(W/4)*(1-a)^2*(1+a)^2
# da/dx = 2*sqrt(f)/e
# max da/dx = 2sqrt(W/4*1^4)/e = sqrt(W)/e
# L = delta a/(max da/dx) = 2/(sqrt(W)/e) = 2e/sqrt(W)
# use W = 4/ACw, e^2 = ACw -> L = 2sqrt(ACw)/sqrt(4/ACw) = ACw
for n in range(0,nits):
a -= 0.05*(2*(2*a**3-2*a)/ACwidth + ACwidth*(4*a - np.roll(a,1,axis=0) - np.roll(a,-1,axis=0)
- np.roll(a,1,axis=1) - np.roll(a,-1,axis=1)))
return a
def save_params(propdict, fname="struct2D.json"):
jsonfile = open(fname, mode='w')
json.dump(propdict,jsonfile,default=lambda o: "(array)")
jsonfile.close()
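# --- Illustrative usage sketch (not part of the original module) ---
# The grid size, wavelength, sigma and volume fraction below are made up purely
# to show how the filtering and thresholding steps chain together.
if __name__ == "__main__":
  field0 = np.random.standard_normal((128, 128))
  smooth = wavelength_filter2D(field0, lamb=0.2, sigma=1.0)   # low-pass at wavelength 0.2
  binary = threshsymm(smooth, Vf=0.4)                         # threshold at volume fraction 0.4
  save_params({"lamb": 0.2, "sigma": 1.0, "Vf": 0.4}, fname="struct2D_demo.json")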
|
[
"numpy.ones_like",
"numpy.roll",
"numpy.amin",
"numpy.arange",
"numpy.where",
"numpy.asarray",
"scipy.special.erfinv",
"numpy.fft.rfftn",
"numpy.sum",
"numpy.array",
"json.dump"
] |
[((1429, 1463), 'numpy.arange', 'np.arange', (['(0)', 'nx'], {'dtype': 'np.float64'}), '(0, nx, dtype=np.float64)\n', (1438, 1463), True, 'import numpy as np\n'), ((1472, 1520), 'numpy.where', 'np.where', (['(qx <= nx // 2)', '(qx / Lx)', '((nx - qx) / Lx)'], {}), '(qx <= nx // 2, qx / Lx, (nx - qx) / Lx)\n', (1480, 1520), True, 'import numpy as np\n'), ((1542, 1585), 'numpy.arange', 'np.arange', (['(0)', '(nx // 2 + 1)'], {'dtype': 'np.float64'}), '(0, nx // 2 + 1, dtype=np.float64)\n', (1551, 1585), True, 'import numpy as np\n'), ((1668, 1684), 'numpy.ones_like', 'np.ones_like', (['q2'], {}), '(q2)\n', (1680, 1684), True, 'import numpy as np\n'), ((2745, 2758), 'numpy.asarray', 'np.asarray', (['f'], {}), '(f)\n', (2755, 2758), True, 'import numpy as np\n'), ((2767, 2786), 'numpy.asarray', 'np.asarray', (['(x + 0.0)'], {}), '(x + 0.0)\n', (2777, 2786), True, 'import numpy as np\n'), ((3170, 3189), 'numpy.ones_like', 'np.ones_like', (['field'], {}), '(field)\n', (3182, 3189), True, 'import numpy as np\n'), ((3637, 3656), 'numpy.ones_like', 'np.ones_like', (['field'], {}), '(field)\n', (3649, 3656), True, 'import numpy as np\n'), ((4466, 4524), 'json.dump', 'json.dump', (['propdict', 'jsonfile'], {'default': "(lambda o: '(array)')"}), "(propdict, jsonfile, default=lambda o: '(array)')\n", (4475, 4524), False, 'import json\n'), ((1890, 1902), 'numpy.sum', 'np.sum', (['h_qs'], {}), '(h_qs)\n', (1896, 1902), True, 'import numpy as np\n'), ((2084, 2096), 'numpy.sum', 'np.sum', (['h_qs'], {}), '(h_qs)\n', (2090, 2096), True, 'import numpy as np\n'), ((2331, 2344), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (2337, 2344), True, 'import numpy as np\n'), ((2490, 2506), 'numpy.sum', 'np.sum', (['cutfield'], {}), '(cutfield)\n', (2496, 2506), True, 'import numpy as np\n'), ((2791, 2808), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2801, 2808), True, 'import numpy as np\n'), ((3003, 3016), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (3009, 3016), True, 'import numpy as np\n'), ((3045, 3080), 'numpy.sum', 'np.sum', (['((field - mu) ** 2 / measure)'], {}), '((field - mu) ** 2 / measure)\n', (3051, 3080), True, 'import numpy as np\n'), ((3096, 3114), 'scipy.special.erfinv', 'erfinv', (['(2 * Vf - 1)'], {}), '(2 * Vf - 1)\n', (3102, 3114), False, 'from scipy.special import erfinv\n'), ((3451, 3470), 'scipy.special.erfinv', 'erfinv', (['(2 * vfl - 1)'], {}), '(2 * vfl - 1)\n', (3457, 3470), False, 'from scipy.special import erfinv\n'), ((3490, 3509), 'scipy.special.erfinv', 'erfinv', (['(2 * vfu - 1)'], {}), '(2 * vfu - 1)\n', (3496, 3509), False, 'from scipy.special import erfinv\n'), ((3545, 3558), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (3551, 3558), True, 'import numpy as np\n'), ((3587, 3622), 'numpy.sum', 'np.sum', (['((field - mu) ** 2 / measure)'], {}), '((field - mu) ** 2 / measure)\n', (3593, 3622), True, 'import numpy as np\n'), ((1296, 1309), 'numpy.sum', 'np.sum', (['field'], {}), '(field)\n', (1302, 1309), True, 'import numpy as np\n'), ((1346, 1376), 'numpy.sum', 'np.sum', (['((field - mu_init) ** 2)'], {}), '((field - mu_init) ** 2)\n', (1352, 1376), True, 'import numpy as np\n'), ((1835, 1854), 'numpy.fft.rfftn', 'np.fft.rfftn', (['field'], {}), '(field)\n', (1847, 1854), True, 'import numpy as np\n'), ((1936, 1965), 'numpy.sum', 'np.sum', (['((h_qs - mu_filt) ** 2)'], {}), '((h_qs - mu_filt) ** 2)\n', (1942, 1965), True, 'import numpy as np\n'), ((2132, 2163), 'numpy.sum', 'np.sum', (['((h_qs - mu_scaled) ** 2)'], {}), '((h_qs - 
mu_scaled) ** 2)\n', (2138, 2163), True, 'import numpy as np\n'), ((2292, 2313), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (2300, 2313), True, 'import numpy as np\n'), ((2542, 2577), 'numpy.sum', 'np.sum', (['((cutfield - mu_cutoff) ** 2)'], {}), '((cutfield - mu_cutoff) ** 2)\n', (2548, 2577), True, 'import numpy as np\n'), ((2652, 2669), 'numpy.amin', 'np.amin', (['cutfield'], {}), '(cutfield)\n', (2659, 2669), True, 'import numpy as np\n'), ((2965, 2986), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (2973, 2986), True, 'import numpy as np\n'), ((3245, 3265), 'numpy.sum', 'np.sum', (['thresh_field'], {}), '(thresh_field)\n', (3251, 3265), True, 'import numpy as np\n'), ((3343, 3364), 'numpy.array', 'np.array', (['field.shape'], {}), '(field.shape)\n', (3351, 3364), True, 'import numpy as np\n'), ((3819, 3839), 'numpy.sum', 'np.sum', (['thresh_field'], {}), '(thresh_field)\n', (3825, 3839), True, 'import numpy as np\n'), ((2814, 2831), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2824, 2831), True, 'import numpy as np\n'), ((4338, 4360), 'numpy.roll', 'np.roll', (['a', '(-1)'], {'axis': '(1)'}), '(a, -1, axis=1)\n', (4345, 4360), True, 'import numpy as np\n'), ((2879, 2896), 'numpy.asarray', 'np.asarray', (['(x < 0)'], {}), '(x < 0)\n', (2889, 2896), True, 'import numpy as np\n'), ((4316, 4337), 'numpy.roll', 'np.roll', (['a', '(1)'], {'axis': '(1)'}), '(a, 1, axis=1)\n', (4323, 4337), True, 'import numpy as np\n'), ((4275, 4297), 'numpy.roll', 'np.roll', (['a', '(-1)'], {'axis': '(0)'}), '(a, -1, axis=0)\n', (4282, 4297), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.roll', 'np.roll', (['a', '(1)'], {'axis': '(0)'}), '(a, 1, axis=0)\n', (4260, 4274), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import numpy as np
import pytest
from aicsimageio import exceptions
from aicsimageio.readers.default_reader import DefaultReader
from ..conftest import get_resource_full_path, host
from ..image_container_test_utils import run_image_file_checks
@host
@pytest.mark.parametrize(
"filename, set_scene, expected_shape, expected_dims_order",
[
("example.bmp", "Image:0", (480, 640, 4), "YXS"),
("example.png", "Image:0", (800, 537, 4), "YXS"),
("example.jpg", "Image:0", (452, 400, 3), "YXS"),
("example.gif", "Image:0", (72, 268, 268, 4), "TYXS"),
(
"example_invalid_frame_count.mp4",
"Image:0",
(55, 1080, 1920, 3),
"TYXS",
),
(
"example_valid_frame_count.mp4",
"Image:0",
(72, 272, 272, 3),
"TYXS",
),
pytest.param(
"example.txt",
None,
None,
None,
marks=pytest.mark.raises(exception=exceptions.UnsupportedFileFormatError),
),
pytest.param(
"example.png",
"Image:1",
None,
None,
marks=pytest.mark.raises(exception=IndexError),
),
],
)
def test_default_reader(
filename: str,
host: str,
set_scene: str,
expected_shape: Tuple[int, ...],
expected_dims_order: str,
) -> None:
# Construct full filepath
uri = get_resource_full_path(filename, host)
# Run checks
run_image_file_checks(
ImageContainer=DefaultReader,
image=uri,
set_scene=set_scene,
expected_scenes=("Image:0",),
expected_current_scene="Image:0",
expected_shape=expected_shape,
expected_dtype=np.dtype(np.uint8),
expected_dims_order=expected_dims_order,
expected_channel_names=None,
expected_physical_pixel_sizes=(None, None, None),
expected_metadata_type=dict,
)
def test_ffmpeg_header_fail() -> None:
with pytest.raises(IOError):
# Big Buck Bunny
DefaultReader("https://archive.org/embed/archive-video-files/test.mp4")
|
[
"numpy.dtype",
"pytest.mark.raises",
"pytest.raises",
"aicsimageio.readers.default_reader.DefaultReader"
] |
[((2099, 2121), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (2112, 2121), False, 'import pytest\n'), ((2156, 2227), 'aicsimageio.readers.default_reader.DefaultReader', 'DefaultReader', (['"""https://archive.org/embed/archive-video-files/test.mp4"""'], {}), "('https://archive.org/embed/archive-video-files/test.mp4')\n", (2169, 2227), False, 'from aicsimageio.readers.default_reader import DefaultReader\n'), ((1842, 1860), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (1850, 1860), True, 'import numpy as np\n'), ((1065, 1132), 'pytest.mark.raises', 'pytest.mark.raises', ([], {'exception': 'exceptions.UnsupportedFileFormatError'}), '(exception=exceptions.UnsupportedFileFormatError)\n', (1083, 1132), False, 'import pytest\n'), ((1271, 1311), 'pytest.mark.raises', 'pytest.mark.raises', ([], {'exception': 'IndexError'}), '(exception=IndexError)\n', (1289, 1311), False, 'import pytest\n')]
|
import collections
import numpy as np
class Vectorizer(object):
def __init__(self):
self.mapping = {}
self.inverse_mapping = {}
self.embedding_size = 0
def vectorize_string(self, s):
vec = np.empty(len(s))
for i in range(0,len(s)):
char = s[i]
if char in self.mapping:
vec[i] = self.mapping[char]
else:
vec[i] = self.embedding_size
self.mapping[char] = self.embedding_size
self.inverse_mapping[self.embedding_size] = char
self.embedding_size += 1
return vec
def devectorize(self, v):
"""
        Devectorizes a vector into a string
"""
s = ""
for ident in v:
s += self.inverse_mapping[ident]
return s
def vectorize_corpus(corpus):
"""
corpus: A list of strings we want to vectorize
->
vectors: A list of lists that represent vectorized
representations of the strings
vectorizer: A vectorizer that can be used to vectorize and devectorize
the strings
"""
vectorizer = Vectorizer()
# vectors = np.array([])
index = 1
mapping = {}
inverse_mapping = {}
count = 0.0
vectors = []
for i in range(0,len(corpus)):
s = ""
for char in corpus[i, :]:
s += char
vectorized = vectorizer.vectorize_string(corpus[i, :])
# print(vectorized)
vectors.append(vectorized)
return np.array(vectors), vectorizer
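# --- Illustrative usage sketch (not part of the original module) ---
# vectorize_corpus indexes corpus[i, :], so it expects a 2D array of single
# characters; the tiny corpus below is made up purely for illustration.
if __name__ == "__main__":
    corpus = np.array([list("abca"), list("abcd")])
    vectors, vectorizer = vectorize_corpus(corpus)
    print(vectors)                                # one integer id per character
    print(vectorizer.devectorize(vectors[1]))     # round-trips back to "abcd"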
|
[
"numpy.array"
] |
[((1519, 1536), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (1527, 1536), True, 'import numpy as np\n')]
|
import threading
import numpy as np
import time
import rospy
from sensor_msgs import point_cloud2
from std_msgs.msg import Header
from sensor_msgs.msg import PointCloud2, PointField
from stella_nav_core.geometry_utils import GeometryUtils
from stella_nav_core.config import CostConfig, MotionConfig
class State(object):
def __init__(self, x, y, theta, vx, avz, mask_rotation=None, cost=None, costs=None,
accum_cost=0.0, trajectory=None, accum_trajectory=np.array([], dtype=np.float64).reshape(0, 3), level=1, backtrace=[]):
self.x = x
self.y = y
self.theta = theta
self.vx = vx
self.avz = avz
self.mask_rotation = mask_rotation
self.cost = cost
self.accum_cost = accum_cost
self.costs = costs
self.trajectory = trajectory
self.accum_trajectory = accum_trajectory
self.level = level
self.backtrace = backtrace
class DWAPlanner(object):
LETHAL_COST = 1000.0
def __init__(
self, costmaps, costmap, linear_motion_config, angular_motion_config,
dt=0.1, heading_lookahead=0.1, predict_time=1.0, search_level=1,
default_road_width=0.5, heading_lethal_angle=np.pi/4, debug_cloud=True,
angular_speed_cost_config=CostConfig(0.01, 1.0), speed_cost_config=CostConfig(0.01, 1.0),
heading_cost_config=CostConfig(0.01, 1.0), goal_cost_config=CostConfig(1.0, 5.0),
obstacle_cost_config=CostConfig(100.0, 100.0)
):
self._linear_motion_config = MotionConfig(**linear_motion_config)
self._angular_motion_config = MotionConfig(**angular_motion_config)
self._dt = dt
self._predict_time = predict_time
self._search_level = search_level
self._twist = None
self._heading_lookahead = heading_lookahead
self._debug_cloud = debug_cloud
self._angular_speed_cost_config = CostConfig(**angular_speed_cost_config)
self._speed_cost_config = CostConfig(**speed_cost_config)
self._heading_cost_config = CostConfig(**heading_cost_config)
self._goal_cost_config = CostConfig(**goal_cost_config)
self._obstacle_cost_config = CostConfig(**obstacle_cost_config)
self._default_road_width = default_road_width
self._heading_lethal_angle = heading_lethal_angle
self._costmaps = costmaps
self._costmap = costmaps[costmap]
self._cost_pub = rospy.Publisher("~dwa_planner/cost_cloud", PointCloud2, queue_size=1)
self._lethal_cost_pub = rospy.Publisher("~dwa_planner/lethal_cost_cloud", PointCloud2, queue_size=1)
self._rotation_cost_pub = rospy.Publisher("~dwa_planner/rotation_cost_cloud", PointCloud2, queue_size=1)
self._fields = [
PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1),
PointField(name="speed", offset=12, datatype=PointField.FLOAT32, count=1),
PointField(name="obstacle", offset=16, datatype=PointField.FLOAT32, count=1),
PointField(name="goal", offset=20, datatype=PointField.FLOAT32, count=1),
PointField(name="angular_speed", offset=24, datatype=PointField.FLOAT32, count=1),
PointField(name="heading", offset=28, datatype=PointField.FLOAT32, count=1),
PointField(name="total", offset=32, datatype=PointField.FLOAT32, count=1),
]
self.lock = threading.RLock()
def update_twist(self, twist):
self.lock.acquire()
self._twist = twist
self.lock.release()
def _trajectory(self, x, y, theta, vx, avz):
t = np.linspace(0, self._predict_time, self._predict_time / self._dt)[np.newaxis, :, np.newaxis]
v = np.repeat(
np.vstack((vx * np.cos(theta), vx * np.sin(theta), np.zeros(vx.shape))).T[:, np.newaxis, :],
t.shape[1], axis=1)
pos = np.array((x, y, theta))[np.newaxis, np.newaxis, :]
traj = np.zeros(v.shape)
traj[avz != 0.0] = np.vstack(
((vx / avz) * (np.sin(avz * t + theta) - np.sin(theta)) + x,
(vx / avz) * (np.cos(theta) - np.cos(avz * t + theta)) + y,
avz * t + theta)).T
return traj
def _heading_cost(self, scoring_point, goal):
target_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
angle = np.abs(GeometryUtils.regulate_rad(target_yaw - scoring_point[:, 0, 2]))
cost = self._heading_cost_config.get_cost(angle / np.pi)
cost[angle > self._heading_lethal_angle] += DWAPlanner.LETHAL_COST
return cost
def _angular_speed_cost(self, avz):
return self._angular_speed_cost_config.get_cost(np.abs(avz) / self._linear_motion_config.max_speed)
def _speed_cost(self, vx):
max_speed = max(self._linear_motion_config.max_speed, -self._linear_motion_config.min_speed)
return self._speed_cost_config.get_cost(max_speed - np.abs(vx)) / max_speed
def _speed_cost2(self, vx, scoring_point, goal):
target_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
theta = scoring_point[:, 0, 2] - target_yaw
max_speed = max(self._linear_motion_config.max_speed, -self._linear_motion_config.min_speed)
return self._speed_cost_config.get_cost(
(max_speed - np.abs(vx) * np.cos(theta)) / max_speed
)
def _obstacle_cost(self, traj, scoring_point, costmap):
yaw = scoring_point[:, :, 2]
bias = np.stack((self._heading_lookahead * np.cos(yaw), self._heading_lookahead * np.sin(yaw)), axis=-1)
lethal_cost = np.zeros((scoring_point.shape[0], 1))
lethal_yaw = traj[:, :, 2]
lethal_look_point = traj[:, :, :2] + np.stack((self._heading_lookahead * np.cos(lethal_yaw), self._heading_lookahead * np.sin(lethal_yaw)), axis=-1)
current_pos = traj[:, 0:1, :2]
current_yaw = traj[:, 0:1, 2]
current_bias = np.stack((self._heading_lookahead * np.cos(current_yaw), self._heading_lookahead * np.sin(current_yaw)), axis=-1)
current_look_point = current_pos + current_bias
lethal_cost[np.any(
(costmap.get_value_from_world(lethal_look_point) > 0.99) * (np.linalg.norm(current_look_point - lethal_look_point, axis=2) > 1e-3),
axis=1)] = DWAPlanner.LETHAL_COST
look_point = scoring_point[:, :, :2] + bias
cost = self._obstacle_cost_config.get_cost(costmap.get_value_from_world(look_point))
return (cost + lethal_cost).reshape(cost.shape[0])
def _explicit_goal_cost(self, scoring_point, goal):
yaw = scoring_point[:, 0, 2]
return self._goal_cost_config.get_cost(np.hypot(
goal.pose.position.x - (scoring_point[:, 0, 0] + self._heading_lookahead * np.cos(yaw)),
goal.pose.position.y - (scoring_point[:, 0, 1] + self._heading_lookahead * np.sin(yaw))))
def _goal_cost(self, scoring_point, goal):
robot_yaw = scoring_point[:, 0, 2]
robot_pos = np.array(
(scoring_point[:, 0, 0] + self._heading_lookahead * np.cos(robot_yaw),
scoring_point[:, 0, 1] + self._heading_lookahead * np.sin(robot_yaw))).T
goal_pos = np.array((goal.pose.position.x, goal.pose.position.y))
u = robot_pos - goal_pos
goal_yaw = GeometryUtils.get_yaw(goal.pose.orientation)
v = (np.cos(goal_yaw), np.sin(goal_yaw))
square_distance = np.square(np.cross(u, v)) / np.square(goal.data.get("road_width", self._default_road_width))
cost = self._goal_cost_config.get_cost(square_distance)
cost[square_distance > 1.0] += DWAPlanner.LETHAL_COST
return cost
def _cost(self, trajectory, costmap, goal, vx, avz):
scoring_point = trajectory[:, -1:, :]
# speed_cost = self._speed_cost(vx)
speed_cost = self._speed_cost2(vx, scoring_point, goal)
obstacle_cost = self._obstacle_cost(trajectory, scoring_point, costmap)
if goal.data.get("explicit", None):
goal_cost = self._explicit_goal_cost(scoring_point, goal)
else:
goal_cost = self._goal_cost(scoring_point, goal)
angular_speed_cost = self._angular_speed_cost(avz)
heading_cost = self._heading_cost(scoring_point, goal)
costs = (speed_cost, obstacle_cost, goal_cost, angular_speed_cost, heading_cost)
return sum(costs), np.vstack(costs)
def _dynamic_window(self, linear_vx, angular_vz):
dw = [
max(self._linear_motion_config.min_speed, min(self._linear_motion_config.max_speed, linear_vx - self._linear_motion_config.max_accel * self._dt)),
min(self._linear_motion_config.max_speed, max(self._linear_motion_config.min_speed, linear_vx + self._linear_motion_config.max_accel * self._dt)),
max(self._angular_motion_config.min_speed, min(self._angular_motion_config.max_speed, angular_vz - self._angular_motion_config.max_accel * self._dt)),
min(self._angular_motion_config.max_speed, max(self._angular_motion_config.min_speed, angular_vz + self._angular_motion_config.max_accel * self._dt))
]
return dw
def _sample_v(self, dw):
_vx = np.linspace(dw[0], dw[1], self._linear_motion_config.samples)
_avz = np.linspace(dw[2], dw[3], self._angular_motion_config.samples)
_avz[_avz == 0.0] = 1e-6
vx, avz = np.meshgrid(_vx, _avz)
vx = vx.flatten()
avz = avz.flatten()
mask_rotation = vx == dw[0]
return vx, avz, mask_rotation
def _publish_cloud(self, trajectory, cost, costs, mask_rotation):
header = Header(frame_id="map")
points = np.vstack((trajectory[:, -1, :2].T, np.zeros(cost.shape), costs, cost)).T
mask = mask_rotation
x, y = trajectory[0, 0, :2]
theta_rot = trajectory[mask, -1, 2:3].T
r = 0.1
points_rot = np.vstack((x + r * np.cos(theta_rot), y + r * np.sin(theta_rot), np.zeros(cost[mask].shape), [c[mask] for c in costs], cost[mask])).T
points_rot_filtered = points_rot[points_rot[:, 3 + len(costs)] < DWAPlanner.LETHAL_COST]
points_filtered = points[points[:, 3 + len(costs)] < DWAPlanner.LETHAL_COST]
points_filtered_out = points[points[:, 3 + len(costs)] > DWAPlanner.LETHAL_COST - 1]
cost_msg = point_cloud2.create_cloud(header, self._fields, points_filtered)
lethal_cost_msg = point_cloud2.create_cloud(header, self._fields, points_filtered_out)
rotation_cost_msg = point_cloud2.create_cloud(header, self._fields, points_rot_filtered)
try:
self._cost_pub.publish(cost_msg)
self._lethal_cost_pub.publish(lethal_cost_msg)
self._rotation_cost_pub.publish(rotation_cost_msg)
except rospy.ROSException as e:
rospy.logdebug("DWAPlanner: {}".format(e))
def plan(self, pose, goal):
self.lock.acquire()
twist = self._twist
self.lock.release()
if twist is None:
return np.array((0.0, 0.0)), None
self._costmap.lock.acquire()
costmap = self._costmap.clone()
self._costmap.lock.release()
x = pose.pose.position.x
y = pose.pose.position.y
theta = GeometryUtils.get_yaw(pose.pose.orientation)
linear_vx = twist.linear.x
angular_vz = twist.angular.z
states = [State(x, y, theta, linear_vx, angular_vz)]
results = []
while states:
state = states.pop()
sample_vx, sample_avz, mask_rotation = self._sample_v(dw=self._dynamic_window(state.vx, state.avz))
trajectory = self._trajectory(state.x, state.y, state.theta, sample_vx, sample_avz)
cost, costs = self._cost(trajectory, costmap, goal, sample_vx, sample_avz)
for i in range(len(sample_vx)):
_vx = sample_vx[i]
_avz = sample_avz[i]
_mask_rotation = mask_rotation
_cost = cost
_costs = costs
_accum_cost = cost[i] + state.accum_cost
_x = trajectory[i, -1, 0]
_y = trajectory[i, -1, 1]
_theta = trajectory[i, -1, 2]
_trajectory = trajectory
_accum_trajectory = np.vstack((trajectory[i], state.accum_trajectory))
_backtrace = state.backtrace + [state]
new_state = State(_x, _y, _theta, _vx, _avz, _mask_rotation, _cost, _costs, _accum_cost, _trajectory, _accum_trajectory, state.level + 1, _backtrace)
if state.level < self._search_level:
states.append(new_state)
else:
results.append(new_state)
min_cost = None
min_idx = None
min_score = None
for state in results:
if min_cost is None or state.accum_cost < min_cost:
min_cost = state.accum_cost
min_score = (state.cost, state.vx, state.avz, state.trajectory)
min_backtrace = state.backtrace + [state]
if self._debug_cloud:
for state in min_backtrace[1:]:
self._publish_cloud(state.trajectory, state.cost, state.costs, state.mask_rotation)
if min_score is None:
min_score = (10000.0, 0.0, 0.0, [])
return min_score[1:3], min_score[3]
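# --- Illustrative sketch (not part of the original class) ---
# The planner samples (vx, avz) pairs from a dynamic window around the current
# command and rolls each pair out along a constant-curvature arc (see _sample_v
# and _trajectory). The standalone rollout below reproduces that arc equation
# with made-up velocity limits so it can run without ROS or a costmap.
if __name__ == "__main__":
    dt, horizon = 0.1, 1.0
    t = np.arange(0.0, horizon, dt)
    vx, avz = np.meshgrid(np.linspace(0.1, 0.5, 5), np.linspace(-1.0, 1.0, 5))
    vx, avz = vx.flatten(), avz.flatten()
    avz = np.where(avz == 0.0, 1e-6, avz)            # avoid division by zero, as in _sample_v
    x0, y0, theta0 = 0.0, 0.0, 0.0
    xs = x0 + (vx[:, None] / avz[:, None]) * (np.sin(avz[:, None] * t + theta0) - np.sin(theta0))
    ys = y0 + (vx[:, None] / avz[:, None]) * (np.cos(theta0) - np.cos(avz[:, None] * t + theta0))
    print(xs.shape, ys.shape)                      # (25, 10): 25 sampled commands, 10 time steps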
|
[
"numpy.array",
"stella_nav_core.geometry_utils.GeometryUtils.get_yaw",
"numpy.linalg.norm",
"numpy.sin",
"stella_nav_core.config.MotionConfig",
"numpy.cross",
"threading.RLock",
"stella_nav_core.config.CostConfig",
"numpy.linspace",
"sensor_msgs.point_cloud2.create_cloud",
"numpy.vstack",
"numpy.meshgrid",
"numpy.abs",
"numpy.cos",
"rospy.Publisher",
"std_msgs.msg.Header",
"stella_nav_core.geometry_utils.GeometryUtils.regulate_rad",
"numpy.zeros",
"sensor_msgs.msg.PointField"
] |
[((1289, 1310), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1299, 1310), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1330, 1351), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1340, 1351), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1385, 1406), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(0.01)', '(1.0)'], {}), '(0.01, 1.0)\n', (1395, 1406), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1425, 1445), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (1435, 1445), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1480, 1504), 'stella_nav_core.config.CostConfig', 'CostConfig', (['(100.0)', '(100.0)'], {}), '(100.0, 100.0)\n', (1490, 1504), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1549, 1585), 'stella_nav_core.config.MotionConfig', 'MotionConfig', ([], {}), '(**linear_motion_config)\n', (1561, 1585), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1624, 1661), 'stella_nav_core.config.MotionConfig', 'MotionConfig', ([], {}), '(**angular_motion_config)\n', (1636, 1661), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((1929, 1968), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**angular_speed_cost_config)\n', (1939, 1968), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2003, 2034), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**speed_cost_config)\n', (2013, 2034), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2071, 2104), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**heading_cost_config)\n', (2081, 2104), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2138, 2168), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**goal_cost_config)\n', (2148, 2168), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2206, 2240), 'stella_nav_core.config.CostConfig', 'CostConfig', ([], {}), '(**obstacle_cost_config)\n', (2216, 2240), False, 'from stella_nav_core.config import CostConfig, MotionConfig\n'), ((2454, 2523), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/cost_cloud', PointCloud2, queue_size=1)\n", (2469, 2523), False, 'import rospy\n'), ((2556, 2632), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/lethal_cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/lethal_cost_cloud', PointCloud2, queue_size=1)\n", (2571, 2632), False, 'import rospy\n'), ((2667, 2745), 'rospy.Publisher', 'rospy.Publisher', (['"""~dwa_planner/rotation_cost_cloud"""', 'PointCloud2'], {'queue_size': '(1)'}), "('~dwa_planner/rotation_cost_cloud', PointCloud2, queue_size=1)\n", (2682, 2745), False, 'import rospy\n'), ((3581, 3598), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (3596, 3598), False, 'import threading\n'), ((4114, 4131), 'numpy.zeros', 'np.zeros', (['v.shape'], {}), '(v.shape)\n', (4122, 4131), True, 'import numpy as np\n'), ((4441, 4485), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (4462, 4485), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((5175, 
5219), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (5196, 5219), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((5730, 5767), 'numpy.zeros', 'np.zeros', (['(scoring_point.shape[0], 1)'], {}), '((scoring_point.shape[0], 1))\n', (5738, 5767), True, 'import numpy as np\n'), ((7317, 7371), 'numpy.array', 'np.array', (['(goal.pose.position.x, goal.pose.position.y)'], {}), '((goal.pose.position.x, goal.pose.position.y))\n', (7325, 7371), True, 'import numpy as np\n'), ((7424, 7468), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['goal.pose.orientation'], {}), '(goal.pose.orientation)\n', (7445, 7468), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((9304, 9365), 'numpy.linspace', 'np.linspace', (['dw[0]', 'dw[1]', 'self._linear_motion_config.samples'], {}), '(dw[0], dw[1], self._linear_motion_config.samples)\n', (9315, 9365), True, 'import numpy as np\n'), ((9381, 9443), 'numpy.linspace', 'np.linspace', (['dw[2]', 'dw[3]', 'self._angular_motion_config.samples'], {}), '(dw[2], dw[3], self._angular_motion_config.samples)\n', (9392, 9443), True, 'import numpy as np\n'), ((9495, 9517), 'numpy.meshgrid', 'np.meshgrid', (['_vx', '_avz'], {}), '(_vx, _avz)\n', (9506, 9517), True, 'import numpy as np\n'), ((9734, 9756), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""map"""'}), "(frame_id='map')\n", (9740, 9756), False, 'from std_msgs.msg import Header\n'), ((10427, 10491), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_filtered'], {}), '(header, self._fields, points_filtered)\n', (10452, 10491), False, 'from sensor_msgs import point_cloud2\n'), ((10518, 10586), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_filtered_out'], {}), '(header, self._fields, points_filtered_out)\n', (10543, 10586), False, 'from sensor_msgs import point_cloud2\n'), ((10615, 10683), 'sensor_msgs.point_cloud2.create_cloud', 'point_cloud2.create_cloud', (['header', 'self._fields', 'points_rot_filtered'], {}), '(header, self._fields, points_rot_filtered)\n', (10640, 10683), False, 'from sensor_msgs import point_cloud2\n'), ((11345, 11389), 'stella_nav_core.geometry_utils.GeometryUtils.get_yaw', 'GeometryUtils.get_yaw', (['pose.pose.orientation'], {}), '(pose.pose.orientation)\n', (11366, 11389), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((2783, 2851), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""x"""', 'offset': '(0)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='x', offset=0, datatype=PointField.FLOAT32, count=1)\n", (2793, 2851), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((2865, 2933), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""y"""', 'offset': '(4)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='y', offset=4, datatype=PointField.FLOAT32, count=1)\n", (2875, 2933), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((2947, 3015), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""z"""', 'offset': '(8)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='z', offset=8, datatype=PointField.FLOAT32, count=1)\n", (2957, 3015), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3029, 3102), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""speed"""', 
'offset': '(12)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='speed', offset=12, datatype=PointField.FLOAT32, count=1)\n", (3039, 3102), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3116, 3192), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""obstacle"""', 'offset': '(16)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='obstacle', offset=16, datatype=PointField.FLOAT32, count=1)\n", (3126, 3192), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3206, 3278), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""goal"""', 'offset': '(20)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='goal', offset=20, datatype=PointField.FLOAT32, count=1)\n", (3216, 3278), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3292, 3377), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""angular_speed"""', 'offset': '(24)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='angular_speed', offset=24, datatype=PointField.FLOAT32,\n count=1)\n", (3302, 3377), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3387, 3462), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""heading"""', 'offset': '(28)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='heading', offset=28, datatype=PointField.FLOAT32, count=1)\n", (3397, 3462), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3476, 3549), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""total"""', 'offset': '(32)', 'datatype': 'PointField.FLOAT32', 'count': '(1)'}), "(name='total', offset=32, datatype=PointField.FLOAT32, count=1)\n", (3486, 3549), False, 'from sensor_msgs.msg import PointCloud2, PointField\n'), ((3781, 3846), 'numpy.linspace', 'np.linspace', (['(0)', 'self._predict_time', '(self._predict_time / self._dt)'], {}), '(0, self._predict_time, self._predict_time / self._dt)\n', (3792, 3846), True, 'import numpy as np\n'), ((4048, 4071), 'numpy.array', 'np.array', (['(x, y, theta)'], {}), '((x, y, theta))\n', (4056, 4071), True, 'import numpy as np\n'), ((4509, 4572), 'stella_nav_core.geometry_utils.GeometryUtils.regulate_rad', 'GeometryUtils.regulate_rad', (['(target_yaw - scoring_point[:, 0, 2])'], {}), '(target_yaw - scoring_point[:, 0, 2])\n', (4535, 4572), False, 'from stella_nav_core.geometry_utils import GeometryUtils\n'), ((7482, 7498), 'numpy.cos', 'np.cos', (['goal_yaw'], {}), '(goal_yaw)\n', (7488, 7498), True, 'import numpy as np\n'), ((7500, 7516), 'numpy.sin', 'np.sin', (['goal_yaw'], {}), '(goal_yaw)\n', (7506, 7516), True, 'import numpy as np\n'), ((8502, 8518), 'numpy.vstack', 'np.vstack', (['costs'], {}), '(costs)\n', (8511, 8518), True, 'import numpy as np\n'), ((477, 507), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (485, 507), True, 'import numpy as np\n'), ((4831, 4842), 'numpy.abs', 'np.abs', (['avz'], {}), '(avz)\n', (4837, 4842), True, 'import numpy as np\n'), ((7554, 7568), 'numpy.cross', 'np.cross', (['u', 'v'], {}), '(u, v)\n', (7562, 7568), True, 'import numpy as np\n'), ((11121, 11141), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (11129, 11141), True, 'import numpy as np\n'), ((12382, 12432), 'numpy.vstack', 'np.vstack', (['(trajectory[i], state.accum_trajectory)'], {}), '((trajectory[i], state.accum_trajectory))\n', (12391, 12432), True, 'import numpy as np\n'), ((5076, 5086), 'numpy.abs', 'np.abs', (['vx'], {}), '(vx)\n', (5082, 5086), True, 'import 
numpy as np\n'), ((5646, 5657), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (5652, 5657), True, 'import numpy as np\n'), ((5685, 5696), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (5691, 5696), True, 'import numpy as np\n'), ((6097, 6116), 'numpy.cos', 'np.cos', (['current_yaw'], {}), '(current_yaw)\n', (6103, 6116), True, 'import numpy as np\n'), ((6144, 6163), 'numpy.sin', 'np.sin', (['current_yaw'], {}), '(current_yaw)\n', (6150, 6163), True, 'import numpy as np\n'), ((9810, 9830), 'numpy.zeros', 'np.zeros', (['cost.shape'], {}), '(cost.shape)\n', (9818, 9830), True, 'import numpy as np\n'), ((10063, 10089), 'numpy.zeros', 'np.zeros', (['cost[mask].shape'], {}), '(cost[mask].shape)\n', (10071, 10089), True, 'import numpy as np\n'), ((5447, 5457), 'numpy.abs', 'np.abs', (['vx'], {}), '(vx)\n', (5453, 5457), True, 'import numpy as np\n'), ((5460, 5473), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5466, 5473), True, 'import numpy as np\n'), ((5884, 5902), 'numpy.cos', 'np.cos', (['lethal_yaw'], {}), '(lethal_yaw)\n', (5890, 5902), True, 'import numpy as np\n'), ((5930, 5948), 'numpy.sin', 'np.sin', (['lethal_yaw'], {}), '(lethal_yaw)\n', (5936, 5948), True, 'import numpy as np\n'), ((6332, 6394), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_look_point - lethal_look_point)'], {'axis': '(2)'}), '(current_look_point - lethal_look_point, axis=2)\n', (6346, 6394), True, 'import numpy as np\n'), ((3960, 3978), 'numpy.zeros', 'np.zeros', (['vx.shape'], {}), '(vx.shape)\n', (3968, 3978), True, 'import numpy as np\n'), ((6892, 6903), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (6898, 6903), True, 'import numpy as np\n'), ((6993, 7004), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (6999, 7004), True, 'import numpy as np\n'), ((7193, 7210), 'numpy.cos', 'np.cos', (['robot_yaw'], {}), '(robot_yaw)\n', (7199, 7210), True, 'import numpy as np\n'), ((7276, 7293), 'numpy.sin', 'np.sin', (['robot_yaw'], {}), '(robot_yaw)\n', (7282, 7293), True, 'import numpy as np\n'), ((10017, 10034), 'numpy.cos', 'np.cos', (['theta_rot'], {}), '(theta_rot)\n', (10023, 10034), True, 'import numpy as np\n'), ((10044, 10061), 'numpy.sin', 'np.sin', (['theta_rot'], {}), '(theta_rot)\n', (10050, 10061), True, 'import numpy as np\n'), ((3925, 3938), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3931, 3938), True, 'import numpy as np\n'), ((3945, 3958), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3951, 3958), True, 'import numpy as np\n'), ((4197, 4220), 'numpy.sin', 'np.sin', (['(avz * t + theta)'], {}), '(avz * t + theta)\n', (4203, 4220), True, 'import numpy as np\n'), ((4223, 4236), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4229, 4236), True, 'import numpy as np\n'), ((4270, 4283), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4276, 4283), True, 'import numpy as np\n'), ((4286, 4309), 'numpy.cos', 'np.cos', (['(avz * t + theta)'], {}), '(avz * t + theta)\n', (4292, 4309), True, 'import numpy as np\n')]
|
"""Easily convert RGB video data (e.g. .avi) to the TensorFlow tfrecords file format with the provided 3 color channels.
Allows to subsequently train a neural network in TensorFlow with the generated tfrecords.
Due to common hardware/GPU RAM limitations, this implementation allows to limit the number of frames per
video actually stored in the tfrecords. The code automatically chooses the frame step size such that there is
an equal separation distribution of the video images. Implementation supports Optical Flow
(currently OpenCV's calcOpticalFlowFarneback) as an additional 4th channel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, math
from tensorflow.python.platform import gfile
from tensorflow.python.platform import flags
from tensorflow.python.platform import app
import cv2 as cv2
import numpy as np
import tensorflow as tf
FLAGS = None
FILE_FILTER = '*.avi'
NUM_FRAMES_PER_VIDEO = 15
NUM_CHANNELS_VIDEO = 4
WIDTH_VIDEO = 128
HEIGHT_VIDEO = 128
SOURCE = '/insert/source/here'
DESTINATION = '/insert/destination/here'
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_videos', 1000, 'Number of videos stored in one single tfrecords file')
flags.DEFINE_string('image_color_depth', np.uint8, 'Color depth for the images stored in the tfrecords files. '
'Has to correspond to the source video color depth. '
'Specified as np dtype (e.g. ''np.uint8).')
flags.DEFINE_string('source', SOURCE, 'Directory with video files')
flags.DEFINE_string('output_path', DESTINATION, 'Directory for storing tf records')
flags.DEFINE_boolean('optical_flow', True, 'Indicates whether optical flow shall be computed and added as fourth '
                                           'channel. Defaults to True')
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_chunks(l, n):
"""Yield successive n-sized chunks from l.
  Used to split the list l into successive chunks of length n (the last chunk may be shorter)."""
for i in range(0, len(l), n):
yield l[i:i + n]
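# Brief illustration of get_chunks (hypothetical values, not part of the original pipeline):
# list(get_chunks([0, 1, 2, 3, 4], 2)) -> [[0, 1], [2, 3], [4]]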
def getVideoCapture(path):
cap = None
if path:
cap = cv2.VideoCapture(path)
return cap
def getNextFrame(cap):
ret, frame = cap.read()
  if not ret:
return None
return np.asarray(frame)
def compute_dense_optical_flow(prev_image, current_image):
old_shape = current_image.shape
prev_image_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
current_image_gray = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
assert current_image.shape == old_shape
hsv = np.zeros_like(prev_image)
hsv[..., 1] = 255
flow = cv2.calcOpticalFlowFarneback(prev_image_gray, current_image_gray, 0.8, 15, 5, 10, 5, 1.5, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang*180/np.pi/2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
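# Hedged sketch (not part of the original pipeline): how a BGR flow visualisation produced by
# compute_dense_optical_flow can be collapsed to a single gray channel and stacked onto an RGB
# frame as a 4th channel, mirroring what video_file_to_ndarray does further below.
def stack_flow_as_fourth_channel(rgb_image, flow_bgr):
  """rgb_image: (H, W, 3) array; flow_bgr: (H, W, 3) BGR flow visualisation of the same size."""
  flow_gray = cv2.cvtColor(flow_bgr, cv2.COLOR_BGR2GRAY)
  return np.concatenate((rgb_image, np.expand_dims(flow_gray, axis=2)), axis=2)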
def save_video_to_tfrecords(source_path, destination_path, videos_per_file=FLAGS.num_videos, video_filenames=None,
dense_optical_flow=False):
"""calls sub-functions convert_video_to_numpy and save_numpy_to_tfrecords in order to directly export tfrecords files
  :param source_path: directory where the video files are stored
:param destination_path: directory where tfrecords should be stored
:param videos_per_file: specifies the number of videos within one tfrecords file
:param dense_optical_flow: boolean flag that controls if optical flow should be used and added to tfrecords
"""
global NUM_CHANNELS_VIDEO
assert (NUM_CHANNELS_VIDEO == 3 and (not dense_optical_flow)) or (NUM_CHANNELS_VIDEO == 4 and dense_optical_flow), "correct NUM_CHANNELS_VIDEO"
if video_filenames is not None:
filenames = video_filenames
else:
filenames = gfile.Glob(os.path.join(source_path, FILE_FILTER))
if not filenames:
raise RuntimeError('No data files found.')
print('Total videos found: ' + str(len(filenames)))
filenames_split = list(get_chunks(filenames, videos_per_file))
for i, batch in enumerate(filenames_split):
data = convert_video_to_numpy(batch, dense_optical_flow=dense_optical_flow)
total_batch_number = int(math.ceil(len(filenames)/videos_per_file))
print('Batch ' + str(i+1) + '/' + str(total_batch_number))
save_numpy_to_tfrecords(data, destination_path, 'train_blobs_batch_', videos_per_file, i+1,
total_batch_number)
def save_numpy_to_tfrecords(data, destination_path, name, fragmentSize, current_batch_number, total_batch_number):
"""Converts an entire dataset into x tfrecords where x=videos/fragmentSize.
:param data: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images, c=number of image
channels, h=image height, w=image width
:param name: filename; data samples type (train|valid|test)
:param fragmentSize: specifies how many videos are stored in one tfrecords file
:param current_batch_number: indicates the current batch index (function call within loop)
:param total_batch_number: indicates the total number of batches
"""
num_videos = data.shape[0]
num_images = data.shape[1]
num_channels = data.shape[4]
height = data.shape[2]
width = data.shape[3]
writer = None
feature = {}
for videoCount in range((num_videos)):
if videoCount % fragmentSize == 0:
if writer is not None:
writer.close()
filename = os.path.join(destination_path, name + str(current_batch_number) + '_of_' + str(total_batch_number) + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for imageCount in range(num_images):
path = 'blob' + '/' + str(imageCount)
image = data[videoCount, imageCount, :, :, :]
image = image.astype(FLAGS.image_color_depth)
image_raw = image.tostring()
feature[path]= _bytes_feature(image_raw)
feature['height'] = _int64_feature(height)
feature['width'] = _int64_feature(width)
feature['depth'] = _int64_feature(num_channels)
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
if writer is not None:
writer.close()
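# Hedged read-back sketch (assumes TensorFlow 1.x, matching tf.python_io above; the filename is only
# a placeholder): illustrates the 'blob/<i>', 'height', 'width' and 'depth' feature layout written by
# save_numpy_to_tfrecords.
def read_first_image(tfrecords_file='train_blobs_batch_1_of_1.tfrecords'):
  for serialized in tf.python_io.tf_record_iterator(tfrecords_file):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    feature = example.features.feature
    height = feature['height'].int64_list.value[0]
    width = feature['width'].int64_list.value[0]
    depth = feature['depth'].int64_list.value[0]
    image = np.frombuffer(feature['blob/0'].bytes_list.value[0], dtype=np.uint8)
    return image.reshape((height, width, depth))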
def convert_video_to_numpy(filenames, dense_optical_flow=False):
"""Generates an ndarray from multiple video files given by filenames.
  The implementation chooses the frame step size automatically for an equal separation distribution of the video images.
  :param filenames: list of video file paths
  :param dense_optical_flow: boolean, whether to compute dense optical flow and append it as an extra channel
:return if no optical flow is used: ndarray(uint32) of shape (v,i,h,w,c) with v=number of videos, i=number of images,
(h,w)=height and width of image, c=channel, if optical flow is used: ndarray(uint32) of (v,i,h,w,
c+1)"""
global NUM_CHANNELS_VIDEO
if not filenames:
raise RuntimeError('No data files found.')
number_of_videos = len(filenames)
  if dense_optical_flow:
    # optical flow is stored as one additional (fourth) channel next to the 3 real image channels
    NUM_CHANNELS_VIDEO = 4
    num_real_image_channel = 3
  else:
    # without optical flow, all channels are plain image channels
    num_real_image_channel = NUM_CHANNELS_VIDEO
data = []
def video_file_to_ndarray(i, filename):
image = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel), dtype=FLAGS.image_color_depth)
video = np.zeros((NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO, NUM_CHANNELS_VIDEO), dtype=np.uint32)
imagePrev = None
assert os.path.isfile(filename), "Couldn't find video file"
cap = getVideoCapture(filename)
assert cap is not None, "Couldn't load video capture:" + filename + ". Moving to next video."
# compute meta data of video
frameCount = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
# returns nan, if fps needed a measurement must be implemented
# frameRate = cap.get(cv2.cv.CV_CAP_PROP_FPS)
steps = math.floor(frameCount / NUM_FRAMES_PER_VIDEO)
j = 0
prev_frame_none = False
restart = True
assert not (frameCount < 1 or steps < 1), str(filename) + " does not have enough frames. Moving to next video."
while restart:
for f in range(int(frameCount)):
# get next frame after 'steps' iterations:
# floor used after modulo operation because rounding module before leads to
# unhandy partition of data (big gab in the end)
if math.floor(f % steps) == 0:
frame = getNextFrame(cap)
# special case handling: opencv's frame count != real frame count, reiterate over same video
if frame is None and j < NUM_FRAMES_PER_VIDEO:
            # frame is None here, so give up once a previous retry has already failed to read a frame
            if prev_frame_none: break
            prev_frame_none = True
# repeat with smaller step size
steps -= 1
if steps == 0: break
print("reducing step size due to error")
j = 0
cap.release()
cap = getVideoCapture(filenames[i])
# wait for image retrieval to be ready
cv2.waitKey(3000)
video.fill(0)
continue
else:
if j >= NUM_FRAMES_PER_VIDEO:
restart = False
break
# iterate over channels
if frame.ndim == 2:
# cv returns 2 dim array if gray
resizedImage = cv2.resize(frame[:, :], (HEIGHT_VIDEO, WIDTH_VIDEO))
else:
for k in range(num_real_image_channel):
resizedImage = cv2.resize(frame[:, :, k], (HEIGHT_VIDEO, WIDTH_VIDEO))
image[:, :, k] = resizedImage
if dense_optical_flow:
# optical flow requires at least two images
if imagePrev is not None:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
frameFlow = compute_dense_optical_flow(imagePrev, image)
frameFlow = cv2.cvtColor(frameFlow, cv2.COLOR_BGR2GRAY)
else:
frameFlow = np.zeros((HEIGHT_VIDEO, WIDTH_VIDEO))
imagePrev = image.copy()
if dense_optical_flow:
image_with_flow = image.copy()
image_with_flow = np.concatenate((image_with_flow, np.expand_dims(frameFlow, axis=2)), axis=2)
video[j, :, :, :] = image_with_flow
else:
video[j, :, :, :] = image
j += 1
# print('total frames: ' + str(j) + " frame in video: " + str(f))
else:
getNextFrame(cap)
print(str(i + 1) + " of " + str(number_of_videos) + " videos processed", filenames[i])
v = video.copy()
cap.release()
return v
for i, file in enumerate(filenames):
try:
v = video_file_to_ndarray(i, file)
data.append(v)
except Exception as e:
print(e)
return np.array(data)
def main(argv):
save_video_to_tfrecords(FLAGS.source, FLAGS.output_path, FLAGS.num_videos, dense_optical_flow=FLAGS.optical_flow)
if __name__ == '__main__':
app.run()
|
[
"cv2.normalize",
"math.floor",
"tensorflow.train.Int64List",
"numpy.array",
"numpy.asarray",
"tensorflow.python_io.TFRecordWriter",
"cv2.calcOpticalFlowFarneback",
"cv2.waitKey",
"tensorflow.python.platform.app.run",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.train.BytesList",
"os.path.isfile",
"tensorflow.train.Features",
"cv2.cvtColor",
"cv2.resize",
"tensorflow.python.platform.flags.DEFINE_boolean",
"tensorflow.python.platform.flags.DEFINE_string",
"cv2.cartToPolar",
"os.path.join",
"numpy.zeros",
"cv2.VideoCapture",
"numpy.expand_dims",
"numpy.zeros_like"
] |
[((1141, 1241), 'tensorflow.python.platform.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_videos"""', '(1000)', '"""Number of videos stored in one single tfrecords file"""'], {}), "('num_videos', 1000,\n 'Number of videos stored in one single tfrecords file')\n", (1161, 1241), False, 'from tensorflow.python.platform import flags\n'), ((1238, 1448), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""image_color_depth"""', 'np.uint8', '"""Color depth for the images stored in the tfrecords files. Has to correspond to the source video color depth. Specified as np dtype (e.g. np.uint8)."""'], {}), "('image_color_depth', np.uint8,\n 'Color depth for the images stored in the tfrecords files. Has to correspond to the source video color depth. Specified as np dtype (e.g. np.uint8).'\n )\n", (1257, 1448), False, 'from tensorflow.python.platform import flags\n'), ((1557, 1624), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""source"""', 'SOURCE', '"""Directory with video files"""'], {}), "('source', SOURCE, 'Directory with video files')\n", (1576, 1624), False, 'from tensorflow.python.platform import flags\n'), ((1625, 1712), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_path"""', 'DESTINATION', '"""Directory for storing tf records"""'], {}), "('output_path', DESTINATION,\n 'Directory for storing tf records')\n", (1644, 1712), False, 'from tensorflow.python.platform import flags\n'), ((1709, 1858), 'tensorflow.python.platform.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""optical_flow"""', '(True)', '"""Indictes whether optical flow shall be computed and added as fourth channel. Defaults to False"""'], {}), "('optical_flow', True,\n 'Indictes whether optical flow shall be computed and added as fourth channel. 
Defaults to False'\n )\n", (1729, 1858), False, 'from tensorflow.python.platform import flags\n'), ((2466, 2483), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (2476, 2483), True, 'import numpy as np\n'), ((2599, 2643), 'cv2.cvtColor', 'cv2.cvtColor', (['prev_image', 'cv2.COLOR_BGR2GRAY'], {}), '(prev_image, cv2.COLOR_BGR2GRAY)\n', (2611, 2643), True, 'import cv2 as cv2\n'), ((2667, 2714), 'cv2.cvtColor', 'cv2.cvtColor', (['current_image', 'cv2.COLOR_BGR2GRAY'], {}), '(current_image, cv2.COLOR_BGR2GRAY)\n', (2679, 2714), True, 'import cv2 as cv2\n'), ((2765, 2790), 'numpy.zeros_like', 'np.zeros_like', (['prev_image'], {}), '(prev_image)\n', (2778, 2790), True, 'import numpy as np\n'), ((2821, 2918), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prev_image_gray', 'current_image_gray', '(0.8)', '(15)', '(5)', '(10)', '(5)', '(1.5)', '(0)'], {}), '(prev_image_gray, current_image_gray, 0.8, 15, \n 5, 10, 5, 1.5, 0)\n', (2849, 2918), True, 'import cv2 as cv2\n'), ((2928, 2971), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (2943, 2971), True, 'import cv2 as cv2\n'), ((3020, 3069), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (3033, 3069), True, 'import cv2 as cv2\n'), ((3079, 3115), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (3091, 3115), True, 'import cv2 as cv2\n'), ((11080, 11094), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (11088, 11094), True, 'import numpy as np\n'), ((11262, 11271), 'tensorflow.python.platform.app.run', 'app.run', ([], {}), '()\n', (11269, 11271), False, 'from tensorflow.python.platform import app\n'), ((2332, 2354), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (2348, 2354), True, 'import cv2 as cv2\n'), ((7531, 7628), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel)'], {'dtype': 'FLAGS.image_color_depth'}), '((HEIGHT_VIDEO, WIDTH_VIDEO, num_real_image_channel), dtype=FLAGS.\n image_color_depth)\n', (7539, 7628), True, 'import numpy as np\n'), ((7636, 7736), 'numpy.zeros', 'np.zeros', (['(NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO, NUM_CHANNELS_VIDEO)'], {'dtype': 'np.uint32'}), '((NUM_FRAMES_PER_VIDEO, HEIGHT_VIDEO, WIDTH_VIDEO,\n NUM_CHANNELS_VIDEO), dtype=np.uint32)\n', (7644, 7736), True, 'import numpy as np\n'), ((7765, 7789), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (7779, 7789), False, 'import os, math\n'), ((8173, 8218), 'math.floor', 'math.floor', (['(frameCount / NUM_FRAMES_PER_VIDEO)'], {}), '(frameCount / NUM_FRAMES_PER_VIDEO)\n', (8183, 8218), False, 'import os, math\n'), ((1961, 1994), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (1979, 1994), True, 'import tensorflow as tf\n'), ((2061, 2094), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (2079, 2094), True, 'import tensorflow as tf\n'), ((4016, 4054), 'os.path.join', 'os.path.join', (['source_path', 'FILE_FILTER'], {}), '(source_path, FILE_FILTER)\n', (4028, 4054), False, 'import os, math\n'), ((5823, 5860), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (5850, 5860), True, 'import tensorflow as tf\n'), ((6362, 6396), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), 
'(feature=feature)\n', (6379, 6396), True, 'import tensorflow as tf\n'), ((8655, 8676), 'math.floor', 'math.floor', (['(f % steps)'], {}), '(f % steps)\n', (8665, 8676), False, 'import os, math\n'), ((9270, 9287), 'cv2.waitKey', 'cv2.waitKey', (['(3000)'], {}), '(3000)\n', (9281, 9287), True, 'import cv2 as cv2\n'), ((9587, 9639), 'cv2.resize', 'cv2.resize', (['frame[:, :]', '(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '(frame[:, :], (HEIGHT_VIDEO, WIDTH_VIDEO))\n', (9597, 9639), True, 'import cv2 as cv2\n'), ((9743, 9798), 'cv2.resize', 'cv2.resize', (['frame[:, :, k]', '(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '(frame[:, :, k], (HEIGHT_VIDEO, WIDTH_VIDEO))\n', (9753, 9798), True, 'import cv2 as cv2\n'), ((10015, 10052), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '((HEIGHT_VIDEO, WIDTH_VIDEO))\n', (10023, 10052), True, 'import numpy as np\n'), ((10158, 10201), 'cv2.cvtColor', 'cv2.cvtColor', (['frameFlow', 'cv2.COLOR_BGR2GRAY'], {}), '(frameFlow, cv2.COLOR_BGR2GRAY)\n', (10170, 10201), True, 'import cv2 as cv2\n'), ((10254, 10291), 'numpy.zeros', 'np.zeros', (['(HEIGHT_VIDEO, WIDTH_VIDEO)'], {}), '((HEIGHT_VIDEO, WIDTH_VIDEO))\n', (10262, 10291), True, 'import numpy as np\n'), ((10480, 10513), 'numpy.expand_dims', 'np.expand_dims', (['frameFlow'], {'axis': '(2)'}), '(frameFlow, axis=2)\n', (10494, 10513), True, 'import numpy as np\n')]
|
import numpy as np
import SimpleITK as sitk
def reference_image_build(spacing, size, direction, template_size, dim):
#template size: image(array) dimension to resize to: a list of three elements
reference_spacing = np.array(size)/np.array(template_size)*np.array(spacing)
reference_spacing[0] = 1.2
reference_spacing[1] = 1.2
reference_image = sitk.Image(template_size, 0)
reference_image.SetOrigin(np.zeros(3))
reference_image.SetSpacing(reference_spacing)
reference_image.SetDirection(direction)
return reference_image
def centering(img, ref_img, order=1):
dimension = img.GetDimension()
transform = sitk.AffineTransform(dimension)
transform.SetTranslation(np.array(img.GetOrigin()) - ref_img.GetOrigin())
# Modify the transformation to align the centers of the original and reference image instead of their origins.
centering_transform = sitk.TranslationTransform(dimension)
img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize())/2.0))
reference_center = np.array(ref_img.TransformContinuousIndexToPhysicalPoint(np.array(ref_img.GetSize())/2.0))
centering_transform.SetOffset(np.array(transform.GetInverse().TransformPoint(img_center) - reference_center))
centered_transform = sitk.Transform(transform)
centered_transform.AddTransform(centering_transform)
return transform_func(img, ref_img, centered_transform, order)
def isometric_transform(image, ref_img, orig_direction, order=1, target=None):
dim = ref_img.GetDimension()
affine = sitk.AffineTransform(dim)
if target is None:
target = np.eye(dim)
ori = np.reshape(orig_direction, np.eye(dim).shape)
target = np.reshape(target, np.eye(dim).shape)
affine.SetCenter(ref_img.TransformContinuousIndexToPhysicalPoint(np.array(ref_img.GetSize())/2.0))
return transform_func(image, ref_img, affine, order)
def transform_func(image, reference_image, transform, order=1):
# Output image Origin, Spacing, Size, Direction are taken from the reference
# image in this call to Resample
if order ==1:
interpolator = sitk.sitkLinear
elif order ==2:
interpolator = sitk.sitkBSpline
elif order == 0:
interpolator = sitk.sitkNearestNeighbor
default_value = 0
try:
resampled = sitk.Resample(image, reference_image, transform,
interpolator, default_value)
    except Exception as e:
        print(e)
        raise
return resampled
def resample_spacing(sitkIm, resolution=0.5, dim=3, template_size=(256, 256), order=1):
if type(sitkIm) is str:
image = sitk.ReadImage(sitkIm)
else:
image = sitkIm
orig_direction = image.GetDirection()
orig_size = np.array(image.GetSize(), dtype=np.int)
orig_spacing = np.array(image.GetSpacing())
new_size = orig_size*(orig_spacing/np.array(resolution))
new_size = np.ceil(new_size).astype(np.int) # Image dimensions are in integers
new_size = [int(s) for s in new_size]
template_size = (template_size[0], template_size[1], int(orig_size[-1]))
ref_img = reference_image_build(resolution, new_size, image.GetDirection(), template_size, dim)
centered = centering(image, ref_img, order)
transformed = isometric_transform(centered, ref_img, orig_direction, order)
return transformed, ref_img
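# Hedged usage sketch (file names are placeholders, not part of the original module):
def resample_spacing_usage(path='image.nii.gz'):
    resampled, ref_img = resample_spacing(path, resolution=0.5, template_size=(256, 256), order=1)
    sitk.WriteImage(resampled, 'image_resampled.nii.gz')
    return resampled, ref_img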
def resize_to_size(image, size=(256, 256), order=1):
orig_size = np.array(image.GetSize(), dtype=np.int)
    orig_spacing = np.array(image.GetSpacing())
new_size = [int(size[0]), int(size[1]), int(orig_size[-1])]
new_spacing = orig_spacing*orig_size/np.array(new_size)
if order ==1:
interpolator = sitk.sitkLinear
elif order ==2:
interpolator = sitk.sitkBSpline
elif order == 0:
interpolator = sitk.sitkNearestNeighbor
default_value = 0
fltr = sitk.ResampleImageFilter()
fltr.SetSize(new_size)
fltr.SetOutputSpacing(new_spacing)
fltr.SetOutputOrigin(image.GetOrigin())
fltr.SetOutputDirection(image.GetDirection())
fltr.SetInterpolator(interpolator)
image = fltr.Execute(image)
return image
def resample_scale(sitkIm, ref_img,gt_img=None, scale_factor=1., order=1):
sitkIm.SetDirection(np.eye(3).ravel())
ref_img.SetDirection(np.eye(3).ravel())
    if gt_img is not None:
        gt_img.SetDirection(np.eye(3).ravel())
dim = sitkIm.GetDimension()
affine = sitk.AffineTransform(dim)
scale= np.array(ref_img.GetDirection())
scale = np.reshape(scale, (dim,dim))
scale[:,0] *= 1./scale_factor
scale[:,1] *= 1./scale_factor
if gt_img is not None:
stats = sitk.LabelShapeStatisticsImageFilter()
stats.Execute(sitk.Cast(gt_img,sitk.sitkInt32))
center = stats.GetCentroid(1)
else:
center = sitkIm.TransformContinuousIndexToPhysicalPoint(np.array(sitkIm.GetSize())/2.0)
affine.SetMatrix(scale.ravel())
affine.SetCenter(center)
transformed = transform_func(sitkIm, ref_img, affine, order)
return transformed
def swap_labels(labels):
unique_label = np.unique(labels)
new_label = range(len(unique_label))
for i in range(len(unique_label)):
label = unique_label[i]
newl = new_label[i]
labels[labels==label] = newl
return labels
def swap_labels_back(labels,pred):
unique_label = np.unique(labels)
new_label = range(len(unique_label))
for i in range(len(unique_label)):
pred[pred==i] = unique_label[i]
return pred
def rescale_intensity(slice_im):
if type(slice_im) != np.ndarray:
raise RuntimeError("Input image is not numpy array")
#upper = np.percentile(slice_im, 90)
upper = np.percentile(slice_im, 99)
lower = np.percentile(slice_im, 20)
slice_im[slice_im>upper] = upper
slice_im[slice_im<lower] = lower
slice_im -= lower
rng = upper - lower
slice_im = slice_im/rng*2.
slice_im -= 1.
#slice_im = (slice_im - np.mean(slice_im))/np.std(slice_im)
return slice_im
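# Note (illustration only): for float input, rescale_intensity clips values to the [20th, 99th]
# percentile range and maps that range linearly to [-1, 1] (20th percentile -> -1, 99th -> 1).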
def swap_low_freq(im1, im2, beta):
"""
    Replace the low-frequency components of im2 with those of im1
    beta: ratio between the swapped (low-frequency) region and the image dimensions
"""
#im1 = denoise(im1, 10, 0.125)
#im2 = denoise(im2, 10, 0.125)
im1 = np.squeeze(im1)
im2 = np.squeeze(im2)
im1 = im1- np.min(im1)
im2 = im2-np.min(im2)
im1_fft = np.fft.fftshift(np.fft.fft2(im1))
im2_fft = np.fft.fftshift(np.fft.fft2(im2))
change = beta * np.array(im2_fft.shape)
up0 = int(im2.shape[0]/2-change[0]/2)
down0 = int(im2.shape[0]/2+change[0]/2)
up1 = int(im2.shape[1]/2-change[1]/2)
down1 = int(im2.shape[1]/2+change[1]/2)
#im2_fft[up0:down0, up1:down1] = 0.
im2_fft[up0:down0, up1:down1] = im1_fft[up0:down0, up1:down1]
im2_new = np.abs(np.real(np.fft.ifft2(im2_fft)))
return im1, im2, im2_new
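# Hedged example on synthetic data (not part of the original module): swap the central 10% of the
# spectrum of one random image into another, as swap_low_freq does above.
def swap_low_freq_example():
    im_a = np.random.rand(64, 64)
    im_b = np.random.rand(64, 64)
    im1, im2, im2_new = swap_low_freq(im_a, im_b, beta=0.1)
    return im2_new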
class SpatialTransform(object):
'''
    Base class for image transforms
'''
def __init__(self, image):
self.image = image
self.dim = image.GetDimension()
def apply_transform(self):
output = []
out_im = transform_func(self.image, self.image, self.transform, order=1)
output.append(out_im)
return output
def add_transform(self, transform):
total = sitk.Transform(self.transform)
total.AddTransform(transform)
self.transform = total
class AffineTransform(SpatialTransform):
'''
Apply random affine transform to input 3D image volume
'''
def __init__(self, image, shear_range, scale_range, rot_range, trans_range, flip_prob):
super(AffineTransform, self).__init__(image)
self.shear_range = shear_range
self.scale_range = scale_range
self.rot_range = rot_range
self.flip_prob = flip_prob
self.trans_range = trans_range
self.transform = sitk.AffineTransform(self.dim)
def scale(self):
self.transform = sitk.AffineTransform(self.transform)
scale = np.eye(self.dim)
scale = np.diag( 1./np.random.uniform(self.scale_range[0], self.scale_range[1], self.dim))
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim,self.dim))
matrix = np.matmul(matrix, scale)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def rotate(self):
angles = np.random.uniform(self.rot_range[0], self.rot_range[1], self.dim)
rads = np.array(angles)/180.*np.pi
x_rot = np.eye(self.dim)
x_rot = [[1., 0., 0.], [0., np.cos(rads[0]), -np.sin(rads[0])], [0., np.sin(rads[0]), np.cos(rads[0])]]
y_rot = [[np.cos(rads[1]), 0., np.sin(rads[1])], [0.,1.,0.], [-np.sin(rads[1]), 0., np.cos(rads[1])]]
z_rot = [[np.cos(rads[2]), -np.sin(rads[2]), 0.], [np.sin(rads[2]), np.cos(rads[2]), 0.], [0., 0., 1.]]
rot_matrix = np.matmul(np.matmul(np.array(x_rot), np.array(y_rot)), np.array(z_rot))
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim, self.dim))
matrix = np.matmul(matrix, rot_matrix)
self.transform = sitk.AffineTransform(self.transform)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def translate(self):
self.transform = sitk.AffineTransform(self.transform)
params = np.random.uniform(self.trans_range[0],self.trans_range[1], self.dim)
print("Translation: " , params)
self.transform.SetTranslation(params)
#self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def shear(self):
self.transform = sitk.AffineTransform(self.transform)
axis = np.argsort(np.random.rand(self.dim))
self.transform.Shear(int(axis[0]), int(axis[1]), np.random.uniform(self.shear_range[0],
self.shear_range[1]))
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def flip(self):
flip = np.random.rand(self.dim)>self.flip_prob
flip_matrix = np.eye(self.dim)
flip_matrix[np.diag(flip)] = -1.
self.transform = sitk.AffineTransform(self.transform)
matrix = np.array(self.transform.GetMatrix()).reshape((self.dim,self.dim))
matrix = np.matmul(matrix, flip_matrix)
self.transform.SetMatrix(matrix.ravel())
self.transform.SetCenter(self.image.TransformContinuousIndexToPhysicalPoint(np.array(self.image.GetSize())/2.0))
def affine(self):
# commented out others since we only need translation for now
#self.shear()
#self.rotate()
self.translate()
#self.flip()
#self.scale()
def apply_transform(self):
output = []
out_im = transform_func(self.image, self.image, self.transform, order=1)
output.append(out_im)
return output
def affine_usage(sitk_image):
'''
example function to apply affine transform to images
'''
params_affine = {
'scale_range': [0.8, 1.2],
'rot_range': [-20., 20.],
'trans_range': [-5., 5.], # range of translation
'shear_range': [-0.13, 0.13],
'flip_prob': 0.3
}
affine = AffineTransform(sitk_image, **params_affine)
affine.affine()
output = affine.apply_transform()
return output
|
[
"numpy.random.rand",
"SimpleITK.AffineTransform",
"numpy.array",
"numpy.sin",
"numpy.reshape",
"numpy.fft.fft2",
"numpy.matmul",
"numpy.min",
"SimpleITK.Resample",
"numpy.eye",
"SimpleITK.TranslationTransform",
"numpy.ceil",
"SimpleITK.Image",
"numpy.squeeze",
"numpy.cos",
"SimpleITK.Cast",
"SimpleITK.LabelShapeStatisticsImageFilter",
"SimpleITK.Transform",
"numpy.unique",
"numpy.fft.ifft2",
"SimpleITK.ResampleImageFilter",
"numpy.diag",
"numpy.zeros",
"numpy.random.uniform",
"SimpleITK.ReadImage",
"numpy.percentile"
] |
[((357, 385), 'SimpleITK.Image', 'sitk.Image', (['template_size', '(0)'], {}), '(template_size, 0)\n', (367, 385), True, 'import SimpleITK as sitk\n'), ((628, 659), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dimension'], {}), '(dimension)\n', (648, 659), True, 'import SimpleITK as sitk\n'), ((873, 909), 'SimpleITK.TranslationTransform', 'sitk.TranslationTransform', (['dimension'], {}), '(dimension)\n', (898, 909), True, 'import SimpleITK as sitk\n'), ((1255, 1280), 'SimpleITK.Transform', 'sitk.Transform', (['transform'], {}), '(transform)\n', (1269, 1280), True, 'import SimpleITK as sitk\n'), ((1524, 1549), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dim'], {}), '(dim)\n', (1544, 1549), True, 'import SimpleITK as sitk\n'), ((3762, 3788), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (3786, 3788), True, 'import SimpleITK as sitk\n'), ((4278, 4303), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['dim'], {}), '(dim)\n', (4298, 4303), True, 'import SimpleITK as sitk\n'), ((4356, 4385), 'numpy.reshape', 'np.reshape', (['scale', '(dim, dim)'], {}), '(scale, (dim, dim))\n', (4366, 4385), True, 'import numpy as np\n'), ((4910, 4927), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4919, 4927), True, 'import numpy as np\n'), ((5180, 5197), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (5189, 5197), True, 'import numpy as np\n'), ((5522, 5549), 'numpy.percentile', 'np.percentile', (['slice_im', '(99)'], {}), '(slice_im, 99)\n', (5535, 5549), True, 'import numpy as np\n'), ((5562, 5589), 'numpy.percentile', 'np.percentile', (['slice_im', '(20)'], {}), '(slice_im, 20)\n', (5575, 5589), True, 'import numpy as np\n'), ((6097, 6112), 'numpy.squeeze', 'np.squeeze', (['im1'], {}), '(im1)\n', (6107, 6112), True, 'import numpy as np\n'), ((6123, 6138), 'numpy.squeeze', 'np.squeeze', (['im2'], {}), '(im2)\n', (6133, 6138), True, 'import numpy as np\n'), ((261, 278), 'numpy.array', 'np.array', (['spacing'], {}), '(spacing)\n', (269, 278), True, 'import numpy as np\n'), ((414, 425), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (422, 425), True, 'import numpy as np\n'), ((1584, 1595), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1590, 1595), True, 'import numpy as np\n'), ((2270, 2347), 'SimpleITK.Resample', 'sitk.Resample', (['image', 'reference_image', 'transform', 'interpolator', 'default_value'], {}), '(image, reference_image, transform, interpolator, default_value)\n', (2283, 2347), True, 'import SimpleITK as sitk\n'), ((2564, 2586), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['sitkIm'], {}), '(sitkIm)\n', (2578, 2586), True, 'import SimpleITK as sitk\n'), ((3530, 3548), 'numpy.array', 'np.array', (['new_size'], {}), '(new_size)\n', (3538, 3548), True, 'import numpy as np\n'), ((4489, 4527), 'SimpleITK.LabelShapeStatisticsImageFilter', 'sitk.LabelShapeStatisticsImageFilter', ([], {}), '()\n', (4525, 4527), True, 'import SimpleITK as sitk\n'), ((6154, 6165), 'numpy.min', 'np.min', (['im1'], {}), '(im1)\n', (6160, 6165), True, 'import numpy as np\n'), ((6180, 6191), 'numpy.min', 'np.min', (['im2'], {}), '(im2)\n', (6186, 6191), True, 'import numpy as np\n'), ((6222, 6238), 'numpy.fft.fft2', 'np.fft.fft2', (['im1'], {}), '(im1)\n', (6233, 6238), True, 'import numpy as np\n'), ((6270, 6286), 'numpy.fft.fft2', 'np.fft.fft2', (['im2'], {}), '(im2)\n', (6281, 6286), True, 'import numpy as np\n'), ((6309, 6332), 'numpy.array', 'np.array', (['im2_fft.shape'], {}), '(im2_fft.shape)\n', (6317, 6332), True, 'import 
numpy as np\n'), ((7115, 7145), 'SimpleITK.Transform', 'sitk.Transform', (['self.transform'], {}), '(self.transform)\n', (7129, 7145), True, 'import SimpleITK as sitk\n'), ((7690, 7720), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.dim'], {}), '(self.dim)\n', (7710, 7720), True, 'import SimpleITK as sitk\n'), ((7768, 7804), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (7788, 7804), True, 'import SimpleITK as sitk\n'), ((7821, 7837), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (7827, 7837), True, 'import numpy as np\n'), ((8037, 8061), 'numpy.matmul', 'np.matmul', (['matrix', 'scale'], {}), '(matrix, scale)\n', (8046, 8061), True, 'import numpy as np\n'), ((8272, 8337), 'numpy.random.uniform', 'np.random.uniform', (['self.rot_range[0]', 'self.rot_range[1]', 'self.dim'], {}), '(self.rot_range[0], self.rot_range[1], self.dim)\n', (8289, 8337), True, 'import numpy as np\n'), ((8397, 8413), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (8403, 8413), True, 'import numpy as np\n'), ((8942, 8971), 'numpy.matmul', 'np.matmul', (['matrix', 'rot_matrix'], {}), '(matrix, rot_matrix)\n', (8951, 8971), True, 'import numpy as np\n'), ((8997, 9033), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9017, 9033), True, 'import SimpleITK as sitk\n'), ((9259, 9295), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9279, 9295), True, 'import SimpleITK as sitk\n'), ((9313, 9382), 'numpy.random.uniform', 'np.random.uniform', (['self.trans_range[0]', 'self.trans_range[1]', 'self.dim'], {}), '(self.trans_range[0], self.trans_range[1], self.dim)\n', (9330, 9382), True, 'import numpy as np\n'), ((9636, 9672), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (9656, 9672), True, 'import SimpleITK as sitk\n'), ((10082, 10098), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (10088, 10098), True, 'import numpy as np\n'), ((10166, 10202), 'SimpleITK.AffineTransform', 'sitk.AffineTransform', (['self.transform'], {}), '(self.transform)\n', (10186, 10202), True, 'import SimpleITK as sitk\n'), ((10303, 10333), 'numpy.matmul', 'np.matmul', (['matrix', 'flip_matrix'], {}), '(matrix, flip_matrix)\n', (10312, 10333), True, 'import numpy as np\n'), ((222, 236), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (230, 236), True, 'import numpy as np\n'), ((237, 260), 'numpy.array', 'np.array', (['template_size'], {}), '(template_size)\n', (245, 260), True, 'import numpy as np\n'), ((1634, 1645), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1640, 1645), True, 'import numpy as np\n'), ((1683, 1694), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1689, 1694), True, 'import numpy as np\n'), ((2791, 2811), 'numpy.array', 'np.array', (['resolution'], {}), '(resolution)\n', (2799, 2811), True, 'import numpy as np\n'), ((2826, 2843), 'numpy.ceil', 'np.ceil', (['new_size'], {}), '(new_size)\n', (2833, 2843), True, 'import numpy as np\n'), ((4548, 4581), 'SimpleITK.Cast', 'sitk.Cast', (['gt_img', 'sitk.sitkInt32'], {}), '(gt_img, sitk.sitkInt32)\n', (4557, 4581), True, 'import SimpleITK as sitk\n'), ((6641, 6662), 'numpy.fft.ifft2', 'np.fft.ifft2', (['im2_fft'], {}), '(im2_fft)\n', (6653, 6662), True, 'import numpy as np\n'), ((8824, 8839), 'numpy.array', 'np.array', (['z_rot'], {}), '(z_rot)\n', (8832, 8839), True, 'import numpy as np\n'), ((9701, 9725), 
'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (9715, 9725), True, 'import numpy as np\n'), ((9784, 9843), 'numpy.random.uniform', 'np.random.uniform', (['self.shear_range[0]', 'self.shear_range[1]'], {}), '(self.shear_range[0], self.shear_range[1])\n', (9801, 9843), True, 'import numpy as np\n'), ((10020, 10044), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (10034, 10044), True, 'import numpy as np\n'), ((10119, 10132), 'numpy.diag', 'np.diag', (['flip'], {}), '(flip)\n', (10126, 10132), True, 'import numpy as np\n'), ((4135, 4144), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4141, 4144), True, 'import numpy as np\n'), ((4177, 4186), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4183, 4186), True, 'import numpy as np\n'), ((4218, 4227), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4224, 4227), True, 'import numpy as np\n'), ((7866, 7935), 'numpy.random.uniform', 'np.random.uniform', (['self.scale_range[0]', 'self.scale_range[1]', 'self.dim'], {}), '(self.scale_range[0], self.scale_range[1], self.dim)\n', (7883, 7935), True, 'import numpy as np\n'), ((8353, 8369), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (8361, 8369), True, 'import numpy as np\n'), ((8450, 8465), 'numpy.cos', 'np.cos', (['rads[0]'], {}), '(rads[0])\n', (8456, 8465), True, 'import numpy as np\n'), ((8491, 8506), 'numpy.sin', 'np.sin', (['rads[0]'], {}), '(rads[0])\n', (8497, 8506), True, 'import numpy as np\n'), ((8508, 8523), 'numpy.cos', 'np.cos', (['rads[0]'], {}), '(rads[0])\n', (8514, 8523), True, 'import numpy as np\n'), ((8544, 8559), 'numpy.cos', 'np.cos', (['rads[1]'], {}), '(rads[1])\n', (8550, 8559), True, 'import numpy as np\n'), ((8565, 8580), 'numpy.sin', 'np.sin', (['rads[1]'], {}), '(rads[1])\n', (8571, 8580), True, 'import numpy as np\n'), ((8618, 8633), 'numpy.cos', 'np.cos', (['rads[1]'], {}), '(rads[1])\n', (8624, 8633), True, 'import numpy as np\n'), ((8654, 8669), 'numpy.cos', 'np.cos', (['rads[2]'], {}), '(rads[2])\n', (8660, 8669), True, 'import numpy as np\n'), ((8695, 8710), 'numpy.sin', 'np.sin', (['rads[2]'], {}), '(rads[2])\n', (8701, 8710), True, 'import numpy as np\n'), ((8712, 8727), 'numpy.cos', 'np.cos', (['rads[2]'], {}), '(rads[2])\n', (8718, 8727), True, 'import numpy as np\n'), ((8789, 8804), 'numpy.array', 'np.array', (['x_rot'], {}), '(x_rot)\n', (8797, 8804), True, 'import numpy as np\n'), ((8806, 8821), 'numpy.array', 'np.array', (['y_rot'], {}), '(y_rot)\n', (8814, 8821), True, 'import numpy as np\n'), ((8468, 8483), 'numpy.sin', 'np.sin', (['rads[0]'], {}), '(rads[0])\n', (8474, 8483), True, 'import numpy as np\n'), ((8597, 8612), 'numpy.sin', 'np.sin', (['rads[1]'], {}), '(rads[1])\n', (8603, 8612), True, 'import numpy as np\n'), ((8672, 8687), 'numpy.sin', 'np.sin', (['rads[2]'], {}), '(rads[2])\n', (8678, 8687), True, 'import numpy as np\n')]
|
import re
import math
import numpy as np
class UpstreamAUG:
def __init__(self, allow_ORF=True, verbose_output=False):
"""
Constructor
:param allow_ORF: bool, True by default, whether to check uORFs
:param verbose_output: bool, False by default, whether to return dictionaries in predict_on_sample() and predict_on_batch() methods or not
"""
self.allow_ORF = allow_ORF
self.verbose_output = verbose_output
pass
def predict_on_sample(self, seq):
"""
Predict_on_sample
:param seq: string, 5'UTR's sequence
:return: if verbose_output: dictionary:
            "frame": 1 or 0 depending on whether the uAUG is in-frame or not
            "uORF": 1 or 0 depending on whether it corresponds to a uORF or not
            else: NumPy array of 1s and 0s depending on whether each uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUG, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
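    # Note on the convention used above: an upstream ATG counts as "in-frame" when
    # (len(seq) - ATG_start) % 3 == 0, i.e. it shares the reading frame of the main CDS start
    # right after the 5'UTR; "uORF" means an in-frame stop codon (TAA/TAG/TGA) occurs
    # downstream of that ATG within the 5'UTR.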
def predict_on_sample_with_pos(self, seq):
"""
In comparison to predict_on_sample(), additionally returns the positions of AUGs
        :param seq: string, 5'UTR's sequence
        :return: if verbose_output: dictionary
            "frame": 1 or 0 depending on whether the uAUG is in-frame or not
            "uORF": 1 or 0 depending on whether it corresponds to a uORF or not
            "pos": position of the ATG
            else: NumPy array of 1s and 0s depending on whether each uAUG is in-frame or not
:example: if the input 5'UTR has 5 AUG, then
{
"frame": [1, 1, 0, 0, 1],
"uORF": [1, 1, 1, 0, 0],
"pos": [38, 190, 438, 769, 981]
}
"""
if self.allow_ORF:
if self.verbose_output:
ATG_frame = []
ATG_ORF = []
ATG_pos = []
for ATG in re.finditer('ATG', seq.upper()):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
ATG_ORF.append(1)
else:
ATG_ORF.append(0)
if (len(seq) - ATG.start()) % 3:
ATG_frame.append(0)
else:
ATG_frame.append(1)
ATG_pos.append(ATG.start())
return {"frame": np.array(ATG_frame), "uORF": np.array(ATG_ORF), "pos": np.array(ATG_pos)}
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq.upper())]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
return np.array(ATG_frame)
else:
pass
def predict_on_sample_with_pos_pandas(self, seq, result_dict, strand, start=None):
"""
        In comparison to predict_on_sample(), additionally returns the positions of AUGs and writes everything to the \
        dictionary passed to it
        :param seq: string, 5'UTR's sequence
        :param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", "in-frame_no_uORF", "in-frame_uORF", where to append the found values
        :param start: integer, position relative to the whole genome (in contrast to the position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
else:
list_11.append(ATG.start() + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
seq_remainder = seq[ATG.start() + 3:]
TAA_frame = [(TAA.start() % 3) for TAA in re.finditer('TAA', seq_remainder)]
if 0 in TAA_frame:
ORF = True
else:
TAG_frame = [(TAG.start() % 3) for TAG in re.finditer('TAG', seq_remainder)]
if 0 in TAG_frame:
ORF = True
else:
TGA_frame = [(TGA.start() % 3) for TGA in re.finditer('TGA', seq_remainder)]
ORF = 0 in TGA_frame
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
pass
def predict_on_sample_with_stop_pandas(self, seq, result_dict, strand, start=None):
"""
        In comparison to predict_on_sample(), additionally returns the positions of AUGs and writes everything to the \
        dictionary passed to it
        :param seq: string, 5'UTR's sequence
        :param result_dict: dictionary with 4 mandatory keys "not_in-frame_no_uORF", "not_in-frame_uORF", \
        "in-frame_no_uORF", "in-frame_uORF", where to append the found values
        :param start: integer, position relative to the whole genome (in contrast to the position relative to the exon)
"""
if self.allow_ORF:
if strand == '+':
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(ATG.start() + start)
list_01.append(ORF + start)
else:
list_11.append(ATG.start() + start)
list_11.append(ORF + start)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(ATG.start() + start)
else:
list_10.append(ATG.start() + start)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
if self.verbose_output:
list_00 = [] # not_in-frame_no_uORF
list_01 = [] # not_in-frame_uORF
list_10 = [] # in-frame_no_uORF
list_11 = [] # in-frame_uORF
for ATG in re.finditer('ATG', seq):
ORF = 0
seq_remainder = seq[ATG.start() + 3:]
for TAA in re.finditer('TAA', seq_remainder):
if not (TAA.start() % 3):
ORF = TAA.start()
break
if not ORF:
for TAG in re.finditer('TAG', seq_remainder):
if not (TAG.start() % 3):
ORF = TAG.start()
break
if not ORF:
for TGA in re.finditer('TGA', seq_remainder):
if not (TGA.start() % 3):
ORF = TGA.start()
break
if ORF:
if (len(seq) - ATG.start()) % 3:
list_01.append(start + (len(seq) - ATG.start()) - 1)
list_01.append(start + (len(seq) - ORF) - 1)
else:
list_11.append(start + (len(seq) - ATG.start()) - 1)
list_11.append(start + (len(seq) - ORF) - 1)
else:
if (len(seq) - ATG.start()) % 3:
list_00.append(start + (len(seq) - ATG.start()) - 1)
else:
list_10.append(start + (len(seq) - ATG.start()) - 1)
result_dict["not_in-frame_no_uORF"].append(np.array(list_00))
result_dict["not_in-frame_uORF"].append(np.array(list_01))
result_dict["in-frame_no_uORF"].append(np.array(list_10))
result_dict["in-frame_uORF"].append(np.array(list_11))
pass
else:
ATG_pos = [ATG.start() for ATG in re.finditer('ATG', seq)]
ATG_frame = [((len(seq) - pos) % 3) for pos in ATG_pos]
ATG_frame[:] = [(math.ceil(res / 2) ^ 1) for res in ATG_frame]
pass
else:
pass
def predict_on_batch(self, seq_list):
"""
Predict on batch
:param seq_list: list of string utr's sequences
        :return: if verbose_output: list of dictionaries, one per sequence:
            "frame": 1 or 0 depending on whether the uAUG is in-frame or not
            "uORF": 1 or 0 depending on whether it corresponds to a uORF or not
            else: list of NumPy arrays of 1s and 0s indicating whether each uAUG is in-frame or not
"""
if self.allow_ORF:
result_list = []
for seq in seq_list:
result_list.append(self.predict_on_sample(seq))
return result_list
else:
pass
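# Hedged usage sketch (toy sequences, not part of the original module):
def upstream_aug_example():
    model = UpstreamAUG(allow_ORF=True, verbose_output=True)
    utrs = ["GGATGCCTAACCATGGG", "CCCATGAAATAGCC"]
    # One dictionary per 5'UTR, each holding a "frame" and a "uORF" array with one value per upstream ATG.
    return model.predict_on_batch(utrs)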
|
[
"numpy.array",
"math.ceil",
"re.finditer"
] |
[((2560, 2579), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (2568, 2579), True, 'import numpy as np\n'), ((4952, 4971), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (4960, 4971), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (2243, 2254), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.array', 'np.array', (['ATG_ORF'], {}), '(ATG_ORF)\n', (2272, 2281), True, 'import numpy as np\n'), ((4601, 4620), 'numpy.array', 'np.array', (['ATG_frame'], {}), '(ATG_frame)\n', (4609, 4620), True, 'import numpy as np\n'), ((4630, 4647), 'numpy.array', 'np.array', (['ATG_ORF'], {}), '(ATG_ORF)\n', (4638, 4647), True, 'import numpy as np\n'), ((4656, 4673), 'numpy.array', 'np.array', (['ATG_pos'], {}), '(ATG_pos)\n', (4664, 4673), True, 'import numpy as np\n'), ((5950, 5973), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (5961, 5973), False, 'import re\n'), ((8098, 8121), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (8109, 8121), False, 'import re\n'), ((11018, 11041), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (11029, 11041), False, 'import re\n'), ((13475, 13498), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (13486, 13498), False, 'import re\n'), ((2491, 2509), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (2500, 2509), False, 'import math\n'), ((4883, 4901), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (4892, 4901), False, 'import math\n'), ((7229, 7246), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (7237, 7246), True, 'import numpy as np\n'), ((7308, 7325), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (7316, 7325), True, 'import numpy as np\n'), ((7386, 7403), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (7394, 7403), True, 'import numpy as np\n'), ((7461, 7478), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (7469, 7478), True, 'import numpy as np\n'), ((9445, 9462), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (9453, 9462), True, 'import numpy as np\n'), ((9524, 9541), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (9532, 9541), True, 'import numpy as np\n'), ((9602, 9619), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (9610, 9619), True, 'import numpy as np\n'), ((9677, 9694), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (9685, 9694), True, 'import numpy as np\n'), ((11173, 11206), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (11184, 11206), False, 'import re\n'), ((12606, 12623), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (12614, 12623), True, 'import numpy as np\n'), ((12685, 12702), 'numpy.array', 'np.array', (['list_01'], {}), '(list_01)\n', (12693, 12702), True, 'import numpy as np\n'), ((12763, 12780), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (12771, 12780), True, 'import numpy as np\n'), ((12838, 12855), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (12846, 12855), True, 'import numpy as np\n'), ((13630, 13663), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (13641, 13663), False, 'import re\n'), ((15165, 15182), 'numpy.array', 'np.array', (['list_00'], {}), '(list_00)\n', (15173, 15182), True, 'import numpy as np\n'), ((15244, 15261), 'numpy.array', 
'np.array', (['list_01'], {}), '(list_01)\n', (15252, 15261), True, 'import numpy as np\n'), ((15322, 15339), 'numpy.array', 'np.array', (['list_10'], {}), '(list_10)\n', (15330, 15339), True, 'import numpy as np\n'), ((15397, 15414), 'numpy.array', 'np.array', (['list_11'], {}), '(list_11)\n', (15405, 15414), True, 'import numpy as np\n'), ((1394, 1427), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (1405, 1427), False, 'import re\n'), ((3712, 3745), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (3723, 3745), False, 'import re\n'), ((7584, 7607), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (7595, 7607), False, 'import re\n'), ((7722, 7740), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (7731, 7740), False, 'import math\n'), ((9800, 9823), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (9811, 9823), False, 'import re\n'), ((9938, 9956), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (9947, 9956), False, 'import math\n'), ((11425, 11458), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (11436, 11458), False, 'import re\n'), ((12961, 12984), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (12972, 12984), False, 'import re\n'), ((13099, 13117), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (13108, 13117), False, 'import math\n'), ((13882, 13915), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (13893, 13915), False, 'import re\n'), ((15520, 15543), 're.finditer', 're.finditer', (['"""ATG"""', 'seq'], {}), "('ATG', seq)\n", (15531, 15543), False, 'import re\n'), ((15658, 15676), 'math.ceil', 'math.ceil', (['(res / 2)'], {}), '(res / 2)\n', (15667, 15676), False, 'import math\n'), ((1595, 1628), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (1606, 1628), False, 'import re\n'), ((3913, 3946), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (3924, 3946), False, 'import re\n'), ((6103, 6136), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (6114, 6136), False, 'import re\n'), ((8251, 8284), 're.finditer', 're.finditer', (['"""TAA"""', 'seq_remainder'], {}), "('TAA', seq_remainder)\n", (8262, 8284), False, 'import re\n'), ((11697, 11730), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (11708, 11730), False, 'import re\n'), ((14154, 14187), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (14165, 14187), False, 'import re\n'), ((1812, 1845), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (1823, 1845), False, 'import re\n'), ((4130, 4163), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (4141, 4163), False, 'import re\n'), ((6320, 6353), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (6331, 6353), False, 'import re\n'), ((8468, 8501), 're.finditer', 're.finditer', (['"""TAG"""', 'seq_remainder'], {}), "('TAG', seq_remainder)\n", (8479, 8501), False, 'import re\n'), ((6553, 6586), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (6564, 6586), False, 
'import re\n'), ((8701, 8734), 're.finditer', 're.finditer', (['"""TGA"""', 'seq_remainder'], {}), "('TGA', seq_remainder)\n", (8712, 8734), False, 'import re\n')]
|
"""
The script expects the MViT (MDef-DETR or MDETR) detections in .txt format. Specifically, there should be
one .txt file for each image, and each line in the file should represent a detection.
The format of a single detection should be "<label> <confidence> <x1> <y1> <x2> <y2>".
Please see 'mvit_detections' for reference.
"""
import os
import argparse
import xml.etree.ElementTree as ET
from fvcore.common.file_io import PathManager
import numpy as np
import time
import cv2
from nms import nms
TASK1_TRAIN_LIST = "t1_train.txt"
TASK2_TRAIN_LIST = "t2_train.txt"
TASK3_TRAIN_LIST = "t3_train.txt"
TASK4_TRAIN_LIST = "t4_train.txt"
def read_image_list(path):
with open(path, 'r') as f:
lines = f.read()
images = lines.split('\n')
return images[:-1]
TASK1_TRAIN_IMAGES = read_image_list(TASK1_TRAIN_LIST)
TASK2_TRAIN_IMAGES = read_image_list(TASK2_TRAIN_LIST)
TASK3_TRAIN_IMAGES = read_image_list(TASK3_TRAIN_LIST)
TASK4_TRAIN_IMAGES = read_image_list(TASK4_TRAIN_LIST)
TASK1_KNOWN_CLASSES = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
"chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor", "airplane", "dining table", "motorcycle",
"potted plant", "couch", "tv"]
TASK2_KNOWN_CLASSES = TASK1_KNOWN_CLASSES + ["truck", "traffic light", "fire hydrant", "stop sign", "parking meter",
"bench", "elephant", "bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag", "tie", "suitcase",
"microwave", "oven", "toaster", "sink", "refrigerator"]
TASK3_KNOWN_CLASSES = TASK2_KNOWN_CLASSES + ["frisbee", "skis", "snowboard", "sports ball", "kite",
"baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket",
"banana", "apple", "sandwich", "orange", "broccoli",
"carrot", "hot dog", "pizza", "donut", "cake"]
TASK4_KNOWN_CLASSES = TASK3_KNOWN_CLASSES + ["bed", "toilet", "laptop", "mouse",
"remote", "keyboard", "cell phone", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush",
"wine glass", "cup", "fork", "knife", "spoon", "bowl"]
def parse_arguments():
"""
Parse the command line arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-ann", "--annotations_dir_path", required=True,
help="Path to the directory containing the original annotations in pascal VOC format.")
ap.add_argument("-det", "--detections_dir_path", required=True,
help="Path to the directory containing the detections generated using class agnostic object "
"detector. One .txt file for each image where each line in the file represents a detection."
"The format of a single detection should be "
"<label> <confidence> <x1> <y1> <x2> <y2>")
ap.add_argument("-o", "--output_dir_path", required=True,
help="The output dir path to save the updated annotations.")
ap.add_argument("-det_conf", "--detection_confidence_threshold", required=False, type=float, default=0.5,
help="The confidence threshold to filter potential detections at first step. All detections with "
"confidence less than this threshold value will be ignored.")
ap.add_argument("-iou", "--iou_thresh_unk", required=False, type=float, default=0.5,
help="All detections, having an overlap greater than iou_thresh with any of the ground truths, "
"will be ignored.")
ap.add_argument("-nms", "--apply_nms", required=False, type=bool, default=False,
help="Flag to decide either to apply NMS on detections before assigning them unknown/gt or not.")
ap.add_argument("-iou_nms", "--iou_thresh_nms", required=False, type=float, default=0.2,
help="IOU threshold for NMS.")
args = vars(ap.parse_args())
return args
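# Hedged usage sketch (the script file name and the directory paths below are
# placeholders; only the flags are defined in parse_arguments above):
#   python <this_script>.py -ann VOC/Annotations -det mvit_detections \
#       -o updated_annotations -det_conf 0.5 -iou 0.5 -nms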
def parse_voc_gt_kn(path):
image_name = os.path.basename(path).split('.')[0]
if os.path.exists(path):
with PathManager.open(path) as f:
tree = ET.parse(f)
boxes = []
for obj in tree.findall("object"):
cls = obj.find("name").text
if image_name in TASK1_TRAIN_IMAGES:
if cls not in TASK1_KNOWN_CLASSES:
continue
elif image_name in TASK2_TRAIN_IMAGES:
if cls not in TASK2_KNOWN_CLASSES:
continue
elif image_name in TASK3_TRAIN_IMAGES:
if cls not in TASK3_KNOWN_CLASSES:
continue
elif image_name in TASK4_TRAIN_IMAGES:
if cls not in TASK4_KNOWN_CLASSES:
continue
else:
# Not a training image
return boxes, tree, False
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
# Original annotations are integers in the range [1, W or H]
# Assuming they mean 1-based pixel indices (inclusive),
# a box with annotation (xmin=1, xmax=W) covers the whole image.
# In coordinate space this is represented by (xmin=0, xmax=W)
bbox[0] -= 1.0
bbox[1] -= 1.0
boxes.append(bbox)
else:
# No annotation file found, create an empty xml node and return
image_name = f"{os.path.basename(path).split('.')[0]}.jpg"
image_path = f"{os.path.dirname(os.path.dirname(path))}/JPEGImages/{image_name}"
img = cv2.imread(image_path)
h, w, c = img.shape
node_root = ET.Element('annotation')
node_folder = ET.SubElement(node_root, 'folder')
node_folder.text = 'VOC2007'
node_filename = ET.SubElement(node_root, 'filename')
node_filename.text = image_name
node_size = ET.SubElement(node_root, 'size')
node_width = ET.SubElement(node_size, 'width')
node_width.text = str(int(w))
node_height = ET.SubElement(node_size, 'height')
node_height.text = str(int(h))
node_depth = ET.SubElement(node_size, 'depth')
node_depth.text = str(int(c))
tree = ET.ElementTree(node_root)
boxes = []
return boxes, tree, True
def parse_det_txt(path, conf_thresh=0.5):
if os.path.exists(path):
with open(path, "r") as f:
lines = f.readlines()
boxes = []
scores = []
for line in lines:
content = line.rstrip().split(' ')
bbox = content[2:]
# Only keep the boxes with score >= conf_thresh
det_conf = float(content[1])
if det_conf >= conf_thresh:
boxes.append([int(b) for b in bbox])
scores.append(det_conf)
return boxes, scores
else:
return [], []
def class_agnostic_nms(boxes, scores, iou=0.7):
# boxes = non_max_suppression_fast(np.array(boxes), iou)
boxes = nms(np.array(boxes), np.array(scores), iou)
return list(boxes)
def get_unk_det(gt, det, iou):
if not gt:
return det
gt = np.array(gt)
unk_det = []
for dl in det:
d = np.array(dl)
ixmin = np.maximum(gt[:, 0], d[0])
iymin = np.maximum(gt[:, 1], d[1])
ixmax = np.minimum(gt[:, 2], d[2])
iymax = np.minimum(gt[:, 3], d[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (
(d[2] - d[0] + 1.0) * (d[3] - d[1] + 1.0)
+ (gt[:, 2] - gt[:, 0] + 1.0) * (gt[:, 3] - gt[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ov_max = np.max(overlaps)
if ov_max < iou:
unk_det.append(dl)
return unk_det
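# Minimal sketch of how get_unk_det behaves on toy boxes (illustrative numbers
# only; this helper is not called anywhere in the pipeline):
def _demo_get_unk_det():
    gt = [[10, 10, 50, 50]]
    det = [[12, 12, 48, 48],       # IoU with the GT box is ~0.81 -> dropped at iou=0.5
           [100, 100, 150, 150]]   # no overlap with any GT -> kept as potential unknown
    return get_unk_det(gt, det, iou=0.5)  # -> [[100, 100, 150, 150]]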
def main(ann_dir, det_dir, out_dir, det_conf_thesh, iou_thresh, nms=False, iou_thresh_nms=0.7):
files = os.listdir(det_dir)
start = time.time()
for i, file_name in enumerate(files):
if i % 100 == 0:
print(f"On image no. {i}. Time: {time.time() - start}")
start = time.time()
ann_file_path = f"{ann_dir}/{file_name.split('.')[0]}.xml"
ref_det_file_path = f"{det_dir}/{file_name.split('.')[0]}.txt"
out_ann_file_path = f"{out_dir}/{file_name.split('.')[0]}.xml"
gt_boxes, ann_tree, train = parse_voc_gt_kn(ann_file_path) # Read the ground truth bounding boxes
# Only add the unknown detections if training image
if not train:
# Copy the original annotation file
ann_tree.write(out_ann_file_path, encoding='latin-1')
continue
det_boxes, scores = parse_det_txt(ref_det_file_path, conf_thresh=det_conf_thesh) # Read the detections
if nms:
det_boxes = class_agnostic_nms(det_boxes, scores, iou_thresh_nms) # Apply NMS if prompted to do so
det_unk = get_unk_det(gt_boxes, det_boxes, iou_thresh) # Get the potential unknown detections
# Create the updated annotation file
for det in det_unk:
object = ET.SubElement(ann_tree.getroot(), 'object')
name = ET.SubElement(object, "name")
name.text = "unknown"
pose = ET.SubElement(object, "pose")
pose.text = "Unspecified"
truncated = ET.SubElement(object, "truncated")
truncated.text = "2"
difficult = ET.SubElement(object, "difficult")
difficult.text = "0"
bndbox = ET.SubElement(object, "bndbox")
xmin = ET.SubElement(bndbox, "xmin")
xmin.text = str(int(det[0]))
ymin = ET.SubElement(bndbox, "ymin")
ymin.text = str(int(det[1]))
xmax = ET.SubElement(bndbox, "xmax")
xmax.text = str(int(det[2]))
ymax = ET.SubElement(bndbox, "ymax")
ymax.text = str(int(det[3]))
# Save the updated annotations
ann_tree.write(out_ann_file_path, encoding='latin-1')
if __name__ == "__main__":
args = parse_arguments()
annotations_dir = args["annotations_dir_path"]
detections_dir = args["detections_dir_path"]
output_dir = args["output_dir_path"]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
conf_threshold_det = args["detection_confidence_threshold"]
iou_threshold_unk = args["iou_thresh_unk"]
apply_nms = args["apply_nms"]
iou_threshold_nms = args["iou_thresh_nms"]
main(annotations_dir, detections_dir, output_dir, conf_threshold_det, iou_threshold_unk,
apply_nms, iou_threshold_nms)
|
[
"os.path.exists",
"os.listdir",
"xml.etree.ElementTree.parse",
"numpy.minimum",
"argparse.ArgumentParser",
"os.makedirs",
"fvcore.common.file_io.PathManager.open",
"numpy.max",
"numpy.array",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.ElementTree",
"os.path.dirname",
"os.path.basename",
"numpy.maximum",
"xml.etree.ElementTree.SubElement",
"time.time",
"cv2.imread"
] |
[((2699, 2724), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2722, 2724), False, 'import argparse\n'), ((4540, 4560), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4554, 4560), False, 'import os\n'), ((6880, 6900), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6894, 6900), False, 'import os\n'), ((7676, 7688), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (7684, 7688), True, 'import numpy as np\n'), ((8483, 8502), 'os.listdir', 'os.listdir', (['det_dir'], {}), '(det_dir)\n', (8493, 8502), False, 'import os\n'), ((8515, 8526), 'time.time', 'time.time', ([], {}), '()\n', (8524, 8526), False, 'import time\n'), ((6113, 6135), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (6123, 6135), False, 'import cv2\n'), ((6184, 6208), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""annotation"""'], {}), "('annotation')\n", (6194, 6208), True, 'import xml.etree.ElementTree as ET\n'), ((6231, 6265), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_root', '"""folder"""'], {}), "(node_root, 'folder')\n", (6244, 6265), True, 'import xml.etree.ElementTree as ET\n'), ((6327, 6363), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_root', '"""filename"""'], {}), "(node_root, 'filename')\n", (6340, 6363), True, 'import xml.etree.ElementTree as ET\n'), ((6424, 6456), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_root', '"""size"""'], {}), "(node_root, 'size')\n", (6437, 6456), True, 'import xml.etree.ElementTree as ET\n'), ((6478, 6511), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_size', '"""width"""'], {}), "(node_size, 'width')\n", (6491, 6511), True, 'import xml.etree.ElementTree as ET\n'), ((6572, 6606), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_size', '"""height"""'], {}), "(node_size, 'height')\n", (6585, 6606), True, 'import xml.etree.ElementTree as ET\n'), ((6667, 6700), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['node_size', '"""depth"""'], {}), "(node_size, 'depth')\n", (6680, 6700), True, 'import xml.etree.ElementTree as ET\n'), ((6754, 6779), 'xml.etree.ElementTree.ElementTree', 'ET.ElementTree', (['node_root'], {}), '(node_root)\n', (6768, 6779), True, 'import xml.etree.ElementTree as ET\n'), ((7537, 7552), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (7545, 7552), True, 'import numpy as np\n'), ((7554, 7570), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (7562, 7570), True, 'import numpy as np\n'), ((7737, 7749), 'numpy.array', 'np.array', (['dl'], {}), '(dl)\n', (7745, 7749), True, 'import numpy as np\n'), ((7766, 7792), 'numpy.maximum', 'np.maximum', (['gt[:, 0]', 'd[0]'], {}), '(gt[:, 0], d[0])\n', (7776, 7792), True, 'import numpy as np\n'), ((7809, 7835), 'numpy.maximum', 'np.maximum', (['gt[:, 1]', 'd[1]'], {}), '(gt[:, 1], d[1])\n', (7819, 7835), True, 'import numpy as np\n'), ((7852, 7878), 'numpy.minimum', 'np.minimum', (['gt[:, 2]', 'd[2]'], {}), '(gt[:, 2], d[2])\n', (7862, 7878), True, 'import numpy as np\n'), ((7895, 7921), 'numpy.minimum', 'np.minimum', (['gt[:, 3]', 'd[3]'], {}), '(gt[:, 3], d[3])\n', (7905, 7921), True, 'import numpy as np\n'), ((7935, 7971), 'numpy.maximum', 'np.maximum', (['(ixmax - ixmin + 1.0)', '(0.0)'], {}), '(ixmax - ixmin + 1.0, 0.0)\n', (7945, 7971), True, 'import numpy as np\n'), ((7985, 8021), 'numpy.maximum', 'np.maximum', (['(iymax - iymin + 1.0)', '(0.0)'], {}), '(iymax - iymin + 1.0, 0.0)\n', (7995, 8021), True, 'import numpy as np\n'), ((8281, 
8297), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (8287, 8297), True, 'import numpy as np\n'), ((10786, 10812), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (10800, 10812), False, 'import os\n'), ((10822, 10845), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (10833, 10845), False, 'import os\n'), ((4575, 4597), 'fvcore.common.file_io.PathManager.open', 'PathManager.open', (['path'], {}), '(path)\n', (4591, 4597), False, 'from fvcore.common.file_io import PathManager\n'), ((4623, 4634), 'xml.etree.ElementTree.parse', 'ET.parse', (['f'], {}), '(f)\n', (4631, 4634), True, 'import xml.etree.ElementTree as ET\n'), ((8682, 8693), 'time.time', 'time.time', ([], {}), '()\n', (8691, 8693), False, 'import time\n'), ((9727, 9756), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['object', '"""name"""'], {}), "(object, 'name')\n", (9740, 9756), True, 'import xml.etree.ElementTree as ET\n'), ((9810, 9839), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['object', '"""pose"""'], {}), "(object, 'pose')\n", (9823, 9839), True, 'import xml.etree.ElementTree as ET\n'), ((9902, 9936), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['object', '"""truncated"""'], {}), "(object, 'truncated')\n", (9915, 9936), True, 'import xml.etree.ElementTree as ET\n'), ((9994, 10028), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['object', '"""difficult"""'], {}), "(object, 'difficult')\n", (10007, 10028), True, 'import xml.etree.ElementTree as ET\n'), ((10083, 10114), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['object', '"""bndbox"""'], {}), "(object, 'bndbox')\n", (10096, 10114), True, 'import xml.etree.ElementTree as ET\n'), ((10134, 10163), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmin"""'], {}), "(bndbox, 'xmin')\n", (10147, 10163), True, 'import xml.etree.ElementTree as ET\n'), ((10224, 10253), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymin"""'], {}), "(bndbox, 'ymin')\n", (10237, 10253), True, 'import xml.etree.ElementTree as ET\n'), ((10314, 10343), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmax"""'], {}), "(bndbox, 'xmax')\n", (10327, 10343), True, 'import xml.etree.ElementTree as ET\n'), ((10404, 10433), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymax"""'], {}), "(bndbox, 'ymax')\n", (10417, 10433), True, 'import xml.etree.ElementTree as ET\n'), ((4496, 4518), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4512, 4518), False, 'import os\n'), ((6050, 6071), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6065, 6071), False, 'import os\n'), ((5967, 5989), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (5983, 5989), False, 'import os\n'), ((8639, 8650), 'time.time', 'time.time', ([], {}), '()\n', (8648, 8650), False, 'import time\n')]
|
# <NAME>
# initial version of the webcam detector, can be used to test HSV settings, radius, etc
import cv2
#import time
import numpy as np
#from infer_imagenet import *
FRAME_WIDTH = 640
FRAME_HEIGHT = 480
# load in the video
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH,FRAME_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT)
# Check if camera opened successfully
if (cap.isOpened() == False):
print("Error opening video stream or file")
# writing a video file for presentation
#fourcc = cv2.VideoWriter_fourcc(*'MJPG')
#out = cv2.VideoWriter('example_track.avi', fourcc , 30.0, (640, 480),
# Read until video is completed
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
redball_detected=False
        # resize video for faster processing, add blur to smooth the image, convert to hue/saturation/value (HSV)
frame = cv2.resize(frame, (640, 480))
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
frameHSV = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # placeholder for later exploration of CNN-based object detection, in this case a tennis ball
#found = infer_result(frame, 852, model)
#print('Tennis Ball found?:', found)
redLow = (0, 140, 140)
redHigh = (255, 255, 255)
# other colors such as the green for a tennis ball
#colorLow = (100, 40, 60)
#colorHigh = (120, 255, 255)
        # mask the parts of the image which fit the HSV range, then fill in holes using erode/dilate
mask = cv2.inRange(frameHSV, redLow, redHigh)
mask = cv2.erode(mask, None, iterations=4)
mask = cv2.dilate(mask, None, iterations=8)
mask = cv2.erode(mask, None, iterations=4)
# copy of the mask for checking if circle
maskg = np.copy(mask)
imgg = np.zeros(frame.shape[0:2])
cv2.imshow('mask', mask)
cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
center = None
cv2.drawContours(frame, cnts, -1, (0, 255, 0), 3)
# Checks to make sure there is a red object
if len(cnts) < 1:
cv2.imshow('Frame', frame)
#cv2.waitKey(10)
#out.write(frame)
else:
c = max(cnts, key=cv2.contourArea)
(x, y), radius = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
if int(M["m00"]) != 0:
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
print('radius', radius)
# only proceed if the radius meets a minimum size
if radius > 10:
# Check to see if the object is a circle by checking mask fill of enclosing circle
cv2.circle(imgg, center, int(radius), 255, -1)
masked = cv2.bitwise_and(maskg.astype(np.uint8), maskg.astype(np.uint8), mask=imgg.astype(np.uint8))
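                # masked pixels are either 0 or 255, so np.sum(masked) / 255 is the number
                # of red-mask pixels inside the enclosing circle; dividing by the circle
                # area (pi * r^2) gives the fraction of the circle that is filled, which is
                # close to 1.0 for a genuinely round blob.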
circle_fullness = np.sum(masked) / (np.pi * radius ** 2 * 255)
if circle_fullness > 0.8:
redball_detected=True
# draw the circle and centroid on the frame,
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 0, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # if a large enough red object is detected, report its coordinates
if redball_detected:
print('center coordinates', center)
print(type(center))
# write to a video file
#out.write(frame)
# Display the resulting frame
print('Redball detected:', redball_detected)
cv2.imshow('Frame', frame)
cv2.imshow("test", frameHSV)
#cv2.waitKey(1)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
#out.release()
# Closes all the frames
cv2.destroyAllWindows()
|
[
"numpy.copy",
"cv2.drawContours",
"cv2.dilate",
"cv2.inRange",
"cv2.erode",
"cv2.minEnclosingCircle",
"cv2.imshow",
"numpy.sum",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.waitKey"
] |
[((246, 265), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (262, 265), False, 'import cv2\n'), ((4141, 4164), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4162, 4164), False, 'import cv2\n'), ((938, 967), 'cv2.resize', 'cv2.resize', (['frame', '(640, 480)'], {}), '(frame, (640, 480))\n', (948, 967), False, 'import cv2\n'), ((987, 1023), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(11, 11)', '(0)'], {}), '(frame, (11, 11), 0)\n', (1003, 1023), False, 'import cv2\n'), ((1044, 1084), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (1056, 1084), False, 'import cv2\n'), ((1605, 1643), 'cv2.inRange', 'cv2.inRange', (['frameHSV', 'redLow', 'redHigh'], {}), '(frameHSV, redLow, redHigh)\n', (1616, 1643), False, 'import cv2\n'), ((1660, 1695), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(4)'}), '(mask, None, iterations=4)\n', (1669, 1695), False, 'import cv2\n'), ((1712, 1748), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(8)'}), '(mask, None, iterations=8)\n', (1722, 1748), False, 'import cv2\n'), ((1765, 1800), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(4)'}), '(mask, None, iterations=4)\n', (1774, 1800), False, 'import cv2\n'), ((1871, 1884), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (1878, 1884), True, 'import numpy as np\n'), ((1901, 1927), 'numpy.zeros', 'np.zeros', (['frame.shape[0:2]'], {}), '(frame.shape[0:2])\n', (1909, 1927), True, 'import numpy as np\n'), ((1941, 1965), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (1951, 1965), False, 'import cv2\n'), ((1993, 2055), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2009, 2055), False, 'import cv2\n'), ((2088, 2137), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'cnts', '(-1)', '(0, 255, 0)', '(3)'], {}), '(frame, cnts, -1, (0, 255, 0), 3)\n', (2104, 2137), False, 'import cv2\n'), ((3779, 3805), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (3789, 3805), False, 'import cv2\n'), ((3815, 3843), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'frameHSV'], {}), "('test', frameHSV)\n", (3825, 3843), False, 'import cv2\n'), ((2233, 2259), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2243, 2259), False, 'import cv2\n'), ((2416, 2441), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (2438, 2441), False, 'import cv2\n'), ((2459, 2473), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2470, 2473), False, 'import cv2\n'), ((3921, 3936), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (3932, 3936), False, 'import cv2\n'), ((3037, 3051), 'numpy.sum', 'np.sum', (['masked'], {}), '(masked)\n', (3043, 3051), True, 'import numpy as np\n'), ((3377, 3422), 'cv2.circle', 'cv2.circle', (['frame', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, center, 5, (0, 0, 255), -1)\n', (3387, 3422), False, 'import cv2\n')]
|
import os
import imutils
import pickle
import time
import cv2
import threading
import numpy as np
from PIL import ImageFont, ImageDraw, Image
import json
import datetime
import requests
from faced import FaceDetector
from faced.utils import annotate_image
from config_reader import read_config
ZM_URL = 'http://18.179.207.49/zm'
ZM_STREAM_URL = f'{ZM_URL}/cgi-bin/nph-zms'
LOGIN_URL = f'{ZM_URL}/api/host/login.json?user=admin&pass=<PASSWORD>'
MAX_RETRY_FRAME = 1000
def connect_stream(monitor, stream_url):
r = requests.post(url=LOGIN_URL)
    print('[INFO] opening video stream...')
auth_info = r.json()['credentials']
new_url = f'{ZM_STREAM_URL}?mode=jpeg&maxfps=5&monitor={monitor}&{auth_info}'
# start streaming with zm stream url
cap = cv2.VideoCapture(new_url)
if cap is None or not cap.isOpened():
# try to open alternative url
print('[ERROR] trying to open direct url...')
cap = cv2.VideoCapture(stream_url)
return cap
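# Hedged usage sketch (the monitor id and the fallback URL are placeholders):
#   cap = connect_stream(1, 'rtsp://camera.local/stream')
#   if cap is not None and cap.isOpened():
#       ok, frame = cap.read()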
class Camera(object):
thread_list = {}
json_list = {}
frame_list = {}
last_access = {}
json_data = {}
detector = None
embedder = None
recognizer = None
le = None
max_retry_count = 0
stream_url_list = {}
confidence = 0.90
# is_ended = False
def initialize(self, monitor, stream_url):
if monitor not in Camera.thread_list:
# start background frame thread
thread = threading.Thread(target=self._thread, args=(
stream_url,), kwargs={"monitor": monitor})
thread.start()
Camera.thread_list[str(monitor)] = thread
# wait until frames start to be available
# while monitor not in self.frame_list or self.frame_list[str(monitor)] is None:
# time.sleep(0)
def __init__(self):
file_paths, configs = read_config()
if Camera.detector is None:
print('[INFO] loading face detector...')
Camera.detector = FaceDetector()
if Camera.embedder is None:
# load our serialized face embedding model from disk
print('[INFO] loading embedder from {}'.format(
file_paths['embedder_path']))
Camera.embedder = cv2.dnn.readNetFromTorch(
file_paths['embedder_path'])
if Camera.recognizer is None:
# load the actual face recognition model along with the label encoder
print('[INFO] loading face recognizer from {}'.format(
file_paths['recognizer_path']))
Camera.recognizer = pickle.loads(
open('output/recognizer.pickle', 'rb').read())
if Camera.le is None:
print('[INFO] loading le from {}'.format(file_paths['le_path']))
Camera.le = pickle.loads(open('output/le.pickle', 'rb').read())
print('[INFO] Confidence value is set to {}'.format(
configs['confidence']))
Camera.confidence = float(configs['confidence'])
Camera.max_retry_count = int(configs['max_retry_count'])
# def get_frame(self, monitor):
# try:
# return self.frame_list[str(monitor)]
# except:
# return None
def get_json(self, monitor):
try:
return self.json_list[str(monitor)]
except:
response_data = {}
response_data['detection'] = []
return response_data
def change_stream_url(self, monitor, stream_url):
if monitor in Camera.thread_list:
return None
Camera.stream_url_list[str(monitor)] = stream_url
self.initialize(monitor, stream_url)
@classmethod
def _thread(cls, stream_url, monitor=0):
# login to zm server first
r = requests.post(url=LOGIN_URL)
        print('[INFO] opening video stream...')
auth_info = r.json()['credentials']
new_url = f'{ZM_STREAM_URL}?mode=jpeg&maxfps=5&monitor={monitor}&{auth_info}'
retry_count = 0
cap = None
# start trying to connect to streaming resource
        while (cap is None or not cap.isOpened()) and retry_count < cls.max_retry_count:
cap = connect_stream(monitor, cls.stream_url_list[str(monitor)])
retry_count += 1
if cap is None or not cap.isOpened():
print('[ERROR] unable to open remote stream...')
cls.thread_list[str(monitor)] = None
return
print('[INFO] starting face detection...')
cap_failed_count = 0
while True:
try:
response_data = {}
response_data['detection'] = []
ret, frame = cap.read()
#ret, frame = camera.read()
if not ret:
cap_failed_count += 1
cls.json_list[str(monitor)] = response_data
if (cap_failed_count > cls.max_retry_count):
if cap.isOpened():
cap.release()
retry_count = 0
                    while (cap is None or not cap.isOpened()) and retry_count < cls.max_retry_count:
cap = connect_stream(
monitor, cls.stream_url_list[str(monitor)])
retry_count += 1
if cap is None or not cap.isOpened():
print('[ERROR] unable to open remote stream...')
cls.thread_list[str(monitor)] = None
return
continue
# resize the frame to have a width of 600 pixels (while
# maintaining the aspect ratio), and then grab the image
# dimensions
frame = imutils.resize(frame, width=600)
# (h, w) = frame.shape[:2]
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
bboxes = cls.detector.predict(frame, cls.confidence)
# ensure at least one face was found
print('[INFO] detected faces: {}'.format(len(bboxes)))
if len(bboxes) > 0:
for xb, yb, wb, hb, pb in bboxes:
startX = int(xb - wb/2)
startY = int(yb - hb/2)
endX = int(xb + wb/2)
endY = int(yb + hb/2)
# extract the face ROI
face = frame[startY:endY, startX:endX]
# (fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
# if fW < 20 or fH < 20:
# continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(
face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
cls.embedder.setInput(faceBlob)
vec = cls.embedder.forward()
# perform classification to recognize the face
preds = cls.recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = cls.le.classes_[j]
# name = 0
# if proba >= 0.6:
# name = cls.le.classes_[j]
json_data = {}
json_data['name'] = '{}'.format(name)
json_data['time'] = datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')
json_data['confidence'] = str(proba)
response_data['detection'].append(json_data)
cls.json_list[str(monitor)] = response_data
# ret, jpeg = cv2.imencode('.jpg', frame)
# cls.frame_list[str(monitor)] = jpeg.tobytes()
finally:
time.sleep(0.02)
print('[INFO] releasing stream resources...')
if cap.isOpened():
cap.release()
cls.thread_list[str(monitor)] = None
def detect_image(self, frame):
response_data = {}
response_data['detection'] = []
response_list = []
# resize the frame to have a width of 600 pixels (while
# maintaining the aspect ratio), and then grab the image
# dimensions
# frame = imutils.resize(frame, width=600)
try:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
bboxes = Camera.detector.predict(frame, Camera.confidence)
# ensure at least one face was found
print('[INFO] detected faces: {}'.format(len(bboxes)))
if len(bboxes) > 0:
for xb, yb, wb, hb, pb in bboxes:
startX = int(xb - wb/2)
startY = int(yb - hb/2)
endX = int(xb + wb/2)
endY = int(yb + hb/2)
# extract the face ROI
face = frame[startY:endY, startX:endX]
# (fH, fW) = face.shape[:2]
# ensure the face width and height are sufficiently large
# if fW < 20 or fH < 20:
# continue
# construct a blob for the face ROI, then pass the blob
# through our face embedding model to obtain the 128-d
# quantification of the face
faceBlob = cv2.dnn.blobFromImage(
face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
Camera.embedder.setInput(faceBlob)
vec = Camera.embedder.forward()
# perform classification to recognize the face
preds = Camera.recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = Camera.le.classes_[j]
# name = 0
# if proba >= 0.6:
# name = Camera.le.classes_[j]
if name not in response_list:
response_list.append(name)
json_data = {}
json_data['name'] = '{}'.format(name)
json_data['time'] = datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S')
json_data['confidence'] = str(proba)
response_data['detection'].append(json_data)
finally:
return response_data, response_list
def detect_video(self, event_id, monitor_id, event_date):
response_data = {}
response_data['detection'] = []
# cap = cv2.VideoCapture(0)
print(f'[INFO] starting face detection for event {event_id}...')
result_list = []
start_index = 1
while(True):
print(f'[INFO] checking still image {start_index:05}-analyse.jpg...')
img_path = f'/mnt/zoneminder/events/{monitor_id}/{event_date}/{event_id}/{start_index:05}-analyse.jpg'
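            # Event frames are numbered from 00001 upwards; missing indices are skipped
            # (up to MAX_RETRY_FRAME attempts) before giving up on the event.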
if not os.path.isfile(img_path):
if start_index >= MAX_RETRY_FRAME:
break
start_index += 1
time.sleep(0.02)
continue
try:
# print(f'[INFO] parsing {img_path}...')
frame = cv2.imread(img_path)
if frame is not None:
detect_data, detect_list = self.detect_image(frame)
for detect_id in detect_list:
if detect_id not in result_list:
result_list.append(detect_id)
except Exception as e:
# print(e)
print(
f'[INFO] failed to parsing frame {start_index} for event {event_id}...')
finally:
break
        print('[INFO] finished video detection...')
response_data['detection'] = result_list
return response_data
|
[
"cv2.dnn.blobFromImage",
"requests.post",
"cv2.dnn.readNetFromTorch",
"config_reader.read_config",
"numpy.argmax",
"time.sleep",
"os.path.isfile",
"imutils.resize",
"faced.FaceDetector",
"datetime.datetime.now",
"cv2.VideoCapture",
"cv2.cvtColor",
"threading.Thread",
"cv2.imread"
] |
[((521, 549), 'requests.post', 'requests.post', ([], {'url': 'LOGIN_URL'}), '(url=LOGIN_URL)\n', (534, 549), False, 'import requests\n'), ((768, 793), 'cv2.VideoCapture', 'cv2.VideoCapture', (['new_url'], {}), '(new_url)\n', (784, 793), False, 'import cv2\n'), ((942, 970), 'cv2.VideoCapture', 'cv2.VideoCapture', (['stream_url'], {}), '(stream_url)\n', (958, 970), False, 'import cv2\n'), ((1860, 1873), 'config_reader.read_config', 'read_config', ([], {}), '()\n', (1871, 1873), False, 'from config_reader import read_config\n'), ((3768, 3796), 'requests.post', 'requests.post', ([], {'url': 'LOGIN_URL'}), '(url=LOGIN_URL)\n', (3781, 3796), False, 'import requests\n'), ((1440, 1530), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._thread', 'args': '(stream_url,)', 'kwargs': "{'monitor': monitor}"}), "(target=self._thread, args=(stream_url,), kwargs={'monitor':\n monitor})\n", (1456, 1530), False, 'import threading\n'), ((1993, 2007), 'faced.FaceDetector', 'FaceDetector', ([], {}), '()\n', (2005, 2007), False, 'from faced import FaceDetector\n'), ((2246, 2299), 'cv2.dnn.readNetFromTorch', 'cv2.dnn.readNetFromTorch', (["file_paths['embedder_path']"], {}), "(file_paths['embedder_path'])\n", (2270, 2299), False, 'import cv2\n'), ((8736, 8774), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (8748, 8774), False, 'import cv2\n'), ((5795, 5827), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(600)'}), '(frame, width=600)\n', (5809, 5827), False, 'import imutils\n'), ((5895, 5933), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (5907, 5933), False, 'import cv2\n'), ((8202, 8218), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (8212, 8218), False, 'import time\n'), ((11390, 11414), 'os.path.isfile', 'os.path.isfile', (['img_path'], {}), '(img_path)\n', (11404, 11414), False, 'import os\n'), ((11542, 11558), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (11552, 11558), False, 'import time\n'), ((11683, 11703), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (11693, 11703), False, 'import cv2\n'), ((9759, 9847), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['face', '(1.0 / 255)', '(96, 96)', '(0, 0, 0)'], {'swapRB': '(True)', 'crop': '(False)'}), '(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True,\n crop=False)\n', (9780, 9847), False, 'import cv2\n'), ((10136, 10152), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (10145, 10152), True, 'import numpy as np\n'), ((6988, 7076), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['face', '(1.0 / 255)', '(96, 96)', '(0, 0, 0)'], {'swapRB': '(True)', 'crop': '(False)'}), '(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True,\n crop=False)\n', (7009, 7076), False, 'import cv2\n'), ((7380, 7396), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (7389, 7396), True, 'import numpy as np\n'), ((10599, 10622), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10620, 10622), False, 'import datetime\n'), ((7768, 7791), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7789, 7791), False, 'import datetime\n')]
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Helper to transform or extract information from the abstratc model.
"""
from __future__ import division
__authors__ = ["<NAME>"]
__license__ = "MIT"
import numpy
from pyFAI.control_points import ControlPoints
from pyFAI.gui.model.CalibrationModel import CalibrationModel
from pyFAI.gui.model.PeakSelectionModel import PeakSelectionModel
from pyFAI.gui.model.PeakModel import PeakModel
from pyFAI.gui.CalibrationContext import CalibrationContext
def createControlPoints(model):
"""Create ControlPoints object from the calibration model
:rtype: pyFAI.control_points.ControlPoints
"""
if not isinstance(model, CalibrationModel):
raise TypeError("Unexpected model type")
calibrant = model.experimentSettingsModel().calibrantModel().calibrant()
wavelength = model.experimentSettingsModel().wavelength().value()
controlPoints = ControlPoints(calibrant=calibrant, wavelength=wavelength)
for peakModel in model.peakSelectionModel():
if not peakModel.isEnabled():
continue
ringNumber = peakModel.ringNumber() - 1
points = peakModel.coords().tolist()
controlPoints.append(points=points, ring=ringNumber)
return controlPoints
def createPeaksArray(model):
"""Create a contiguous peak array containing (y, x, ring number)
:param PeakSelectionModel model: A set of selected peaks
:rtype: numpy.ndarray
"""
if not isinstance(model, PeakSelectionModel):
raise TypeError("Unexpected model type")
count = 0
    for group in model:
        if not group.isEnabled():
            continue
        count += len(group)
pos = 0
peaks = numpy.empty(shape=(count, 3), dtype=float)
for group in model:
if not group.isEnabled():
continue
end = pos + len(group)
peaks[pos:end, 0:2] = group.coords()
peaks[pos:end, 2] = group.ringNumber() - 1
pos = end
peaks = numpy.array(peaks)
return peaks
def filterControlPoints(filterCallback, peakSelectionModel, removedPeaks=None):
"""Filter each peaks of the model using a callback
:param Callable[int,int,bool] filter: Filter returning true is the
peak have to stay in the result.
:param PeakSelectionModel peakSelectionModel: Model to filter
:param List[Tuple[int,int]] removedPeaks: Provide a list to feed it with
removed peaks from the model.
"""
peakSelectionModel.lockSignals()
for peakGroup in peakSelectionModel:
changed = False
newCoords = []
for coord in peakGroup.coords():
if filterCallback(coord[0], coord[1]):
newCoords.append(coord)
else:
if removedPeaks is not None:
removedPeaks.append(coord)
changed = True
if changed:
if len(newCoords) == 0:
newCoords = numpy.empty(shape=(0, 2))
else:
newCoords = numpy.array(newCoords)
peakGroup.setCoords(newCoords)
peakSelectionModel.unlockSignals()
def _findUnusedName(peakSelectionModel):
"""
:rtype: str
"""
names = ["% 8s" % p.name() for p in peakSelectionModel]
if len(names) > 0:
names = list(sorted(names))
bigger = names[-1].strip()
number = 0
for c in bigger:
number = number * 26 + (ord(c) - ord('a'))
else:
number = -1
number = number + 1
# compute the next one
name = ""
if number == 0:
name = "a"
else:
n = number
while n > 0:
c = n % 26
n = n // 26
name = chr(c + ord('a')) + name
return name
def createRing(points, peakSelectionModel, ringNumber=None, context=None):
"""Create a new ring from a group of points
:rtype: PeakModel
"""
if context is None:
context = CalibrationContext.instance()
name = _findUnusedName(peakSelectionModel)
if ringNumber is None:
ringNumber = 1
color = context.getMarkerColor(ringNumber - 1)
peakModel = PeakModel(peakSelectionModel)
peakModel.setName(name)
peakModel.setColor(color)
peakModel.setCoords(points)
peakModel.setRingNumber(ringNumber)
return peakModel
def initPeaksFromControlPoints(peakSelectionModel, controlPoints, context=None):
"""Initialize peak selection model using control points object
:rtype: pyFAI.control_points.ControlPoints
"""
if not isinstance(peakSelectionModel, PeakSelectionModel):
raise TypeError("Unexpected model type")
if not isinstance(controlPoints, ControlPoints):
raise TypeError("Unexpected model type")
if context is None:
context = CalibrationContext.instance()
peakSelectionModel.clear()
for label in controlPoints.get_labels():
group = controlPoints.get(lbl=label)
color = context.getMarkerColor(group.ring)
points = numpy.array(group.points)
peakModel = createRing(points, peakSelectionModel=peakSelectionModel, context=context)
peakModel.setRingNumber(group.ring + 1)
peakModel.setColor(color)
peakModel.setName(label)
peakSelectionModel.append(peakModel)
def geometryModelToGeometry(geometryModel, geometry):
geometry.dist = geometryModel.distance().value()
geometry.poni1 = geometryModel.poni1().value()
geometry.poni2 = geometryModel.poni2().value()
geometry.rot1 = geometryModel.rotation1().value()
geometry.rot2 = geometryModel.rotation2().value()
geometry.rot3 = geometryModel.rotation3().value()
geometry.wavelength = geometryModel.wavelength().value()
|
[
"pyFAI.gui.model.PeakModel.PeakModel",
"pyFAI.control_points.ControlPoints",
"numpy.array",
"numpy.empty",
"pyFAI.gui.CalibrationContext.CalibrationContext.instance"
] |
[((2173, 2230), 'pyFAI.control_points.ControlPoints', 'ControlPoints', ([], {'calibrant': 'calibrant', 'wavelength': 'wavelength'}), '(calibrant=calibrant, wavelength=wavelength)\n', (2186, 2230), False, 'from pyFAI.control_points import ControlPoints\n'), ((2904, 2946), 'numpy.empty', 'numpy.empty', ([], {'shape': '(count, 3)', 'dtype': 'float'}), '(shape=(count, 3), dtype=float)\n', (2915, 2946), False, 'import numpy\n'), ((3183, 3201), 'numpy.array', 'numpy.array', (['peaks'], {}), '(peaks)\n', (3194, 3201), False, 'import numpy\n'), ((5331, 5360), 'pyFAI.gui.model.PeakModel.PeakModel', 'PeakModel', (['peakSelectionModel'], {}), '(peakSelectionModel)\n', (5340, 5360), False, 'from pyFAI.gui.model.PeakModel import PeakModel\n'), ((5135, 5164), 'pyFAI.gui.CalibrationContext.CalibrationContext.instance', 'CalibrationContext.instance', ([], {}), '()\n', (5162, 5164), False, 'from pyFAI.gui.CalibrationContext import CalibrationContext\n'), ((5976, 6005), 'pyFAI.gui.CalibrationContext.CalibrationContext.instance', 'CalibrationContext.instance', ([], {}), '()\n', (6003, 6005), False, 'from pyFAI.gui.CalibrationContext import CalibrationContext\n'), ((6196, 6221), 'numpy.array', 'numpy.array', (['group.points'], {}), '(group.points)\n', (6207, 6221), False, 'import numpy\n'), ((4140, 4165), 'numpy.empty', 'numpy.empty', ([], {'shape': '(0, 2)'}), '(shape=(0, 2))\n', (4151, 4165), False, 'import numpy\n'), ((4212, 4234), 'numpy.array', 'numpy.array', (['newCoords'], {}), '(newCoords)\n', (4223, 4234), False, 'import numpy\n')]
|
# %% [markdown]
# ## Imports
# %%
import numpy as np
import scipy
import skimage
import cv2
# %%
class CornerDetector:
"""Corner detector for an image.
Args:
img (array-like): matrix representation of input image.
May be a grayscale or RGB image.
Attributes:
img (numpy.ndarray): numpy array of image input image representation.
"""
def __init__(self, img):
self.__img = np.array(img)
def rgb_to_grayscale(self, img):
""" Converts an RGB image to gray scale.
Using the ITU-R 601-2 luma transform
Args:
img (array-like): array representation of a RGB image.
Returns:
numpy.ndarray: Array representation of img, converted to grayscale.
"""
return np.dot(img[..., :3], [0.2989, 0.5870, 0.1140])
def image_derivatives(self, arr, x=True, y=True):
""" Calculates x and y derivatives using the Sobel operator,
with convolution using Scipy.
Args:
arr (array-like): An array representation of a grayscale image.
x (bool): True to calculate the X-derivative, else False
y (bool): True to calculate the Y-derivative, else False.
Returns:
numpy.ndarray: X-derivative of arr if x = True, else None.
numpy.ndarray: Y-derivative of arr if y = True, else None.
"""
kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
kernel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
deriv_x, deriv_y = None, None
if (x):
deriv_x = scipy.signal.convolve2d(arr, kernel_x, mode='same')
if (y):
deriv_y = scipy.signal.convolve2d(arr, kernel_y, mode='same')
return deriv_x, deriv_y
def __preprocess(self):
"""
Pre-processing the image, with denoising using openCV2,
and thresholding with the Sobel filter and threshold_otsu
from Scikit-image.
Returns:
numpy.ndarray: Pre-processed image for self.corner_detector method.
"""
img_p = None
if (len(self.__img.shape) == 3):
img_p = cv2.fastNlMeansDenoisingColored(self.__img)
img_p = skimage.filters.sobel(self.rgb_to_grayscale(img_p))
img_p = img_p > skimage.filters.threshold_otsu(img_p)
elif (len(self.__img.shape) == 2):
img_p = cv2.fastNlMeansDenoising(self.__img)
img_p = skimage.filters.sobel(img_p)
img_p = img_p > skimage.filters.threshold_otsu(img_p)
return img_p
def corner_detector(self,
offset=1,
threshold=0,
k=0.06,
k_mean=False,
eps=0.001,
mode='shi-tomasi'):
""" Corner detection method.
Uses Harris Corner Detector or Shi-Tomasi Corner Detector.
Note:
This method calls the self.__preprocess method before applying the
Harris/Shi-Tomasi corner detector on the resulting image.
Args:
offset (int): Offset to center of analyzed regions around a pixel.
Equals the integer division of the size of the region by two.
threshold (float): Threshold of corner response measure.
The higher the limit, the fewer points will be returned.
k (float): Harris detector parameter
Should be around 0.04 to 0.06.
k_mean (bool): Determines if k should be automatically computed.
eps (float): Small value (around 0.001) for k computation.
Only relevant if k_mean = True.
mode (str): 'harris' or 'shi-tomasi'.
Selector between Harris and Shi-Tomasi Corner Detectors.
Returns:
numpy.ndarray: Input image, with marked regions identified
as corners.
numpy.ndarray: List of points identified as corners.
Structure: [x, y, E], where x and y are the coordinates,
and E is the corner response measure of the point.
"""
corner_points = []
ret_img = np.copy(self.__img)
# Preprocessing image with thresholding
img_p = self.__preprocess()
# Find derivatives and tensor setup
# Create image for return, illustrating corner points
if (len(img_p.shape) == 3):
dx, dy = self.image_derivatives(self.rgb_to_grayscale(img_p))
elif (len(img_p.shape) == 2):
dx, dy = self.image_derivatives(img_p)
else:
raise TypeError("Numpy array with invalid shape")
ixx, ixy, iyy = dx**2, dx * dy, dy**2
# Iterate through windows
for i in range(offset, self.__img.shape[0] - offset):
for j in range(offset, self.__img.shape[1] - offset):
# Calculate sum over the sliding window
sxx = np.sum(ixx[i - offset:i + offset + 1, j - offset:j +
offset + 1])
syy = np.sum(iyy[i - offset:i + offset + 1, j - offset:j +
offset + 1])
sxy = np.sum(ixy[i - offset:i + offset + 1, j - offset:j +
offset + 1])
# Find determinant and trace,
# use to get corner response -> r = det - k*(trace**2)
det = ((sxx * syy) - (sxy**2))
trace = sxx + syy
if (k_mean):
k = 2 * (det / (trace + eps))
if (mode == 'harris'):
r = det - k * (trace**2)
elif (mode == 'shi-tomasi'):
r = np.minimum(sxx, syy)
else:
raise ValueError("Invalid value for 'mode' variable")
# Verify if point is a corner with threshold value
# If true, add to list of corner points and colorize point
# on returning image
if (r > threshold):
corner_points.append([i, j, r])
if (len(ret_img.shape) == 3):
ret_img[i, j] = [255, 0, 0]
elif (len(ret_img.shape) == 2):
ret_img[i, j] = 255
else:
raise TypeError("Numpy array with invalid shape")
return ret_img, np.array(corner_points)
def find_corners4(self,
offset=1,
threshold=0,
k=0.06,
k_mean=False,
eps=0.001,
mode='shi-tomasi'):
"""
Find the corner points nearest to the corners of the input image,
using self.corner_detector.
Args:
offset (int): Offset to center of analyzed regions around a pixel.
Equals the integer division of the size of the region by two.
threshold (float): Threshold of corner response measure.
The higher the limit, the fewer points will be returned.
k (float): Harris detector parameter
Should be around 0.04 to 0.06.
k_mean (bool): Determines if k should be automatically computed.
eps (float): Small value (around 0.001) for k computation.
Only relevant if k_mean = True.
mode (str): 'harris' or 'shi-tomasi'.
Selector between Harris and Shi-Tomasi Corner Detectors.
Returns:
numpy.ndarray: Array of coordinates of the four identified corners
of the object.
"""
img_cd, img_cd_c = self.corner_detector(offset, threshold, k, k_mean,
eps, mode)
# Getting the four best corners of the business card, after corner
# detection
points = np.array([[0, 0, np.inf], [0, 0, np.inf], [0, 0, np.inf],
[0, 0, np.inf]])
corners = [[0, 0], [0, img_cd.shape[1] - 1], [img_cd.shape[0] - 1, 0],
[img_cd.shape[0] - 1, img_cd.shape[1] - 1]]
for c in img_cd_c:
# Getting distances from c to the corners of the image
dist = np.array([
scipy.spatial.distance.euclidean(c[:2], corners[0]),
scipy.spatial.distance.euclidean(c[:2], corners[1]),
scipy.spatial.distance.euclidean(c[:2], corners[2]),
scipy.spatial.distance.euclidean(c[:2], corners[3]),
])
# Limiting each element from points to a quadrant of the image
if (dist[0] < points[0][2] and c[0] < img_cd.shape[0] // 2 and
c[1] < img_cd.shape[1] // 2):
points[0] = [(c[0]), c[1], dist[0]]
if (dist[1] < points[1][2] and c[0] < img_cd.shape[0] // 2 and
c[1] > img_cd.shape[1] // 2):
points[1] = [(c[0]), c[1], dist[1]]
if (dist[2] < points[2][2] and c[0] > img_cd.shape[0] // 2 and
c[1] < img_cd.shape[1] // 2):
points[2] = [(c[0]), c[1], dist[2]]
if (dist[3] < points[3][2] and c[0] > img_cd.shape[0] // 2 and
c[1] > img_cd.shape[1] // 2):
points[3] = [(c[0]), c[1], dist[3]]
return points[:, :2]
# %%
# Running tests on a random image
# ! This segment of the code is used only for testing purposes
if __name__ == "__main__":
import imageio
import matplotlib.pyplot as plt
import os
# Listing example files
example_files = [
'./images/' + f for f in os.listdir('./images')
if os.path.isfile(os.path.join('./images', f))
]
# Selecting random file for testing
file_img = example_files[np.random.randint(0, len(example_files))]
img = imageio.imread(file_img)
plt.figure(figsize=(20, 20))
plt.subplot(121)
plt.imshow(img)
plt.subplot(122)
plt.imshow(CornerDetector(img).corner_detector()[0])
print(CornerDetector(img).find_corners4())
|
[
"matplotlib.pyplot.imshow",
"numpy.copy",
"scipy.signal.convolve2d",
"os.listdir",
"numpy.minimum",
"cv2.fastNlMeansDenoising",
"cv2.fastNlMeansDenoisingColored",
"skimage.filters.threshold_otsu",
"skimage.filters.sobel",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.dot",
"numpy.sum",
"scipy.spatial.distance.euclidean",
"imageio.imread",
"matplotlib.pyplot.subplot"
] |
[((9931, 9955), 'imageio.imread', 'imageio.imread', (['file_img'], {}), '(file_img)\n', (9945, 9955), False, 'import imageio\n'), ((9960, 9988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (9970, 9988), True, 'import matplotlib.pyplot as plt\n'), ((9993, 10009), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (10004, 10009), True, 'import matplotlib.pyplot as plt\n'), ((10014, 10029), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (10024, 10029), True, 'import matplotlib.pyplot as plt\n'), ((10034, 10050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (10045, 10050), True, 'import matplotlib.pyplot as plt\n'), ((431, 444), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (439, 444), True, 'import numpy as np\n'), ((783, 827), 'numpy.dot', 'np.dot', (['img[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(img[..., :3], [0.2989, 0.587, 0.114])\n', (789, 827), True, 'import numpy as np\n'), ((1412, 1458), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (1420, 1458), True, 'import numpy as np\n'), ((1478, 1524), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n', (1486, 1524), True, 'import numpy as np\n'), ((4212, 4231), 'numpy.copy', 'np.copy', (['self.__img'], {}), '(self.__img)\n', (4219, 4231), True, 'import numpy as np\n'), ((7971, 8045), 'numpy.array', 'np.array', (['[[0, 0, np.inf], [0, 0, np.inf], [0, 0, np.inf], [0, 0, np.inf]]'], {}), '([[0, 0, np.inf], [0, 0, np.inf], [0, 0, np.inf], [0, 0, np.inf]])\n', (7979, 8045), True, 'import numpy as np\n'), ((1601, 1652), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['arr', 'kernel_x'], {'mode': '"""same"""'}), "(arr, kernel_x, mode='same')\n", (1624, 1652), False, 'import scipy\n'), ((1691, 1742), 'scipy.signal.convolve2d', 'scipy.signal.convolve2d', (['arr', 'kernel_y'], {'mode': '"""same"""'}), "(arr, kernel_y, mode='same')\n", (1714, 1742), False, 'import scipy\n'), ((2165, 2208), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['self.__img'], {}), '(self.__img)\n', (2196, 2208), False, 'import cv2\n'), ((6465, 6488), 'numpy.array', 'np.array', (['corner_points'], {}), '(corner_points)\n', (6473, 6488), True, 'import numpy as np\n'), ((9726, 9748), 'os.listdir', 'os.listdir', (['"""./images"""'], {}), "('./images')\n", (9736, 9748), False, 'import os\n'), ((2309, 2346), 'skimage.filters.threshold_otsu', 'skimage.filters.threshold_otsu', (['img_p'], {}), '(img_p)\n', (2339, 2346), False, 'import skimage\n'), ((2410, 2446), 'cv2.fastNlMeansDenoising', 'cv2.fastNlMeansDenoising', (['self.__img'], {}), '(self.__img)\n', (2434, 2446), False, 'import cv2\n'), ((2467, 2495), 'skimage.filters.sobel', 'skimage.filters.sobel', (['img_p'], {}), '(img_p)\n', (2488, 2495), False, 'import skimage\n'), ((4986, 5051), 'numpy.sum', 'np.sum', (['ixx[i - offset:i + offset + 1, j - offset:j + offset + 1]'], {}), '(ixx[i - offset:i + offset + 1, j - offset:j + offset + 1])\n', (4992, 5051), True, 'import numpy as np\n'), ((5107, 5172), 'numpy.sum', 'np.sum', (['iyy[i - offset:i + offset + 1, j - offset:j + offset + 1]'], {}), '(iyy[i - offset:i + offset + 1, j - offset:j + offset + 1])\n', (5113, 5172), True, 'import numpy as np\n'), ((5228, 5293), 'numpy.sum', 'np.sum', (['ixy[i - offset:i + offset + 1, j - offset:j + offset + 1]'], {}), '(ixy[i - offset:i + offset + 
1, j - offset:j + offset + 1])\n', (5234, 5293), True, 'import numpy as np\n'), ((9775, 9802), 'os.path.join', 'os.path.join', (['"""./images"""', 'f'], {}), "('./images', f)\n", (9787, 9802), False, 'import os\n'), ((2524, 2561), 'skimage.filters.threshold_otsu', 'skimage.filters.threshold_otsu', (['img_p'], {}), '(img_p)\n', (2554, 2561), False, 'import skimage\n'), ((8355, 8406), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['c[:2]', 'corners[0]'], {}), '(c[:2], corners[0])\n', (8387, 8406), False, 'import scipy\n'), ((8424, 8475), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['c[:2]', 'corners[1]'], {}), '(c[:2], corners[1])\n', (8456, 8475), False, 'import scipy\n'), ((8493, 8544), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['c[:2]', 'corners[2]'], {}), '(c[:2], corners[2])\n', (8525, 8544), False, 'import scipy\n'), ((8562, 8613), 'scipy.spatial.distance.euclidean', 'scipy.spatial.distance.euclidean', (['c[:2]', 'corners[3]'], {}), '(c[:2], corners[3])\n', (8594, 8613), False, 'import scipy\n'), ((5758, 5778), 'numpy.minimum', 'np.minimum', (['sxx', 'syy'], {}), '(sxx, syy)\n', (5768, 5778), True, 'import numpy as np\n')]
|
__copyright__ = '2017 <NAME>. All Rights Reserved.'
__author__ = '<NAME>'
""" Mic file geometry and processing.
"""
import numpy as np
from xdm_toolkit import xdm_assert as xassert
def generate_vertices(mic_snp, sidewidth):
T_GEN_IDX = 4 # Triangle generation index.
T_DIR_IDX = 3 # Triangle direction index.
tri_gen = 2.0 ** mic_snp[:, T_GEN_IDX]
down_idx = (mic_snp[:, T_DIR_IDX] > 1).nonzero()
up_idx = (mic_snp[:, T_DIR_IDX] <= 1).nonzero()
ups_sidewidth = sidewidth / tri_gen[up_idx]
downs_sidewidth = sidewidth / tri_gen[down_idx]
up_vert = gen_vertex_helper(np.squeeze(
mic_snp[up_idx, 0:2]), ups_sidewidth, points_up=True)
down_vert = gen_vertex_helper(np.squeeze(
mic_snp[down_idx, 0:2]), downs_sidewidth, points_up=False)
up_data = np.squeeze(mic_snp[up_idx, 2:])
down_data = np.squeeze(mic_snp[down_idx, 2:])
# Return the set of vertices (x0, y0, x1, y1, x2, y2) and data.
# Note that the data are reordered.
return np.vstack((up_vert, down_vert)), np.vstack((up_data, down_data))
def gen_vertex_helper(left_vert, sw_list, points_up):
# import pdb; pdb.set_trace()
# Here be type check and dimension check.
xassert.runtime_assert(len(left_vert.shape) == 2 and
left_vert.shape[1] == 2,
'Error: vertex expected to be 2 dimensional.')
v1 = np.copy(left_vert)
v2 = np.copy(left_vert)
v2[:, 0] += sw_list
v3 = np.copy(left_vert)
v3[:, 0] += sw_list / 2.0
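    # The apex of an equilateral triangle sits half a side along x and
    # (sqrt(3) / 2) * side along y from the left base vertex -- above the base
    # for upward-pointing triangles, below it for downward-pointing ones.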
if points_up:
v3[:, 1] += sw_list / 2.0 * np.sqrt(3)
else:
v3[:, 1] -= sw_list / 2.0 * np.sqrt(3)
return np.hstack((v1, v2, v3))
|
[
"numpy.copy",
"numpy.sqrt",
"numpy.hstack",
"numpy.squeeze",
"numpy.vstack"
] |
[((807, 838), 'numpy.squeeze', 'np.squeeze', (['mic_snp[up_idx, 2:]'], {}), '(mic_snp[up_idx, 2:])\n', (817, 838), True, 'import numpy as np\n'), ((855, 888), 'numpy.squeeze', 'np.squeeze', (['mic_snp[down_idx, 2:]'], {}), '(mic_snp[down_idx, 2:])\n', (865, 888), True, 'import numpy as np\n'), ((1403, 1421), 'numpy.copy', 'np.copy', (['left_vert'], {}), '(left_vert)\n', (1410, 1421), True, 'import numpy as np\n'), ((1431, 1449), 'numpy.copy', 'np.copy', (['left_vert'], {}), '(left_vert)\n', (1438, 1449), True, 'import numpy as np\n'), ((1484, 1502), 'numpy.copy', 'np.copy', (['left_vert'], {}), '(left_vert)\n', (1491, 1502), True, 'import numpy as np\n'), ((1667, 1690), 'numpy.hstack', 'np.hstack', (['(v1, v2, v3)'], {}), '((v1, v2, v3))\n', (1676, 1690), True, 'import numpy as np\n'), ((605, 637), 'numpy.squeeze', 'np.squeeze', (['mic_snp[up_idx, 0:2]'], {}), '(mic_snp[up_idx, 0:2])\n', (615, 637), True, 'import numpy as np\n'), ((713, 747), 'numpy.squeeze', 'np.squeeze', (['mic_snp[down_idx, 0:2]'], {}), '(mic_snp[down_idx, 0:2])\n', (723, 747), True, 'import numpy as np\n'), ((1009, 1040), 'numpy.vstack', 'np.vstack', (['(up_vert, down_vert)'], {}), '((up_vert, down_vert))\n', (1018, 1040), True, 'import numpy as np\n'), ((1042, 1073), 'numpy.vstack', 'np.vstack', (['(up_data, down_data)'], {}), '((up_data, down_data))\n', (1051, 1073), True, 'import numpy as np\n'), ((1587, 1597), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1594, 1597), True, 'import numpy as np\n'), ((1644, 1654), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1651, 1654), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
"""Make history files into timeseries"""
import os
import sys
from subprocess import check_call, Popen, PIPE
from glob import glob
import re
import click
import yaml
import tempfile
import logging
import cftime
import xarray as xr
import numpy as np
import globus
from workflow import task_manager as tm
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
script_path = os.path.dirname(os.path.realpath(__file__))
GLOBUS_CAMPAIGN_PATH = '/gpfs/csfs1/cesm/development/bgcwg/projects/xtFe/cases'
USER = os.environ['USER']
ARCHIVE_ROOT = f'/glade/scratch/{USER}/archive'
tm.ACCOUNT = '<KEY>'
tm.MAXJOBS = 100
xr_open = dict(decode_times=False, decode_coords=False)
def get_year_filename(file):
"""Get the year from the datestr part of a file."""
date_parts = [int(d) for d in file.split('.')[-2].split('-')]
return date_parts[0]
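# Example (hypothetical history file name):
#   get_year_filename('case.pop.h.0003-01.nc') -> 3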
class file_date(object):
"""Class with attributes for the start, stop, and middle of a file's time
axis.
"""
def __init__(self, file):
with xr.open_dataset(file, **xr_open) as ds:
time_units = ds.time.units
calendar = ds.time.calendar
tb = ds.time.bounds
tb_dim = ds[tb].dims[-1]
t0 = ds[tb].isel(**{'time': 0, tb_dim: 0})
tf = ds[tb].isel(**{'time': -1, tb_dim: -1})
self.date = cftime.num2date(np.mean([t0, tf]), units=time_units,
calendar=calendar)
self.year = self.date.year
self.month = self.date.month
self.day = self.date.day
time_mid_point = cftime.num2date(ds[tb].mean(dim=tb_dim),
units=time_units, calendar=calendar)
self.t0 = time_mid_point[0]
self.tf = time_mid_point[-1]
def get_date_string(files, freq):
"""return a date string for timeseries files"""
date_start = file_date(files[0])
date_end = file_date(files[-1])
year = [date_start.t0.year, date_end.tf.year]
month = [date_start.t0.month, date_end.tf.month]
day = [date_start.t0.day, date_end.tf.day]
if freq in ['day_1', 'day_5', 'daily', 'day']:
return (f'{year[0]:04d}{month[0]:02d}{day[0]:02d}-'
f'{year[1]:04d}{month[1]:02d}{day[1]:02d}')
elif freq in ['month_1', 'monthly', 'mon']:
return (f'{year[0]:04d}{month[0]:02d}-'
f'{year[1]:04d}{month[1]:02d}')
elif freq in ['year_1', 'yearly', 'year', 'ann']:
return (f'{year[0]:04d}-'
f'{year[1]:04d}')
else:
raise ValueError(f'freq: {freq} not implemented')
def get_vars(files):
"""get lists of non-time-varying variables and time varying variables"""
with xr.open_dataset(files[0], **xr_open) as ds:
static_vars = [v for v, da in ds.variables.items() if 'time' not in da.dims]
static_vars = static_vars+['time', ds.time.attrs['bounds']]
time_vars = [v for v, da in ds.variables.items() if 'time' in da.dims and
v not in static_vars]
return static_vars, time_vars
@click.command()
@click.argument('case')
@click.option('--components', default='ocn')
@click.option('--archive-root', default=ARCHIVE_ROOT)
@click.option('--output-root', default=None)
@click.option('--only-streams', default=[])
@click.option('--only-variables', default=None)
@click.option('--campaign-transfer', default=False, is_flag=True)
@click.option('--campaign-path', default=GLOBUS_CAMPAIGN_PATH)
@click.option('--year-groups', default=None)
@click.option('--demo', default=False, is_flag=True)
@click.option('--clobber', default=False, is_flag=True)
def main(case, components=['ocn', 'ice'], archive_root=ARCHIVE_ROOT, output_root=None,
only_streams=[], only_variables=None, campaign_transfer=False, campaign_path=None,
year_groups=None, demo=False, clobber=False):
droot = os.path.join(archive_root, case)
if isinstance(components, str):
components = components.split(',')
if output_root is None:
droot_out = droot
else:
droot_out = os.path.join(output_root, case)
if campaign_transfer and campaign_path is None:
raise ValueError('campaign path required')
if year_groups is None:
year_groups = [(-1e36, 1e36)]
report_year_groups = False
elif isinstance(year_groups, str):
year_groups = year_groups.split(',')
year_groups = [tuple(int(i) for i in ygi.split(':')) for ygi in year_groups]
report_year_groups = True
else:
raise ValueError('cannot parse year groups')
if isinstance(only_streams, str):
only_streams = only_streams.split(',')
if isinstance(only_variables, str):
only_variables = only_variables.split(',')
logger.info('constructing time-series of the following year groups:')
logger.info(year_groups)
print()
with open(f'{script_path}/cesm_streams.yml') as f:
streams = yaml.safe_load(f)
for component in components:
print('='*80)
logger.info(f'working on component: {component}')
print('='*80)
for stream, stream_info in streams[component].items():
if only_streams:
if stream not in only_streams:
continue
print('-'*80)
logger.info(f'working on stream: {stream}')
print('-'*80)
dateglob = stream_info['dateglob']
dateregex = stream_info['dateregex']
freq = stream_info['freq']
dout = f'{droot_out}/{component}/proc/tseries/{freq}'
if not os.path.exists(dout):
os.makedirs(dout, exist_ok=True)
# set target destination on globus
globus_file_list = []
if campaign_transfer:
campaign_dout = f'{campaign_path}/{case}/{component}/proc/tseries/{freq}'
globus.makedirs('campaign', campaign_dout)
globus_file_list = globus.listdir('campaign', campaign_dout)
logger.info(f'found {len(globus_file_list)} files on campaign.')
# get input files
files = sorted(glob(f'{droot}/{component}/hist/{case}.{stream}.{dateglob}.nc'))
if len(files) == 0:
logger.warning(f'no files: component={component}, stream={stream}')
continue
# get file dates
files_year = [get_year_filename(f) for f in files]
# get variable lists
static_vars, time_vars = get_vars(files)
if only_variables is not None:
time_vars = [v for v in time_vars if v in only_variables]
print(only_variables)
if not static_vars:
continue
# make a report
logger.info(f'found {len(files)} history files')
logger.info(f'history file years: {min(files_year)}-{max(files_year)}')
logger.info(f'found {len(time_vars)} variables to process')
logger.info(f'expecting to generate {len(time_vars) * len(year_groups)} timeseries files')
for y0, yf in year_groups:
if report_year_groups:
logger.info(f'working on year group {y0}-{yf}')
files_group_i = [f for f, y in zip(files, files_year)
if (y0 <= y) and (y <= yf)]
                fd, tmpfile = tempfile.mkstemp(suffix='.filelist', prefix='tmpfile',
                                               dir=os.environ['TMPDIR'])
                os.close(fd)  # close the low-level descriptor; the path is reopened below
                with open(tmpfile, 'w') as fid:
for i, f in enumerate(files_group_i):
fid.write('%s\n'%f)
# get the date string
date_cat = get_date_string(files_group_i, freq)
for i, v in enumerate(time_vars):
file_cat_basename = '.'.join([case, stream, v, date_cat, 'nc'])
file_cat = os.path.join(dout, file_cat_basename)
if not clobber:
if file_cat_basename in globus_file_list:
print(f'on campaign: {file_cat_basename}...skipping')
continue
if os.path.exists(file_cat):
print(f'exists: {file_cat_basename}...skipping')
continue
logger.info(f'creating {file_cat}')
vars = ','.join(static_vars+[v])
cat_cmd = [f'cat {tmpfile} | ncrcat -O -h -v {vars} {file_cat}']
compress_cmd = [f'ncks -O -4 -L 1 {file_cat} {file_cat}']
if not demo:
if campaign_transfer:
xfr_cmd = [f'{script_path}/globus.py',
'--src-ep=glade --dst-ep=campaign',
'--retry=3',
f'--src-paths={file_cat}',
f'--dst-paths={campaign_dout}/{file_cat_basename}']
cleanup_cmd = [f'if [ $? -eq 0 ]; then rm -f {file_cat}; else exit 1; fi']
else:
xfr_cmd = []
cleanup_cmd = []
jid = tm.submit([cat_cmd, compress_cmd, xfr_cmd, cleanup_cmd],
modules=['nco'], memory='100GB')
print()
tm.wait()
if __name__ == '__main__':
main()
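# Example invocation (hypothetical case and script names):
#   python gen_timeseries.py b.e21.SOME_CASE.001 --components ocn,ice --year-groups 1:50,51:100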
|
[
"logging.getLogger",
"numpy.mean",
"click.argument",
"logging.StreamHandler",
"os.path.exists",
"os.makedirs",
"click.option",
"os.path.join",
"globus.listdir",
"xarray.open_dataset",
"os.path.realpath",
"workflow.task_manager.wait",
"yaml.safe_load",
"workflow.task_manager.submit",
"globus.makedirs",
"click.command",
"tempfile.mkstemp",
"glob.glob"
] |
[((341, 368), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (358, 368), False, 'import logging\n'), ((415, 448), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (436, 448), False, 'import logging\n'), ((3246, 3261), 'click.command', 'click.command', ([], {}), '()\n', (3259, 3261), False, 'import click\n'), ((3263, 3285), 'click.argument', 'click.argument', (['"""case"""'], {}), "('case')\n", (3277, 3285), False, 'import click\n'), ((3287, 3330), 'click.option', 'click.option', (['"""--components"""'], {'default': '"""ocn"""'}), "('--components', default='ocn')\n", (3299, 3330), False, 'import click\n'), ((3332, 3384), 'click.option', 'click.option', (['"""--archive-root"""'], {'default': 'ARCHIVE_ROOT'}), "('--archive-root', default=ARCHIVE_ROOT)\n", (3344, 3384), False, 'import click\n'), ((3386, 3429), 'click.option', 'click.option', (['"""--output-root"""'], {'default': 'None'}), "('--output-root', default=None)\n", (3398, 3429), False, 'import click\n'), ((3431, 3473), 'click.option', 'click.option', (['"""--only-streams"""'], {'default': '[]'}), "('--only-streams', default=[])\n", (3443, 3473), False, 'import click\n'), ((3475, 3521), 'click.option', 'click.option', (['"""--only-variables"""'], {'default': 'None'}), "('--only-variables', default=None)\n", (3487, 3521), False, 'import click\n'), ((3523, 3587), 'click.option', 'click.option', (['"""--campaign-transfer"""'], {'default': '(False)', 'is_flag': '(True)'}), "('--campaign-transfer', default=False, is_flag=True)\n", (3535, 3587), False, 'import click\n'), ((3589, 3650), 'click.option', 'click.option', (['"""--campaign-path"""'], {'default': 'GLOBUS_CAMPAIGN_PATH'}), "('--campaign-path', default=GLOBUS_CAMPAIGN_PATH)\n", (3601, 3650), False, 'import click\n'), ((3652, 3695), 'click.option', 'click.option', (['"""--year-groups"""'], {'default': 'None'}), "('--year-groups', default=None)\n", (3664, 3695), False, 'import click\n'), ((3697, 3748), 'click.option', 'click.option', (['"""--demo"""'], {'default': '(False)', 'is_flag': '(True)'}), "('--demo', default=False, is_flag=True)\n", (3709, 3748), False, 'import click\n'), ((3750, 3804), 'click.option', 'click.option', (['"""--clobber"""'], {'default': '(False)', 'is_flag': '(True)'}), "('--clobber', default=False, is_flag=True)\n", (3762, 3804), False, 'import click\n'), ((539, 565), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (555, 565), False, 'import os\n'), ((4053, 4085), 'os.path.join', 'os.path.join', (['archive_root', 'case'], {}), '(archive_root, case)\n', (4065, 4085), False, 'import os\n'), ((9689, 9698), 'workflow.task_manager.wait', 'tm.wait', ([], {}), '()\n', (9696, 9698), True, 'from workflow import task_manager as tm\n'), ((2886, 2922), 'xarray.open_dataset', 'xr.open_dataset', (['files[0]'], {}), '(files[0], **xr_open)\n', (2901, 2922), True, 'import xarray as xr\n'), ((4250, 4281), 'os.path.join', 'os.path.join', (['output_root', 'case'], {}), '(output_root, case)\n', (4262, 4281), False, 'import os\n'), ((5124, 5141), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (5138, 5141), False, 'import yaml\n'), ((1164, 1196), 'xarray.open_dataset', 'xr.open_dataset', (['file'], {}), '(file, **xr_open)\n', (1179, 1196), True, 'import xarray as xr\n'), ((1506, 1523), 'numpy.mean', 'np.mean', (['[t0, tf]'], {}), '([t0, tf])\n', (1513, 1523), True, 'import numpy as np\n'), ((5779, 5799), 'os.path.exists', 'os.path.exists', (['dout'], {}), '(dout)\n', (5793, 
5799), False, 'import os\n'), ((5817, 5849), 'os.makedirs', 'os.makedirs', (['dout'], {'exist_ok': '(True)'}), '(dout, exist_ok=True)\n', (5828, 5849), False, 'import os\n'), ((6072, 6114), 'globus.makedirs', 'globus.makedirs', (['"""campaign"""', 'campaign_dout'], {}), "('campaign', campaign_dout)\n", (6087, 6114), False, 'import globus\n'), ((6150, 6191), 'globus.listdir', 'globus.listdir', (['"""campaign"""', 'campaign_dout'], {}), "('campaign', campaign_dout)\n", (6164, 6191), False, 'import globus\n'), ((6331, 6394), 'glob.glob', 'glob', (['f"""{droot}/{component}/hist/{case}.{stream}.{dateglob}.nc"""'], {}), "(f'{droot}/{component}/hist/{case}.{stream}.{dateglob}.nc')\n", (6335, 6394), False, 'from glob import glob\n'), ((7598, 7683), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".filelist"""', 'prefix': '"""tmpfile"""', 'dir': "os.environ['TMPDIR']"}), "(suffix='.filelist', prefix='tmpfile', dir=os.environ['TMPDIR']\n )\n", (7614, 7683), False, 'import tempfile\n'), ((8146, 8183), 'os.path.join', 'os.path.join', (['dout', 'file_cat_basename'], {}), '(dout, file_cat_basename)\n', (8158, 8183), False, 'import os\n'), ((8433, 8457), 'os.path.exists', 'os.path.exists', (['file_cat'], {}), '(file_cat)\n', (8447, 8457), False, 'import os\n'), ((9528, 9621), 'workflow.task_manager.submit', 'tm.submit', (['[cat_cmd, compress_cmd, xfr_cmd, cleanup_cmd]'], {'modules': "['nco']", 'memory': '"""100GB"""'}), "([cat_cmd, compress_cmd, xfr_cmd, cleanup_cmd], modules=['nco'],\n memory='100GB')\n", (9537, 9621), True, 'from workflow import task_manager as tm\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import numpy
import quantities
quantities.set_default_units('si')
quantities.UnitQuantity('kilocalorie', 1000.0*quantities.cal, symbol='kcal')
quantities.UnitQuantity('kilojoule', 1000.0*quantities.J, symbol='kJ')
from rmgpy.chem.molecule import Molecule
from rmgpy.chem.species import Species
from rmgpy.chem.reaction import Reaction
from rmgpy.chem.kinetics import Arrhenius
from rmgpy.chem.thermo import ThermoData
from rmgpy.solver.simple import SimpleReactor
################################################################################
class SimpleReactorCheck(unittest.TestCase):
def testSolve(self):
"""
Test the simple batch reactor with a simple kinetic model. Here we
choose a kinetic model consisting of the hydrogen abstraction reaction
CH4 + C2H5 <=> CH3 + C2H6.
"""
CH4 = Species(
molecule=[Molecule().fromSMILES("C")],
thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([ 8.615, 9.687,10.963,12.301,14.841,16.976,20.528],"cal/mol*K"), H298=(-17.714,"kcal/mol"), S298=(44.472,"cal/mol*K"))
)
CH3 = Species(
molecule=[Molecule().fromSMILES("[CH3]")],
thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([ 9.397,10.123,10.856,11.571,12.899,14.055,16.195],"cal/mol*K"), H298=( 9.357,"kcal/mol"), S298=(45.174,"cal/mol*K"))
)
C2H6 = Species(
molecule=[Molecule().fromSMILES("CC")],
thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([12.684,15.506,18.326,20.971,25.500,29.016,34.595],"cal/mol*K"), H298=(-19.521,"kcal/mol"), S298=(54.799,"cal/mol*K"))
)
C2H5 = Species(
molecule=[Molecule().fromSMILES("C[CH2]")],
thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([11.635,13.744,16.085,18.246,21.885,24.676,29.107],"cal/mol*K"), H298=( 29.496,"kcal/mol"), S298=(56.687,"cal/mol*K"))
)
rxn1 = Reaction(reactants=[C2H6,CH3], products=[C2H5,CH4], kinetics=Arrhenius(A=686.375*6, n=4.40721, Ea=7.82799*4184., T0=298.15))
coreSpecies = [CH4,CH3,C2H6,C2H5]
edgeSpecies = []
coreReactions = [rxn1]
edgeReactions = []
T = 1000; P = 1.0e5
rxnSystem = SimpleReactor(T, P, initialMoleFractions={C2H5: 0.1, CH3: 0.1, CH4: 0.4, C2H6: 0.4})
rxnSystem.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions)
tlist = numpy.array([10**(i/10.0) for i in range(-130, -49)], numpy.float64)
# Integrate to get the solution at each time point
t = []; y = []; reactionRates = []; speciesRates = []
for t1 in tlist:
rxnSystem.advance(t1)
t.append(rxnSystem.t)
# You must make a copy of y because it is overwritten by DASSL at
# each call to advance()
y.append(rxnSystem.y.copy())
reactionRates.append(rxnSystem.coreReactionRates.copy())
speciesRates.append(rxnSystem.coreSpeciesRates.copy())
# Convert the solution vectors to numpy arrays
t = numpy.array(t, numpy.float64)
y = numpy.array(y, numpy.float64)
reactionRates = numpy.array(reactionRates, numpy.float64)
speciesRates = numpy.array(speciesRates, numpy.float64)
import pylab
fig = pylab.figure(figsize=(6,6))
pylab.subplot(2,1,1)
pylab.semilogx(t, y)
pylab.ylabel('Concentration (mol/m$^\\mathdefault{3}$)')
pylab.legend(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)
pylab.subplot(2,1,2)
pylab.semilogx(t, speciesRates)
pylab.legend(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)
pylab.xlabel('Time (s)')
pylab.ylabel('Rate (mol/m$^\\mathdefault{3}$*s)')
fig.subplots_adjust(left=0.12, bottom=0.10, right=0.95, top=0.95, wspace=0.20, hspace=0.35)
pylab.show()
################################################################################
if __name__ == '__main__':
unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
[
"pylab.ylabel",
"quantities.set_default_units",
"pylab.subplot",
"unittest.TextTestRunner",
"pylab.xlabel",
"pylab.legend",
"rmgpy.chem.kinetics.Arrhenius",
"pylab.figure",
"numpy.array",
"rmgpy.chem.thermo.ThermoData",
"rmgpy.chem.molecule.Molecule",
"pylab.semilogx",
"rmgpy.solver.simple.SimpleReactor",
"quantities.UnitQuantity",
"pylab.show"
] |
[((91, 125), 'quantities.set_default_units', 'quantities.set_default_units', (['"""si"""'], {}), "('si')\n", (119, 125), False, 'import quantities\n'), ((126, 204), 'quantities.UnitQuantity', 'quantities.UnitQuantity', (['"""kilocalorie"""', '(1000.0 * quantities.cal)'], {'symbol': '"""kcal"""'}), "('kilocalorie', 1000.0 * quantities.cal, symbol='kcal')\n", (149, 204), False, 'import quantities\n'), ((203, 275), 'quantities.UnitQuantity', 'quantities.UnitQuantity', (['"""kilojoule"""', '(1000.0 * quantities.J)'], {'symbol': '"""kJ"""'}), "('kilojoule', 1000.0 * quantities.J, symbol='kJ')\n", (226, 275), False, 'import quantities\n'), ((2380, 2468), 'rmgpy.solver.simple.SimpleReactor', 'SimpleReactor', (['T', 'P'], {'initialMoleFractions': '{C2H5: 0.1, CH3: 0.1, CH4: 0.4, C2H6: 0.4}'}), '(T, P, initialMoleFractions={C2H5: 0.1, CH3: 0.1, CH4: 0.4,\n C2H6: 0.4})\n', (2393, 2468), False, 'from rmgpy.solver.simple import SimpleReactor\n'), ((3216, 3245), 'numpy.array', 'numpy.array', (['t', 'numpy.float64'], {}), '(t, numpy.float64)\n', (3227, 3245), False, 'import numpy\n'), ((3258, 3287), 'numpy.array', 'numpy.array', (['y', 'numpy.float64'], {}), '(y, numpy.float64)\n', (3269, 3287), False, 'import numpy\n'), ((3312, 3353), 'numpy.array', 'numpy.array', (['reactionRates', 'numpy.float64'], {}), '(reactionRates, numpy.float64)\n', (3323, 3353), False, 'import numpy\n'), ((3377, 3417), 'numpy.array', 'numpy.array', (['speciesRates', 'numpy.float64'], {}), '(speciesRates, numpy.float64)\n', (3388, 3417), False, 'import numpy\n'), ((3454, 3482), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3466, 3482), False, 'import pylab\n'), ((3490, 3512), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3503, 3512), False, 'import pylab\n'), ((3519, 3539), 'pylab.semilogx', 'pylab.semilogx', (['t', 'y'], {}), '(t, y)\n', (3533, 3539), False, 'import pylab\n'), ((3548, 3604), 'pylab.ylabel', 'pylab.ylabel', (['"""Concentration (mol/m$^\\\\mathdefault{3}$)"""'], {}), "('Concentration (mol/m$^\\\\mathdefault{3}$)')\n", (3560, 3604), False, 'import pylab\n'), ((3613, 3664), 'pylab.legend', 'pylab.legend', (["['CH4', 'CH3', 'C2H6', 'C2H5']"], {'loc': '(4)'}), "(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)\n", (3625, 3664), False, 'import pylab\n'), ((3673, 3695), 'pylab.subplot', 'pylab.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3686, 3695), False, 'import pylab\n'), ((3702, 3733), 'pylab.semilogx', 'pylab.semilogx', (['t', 'speciesRates'], {}), '(t, speciesRates)\n', (3716, 3733), False, 'import pylab\n'), ((3742, 3793), 'pylab.legend', 'pylab.legend', (["['CH4', 'CH3', 'C2H6', 'C2H5']"], {'loc': '(4)'}), "(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)\n", (3754, 3793), False, 'import pylab\n'), ((3802, 3826), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3814, 3826), False, 'import pylab\n'), ((3835, 3884), 'pylab.ylabel', 'pylab.ylabel', (['"""Rate (mol/m$^\\\\mathdefault{3}$*s)"""'], {}), "('Rate (mol/m$^\\\\mathdefault{3}$*s)')\n", (3847, 3884), False, 'import pylab\n'), ((3993, 4005), 'pylab.show', 'pylab.show', ([], {}), '()\n', (4003, 4005), False, 'import pylab\n'), ((4148, 4184), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4171, 4184), False, 'import unittest\n'), ((985, 1190), 'rmgpy.chem.thermo.ThermoData', 'ThermoData', ([], {'Tdata': "([300, 400, 500, 600, 800, 1000, 1500], 'K')", 'Cpdata': "([8.615, 9.687, 10.963, 12.301, 14.841, 16.976, 
20.528], 'cal/mol*K')", 'H298': "(-17.714, 'kcal/mol')", 'S298': "(44.472, 'cal/mol*K')"}), "(Tdata=([300, 400, 500, 600, 800, 1000, 1500], 'K'), Cpdata=([\n 8.615, 9.687, 10.963, 12.301, 14.841, 16.976, 20.528], 'cal/mol*K'),\n H298=(-17.714, 'kcal/mol'), S298=(44.472, 'cal/mol*K'))\n", (995, 1190), False, 'from rmgpy.chem.thermo import ThermoData\n'), ((1279, 1483), 'rmgpy.chem.thermo.ThermoData', 'ThermoData', ([], {'Tdata': "([300, 400, 500, 600, 800, 1000, 1500], 'K')", 'Cpdata': "([9.397, 10.123, 10.856, 11.571, 12.899, 14.055, 16.195], 'cal/mol*K')", 'H298': "(9.357, 'kcal/mol')", 'S298': "(45.174, 'cal/mol*K')"}), "(Tdata=([300, 400, 500, 600, 800, 1000, 1500], 'K'), Cpdata=([\n 9.397, 10.123, 10.856, 11.571, 12.899, 14.055, 16.195], 'cal/mol*K'),\n H298=(9.357, 'kcal/mol'), S298=(45.174, 'cal/mol*K'))\n", (1289, 1483), False, 'from rmgpy.chem.thermo import ThermoData\n'), ((1571, 1776), 'rmgpy.chem.thermo.ThermoData', 'ThermoData', ([], {'Tdata': "([300, 400, 500, 600, 800, 1000, 1500], 'K')", 'Cpdata': "([12.684, 15.506, 18.326, 20.971, 25.5, 29.016, 34.595], 'cal/mol*K')", 'H298': "(-19.521, 'kcal/mol')", 'S298': "(54.799, 'cal/mol*K')"}), "(Tdata=([300, 400, 500, 600, 800, 1000, 1500], 'K'), Cpdata=([\n 12.684, 15.506, 18.326, 20.971, 25.5, 29.016, 34.595], 'cal/mol*K'),\n H298=(-19.521, 'kcal/mol'), S298=(54.799, 'cal/mol*K'))\n", (1581, 1776), False, 'from rmgpy.chem.thermo import ThermoData\n'), ((1867, 2073), 'rmgpy.chem.thermo.ThermoData', 'ThermoData', ([], {'Tdata': "([300, 400, 500, 600, 800, 1000, 1500], 'K')", 'Cpdata': "([11.635, 13.744, 16.085, 18.246, 21.885, 24.676, 29.107], 'cal/mol*K')", 'H298': "(29.496, 'kcal/mol')", 'S298': "(56.687, 'cal/mol*K')"}), "(Tdata=([300, 400, 500, 600, 800, 1000, 1500], 'K'), Cpdata=([\n 11.635, 13.744, 16.085, 18.246, 21.885, 24.676, 29.107], 'cal/mol*K'),\n H298=(29.496, 'kcal/mol'), S298=(56.687, 'cal/mol*K'))\n", (1877, 2073), False, 'from rmgpy.chem.thermo import ThermoData\n'), ((2141, 2208), 'rmgpy.chem.kinetics.Arrhenius', 'Arrhenius', ([], {'A': '(686.375 * 6)', 'n': '(4.40721)', 'Ea': '(7.82799 * 4184.0)', 'T0': '(298.15)'}), '(A=686.375 * 6, n=4.40721, Ea=7.82799 * 4184.0, T0=298.15)\n', (2150, 2208), False, 'from rmgpy.chem.kinetics import Arrhenius\n'), ((937, 947), 'rmgpy.chem.molecule.Molecule', 'Molecule', ([], {}), '()\n', (945, 947), False, 'from rmgpy.chem.molecule import Molecule\n'), ((1227, 1237), 'rmgpy.chem.molecule.Molecule', 'Molecule', ([], {}), '()\n', (1235, 1237), False, 'from rmgpy.chem.molecule import Molecule\n'), ((1522, 1532), 'rmgpy.chem.molecule.Molecule', 'Molecule', ([], {}), '()\n', (1530, 1532), False, 'from rmgpy.chem.molecule import Molecule\n'), ((1814, 1824), 'rmgpy.chem.molecule.Molecule', 'Molecule', ([], {}), '()\n', (1822, 1824), False, 'from rmgpy.chem.molecule import Molecule\n')]
|
from typing import List, Tuple, Optional
import numpy as np
def rmse(x: List[float], y: List[float]) -> float:
    # Root-mean-square error between two equally long sequences.
    r = 0.0
    for (a, b) in zip(x, y):
        r += (a - b) ** 2
    return (r / len(x)) ** 0.5
def lin_reg(data: List[Tuple[float, float]]) -> Tuple[float, float]:
d = np.array(data)
m = d.shape[0]
p = np.sum(d[:, 0])
q = np.sum(d[:, 1])
r = np.sum(d[:, 0] * d[:, 1])
s = np.sum(d[:, 0] ** 2)
    # Closed-form least squares for y = a*x + b over the m data points.
    d = m * s - p ** 2
    a = (m * r - p * q) / d
b = (s * q - p * r) / d
return (a, b)
class LinearRegressor():
def __init__(self):
self._coeffs = None # type: Optional[Tuple[float, float]]
def fit(self, data: List[Tuple[float, float]]) -> None:
self._coeffs = lin_reg(data)
    def predict(self, x: List[float]) -> List[float]:
        # Assumed behaviour for the previously empty stub: apply the fitted line y = a*x + b.
        a, b = self.coeffs
        return [a * xi + b for xi in x]
@property
def coeffs(self) -> Tuple[float, float]:
if self._coeffs is None:
raise Exception('You need to call `fit` on the model first.')
return self._coeffs
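# Hypothetical usage sketch (not in the original file): three collinear points
# give an exact fit with the closed-form least squares above.
#   model = LinearRegressor()
#   model.fit([(0.0, 1.0), (1.0, 3.0), (2.0, 5.0)])
#   model.coeffs          # -> approximately (2.0, 1.0), i.e. y = 2*x + 1
#   model.predict([3.0])  # -> approximately [7.0]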
class Vertex:
def __init__(self, id: int) -> None:
self.id = id
self.neighbours = set()
self.visited = False
def add_neighbour(self, other_id):
self.neighbours.add(other_id)
def visit(self):
self.visited = True
def __str__(self):
return "Vertex " + str(self.id)
def __repr__(self):
return self.__str__()
class Graph:
def __init__(self, matrix=None):
self.vertices = []
if matrix is None:
return
n = len(matrix)
for i in range(n):
v = Vertex(i)
self.vertices.append(v)
for j in range(n):
if matrix[i][j]:
v.add_neighbour(j)
def __str__(self):
r = ""
for row in self.matrix():
r += str(row) + "\n"
return r
def matrix(self):
n = len(self.vertices)
m = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
for j in range(n):
if j in self.vertices[i].neighbours:
m[i][j] = 1
return m
def add_vertex(self, neighbours):
v = Vertex(len(self.vertices))
for n in neighbours:
v.add_neighbour(n)
self.vertices.append(v)
return self
def add_egde(self, e):
self.vertices[e[0]].add_neighbour(e[1])
return self
def clear_visited(self):
for v in self.vertices:
v.visited = False
    def BFS(self, start=None):
        q = []
        r = []
        if start is not None:
            q.append(start)
        else:
            q.append(self.vertices[0])
        # Mark vertices when they are enqueued so no vertex can be queued twice.
        q[0].visit()
        while q:
            c = q.pop(0)
            r.append(c)
            for n in c.neighbours:
                nei = self.vertices[n]
                if not nei.visited:
                    nei.visit()
                    q.append(nei)
        self.clear_visited()
        return r
g = Graph([
[0, 1, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
])
print(g.BFS())
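# With this adjacency matrix the traversal starts at vertex 0 and should print
# [Vertex 0, Vertex 1, Vertex 2, Vertex 4, Vertex 3] (the neighbour order relies
# on CPython's set iteration for small ints, so it is not guaranteed in general).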
|
[
"numpy.array",
"numpy.sum"
] |
[((270, 284), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (278, 284), True, 'import numpy as np\n'), ((312, 327), 'numpy.sum', 'np.sum', (['d[:, 0]'], {}), '(d[:, 0])\n', (318, 327), True, 'import numpy as np\n'), ((336, 351), 'numpy.sum', 'np.sum', (['d[:, 1]'], {}), '(d[:, 1])\n', (342, 351), True, 'import numpy as np\n'), ((360, 385), 'numpy.sum', 'np.sum', (['(d[:, 0] * d[:, 1])'], {}), '(d[:, 0] * d[:, 1])\n', (366, 385), True, 'import numpy as np\n'), ((394, 414), 'numpy.sum', 'np.sum', (['(d[:, 0] ** 2)'], {}), '(d[:, 0] ** 2)\n', (400, 414), True, 'import numpy as np\n')]
|
import csv
import gzip
import json
import re
import sys
from ast import literal_eval
from collections import Counter
from math import exp
import numpy as np
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
OPINION_EXP = re.compile(r"(.*)<o>(.*?)</o>(.*)")
ASPECT_EXP = re.compile(r"(.*)<f>(.*?)</f>(.*)")
TAGGED_EXP = re.compile(r"<\w>(.*?)</\w>")
TARGET_EXP = re.compile(r"\[.*\]")
def readline_gzip(path):
with gzip.open(path, "rt") as f:
for line in f:
yield line
def readline(path):
with open(path, "r") as f:
for line in f:
yield line
def unique(sequence):
"""
    Returns a list of unique items, preserving the order of the original list.
"""
seen = set()
return [x for x in sequence if not (x in seen or seen.add(x))]
def to_dict(values):
value2index = {}
for i, item in enumerate(unique(values)):
value2index[item] = i
return value2index
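# Example: to_dict(["b", "a", "b", "c"]) -> {"b": 0, "a": 1, "c": 2}
# (indices follow first-seen order via unique()).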
def save_dict(value2index, path):
with open(path, "w") as f:
for value, index in value2index.items():
f.write("%s %d\n" % (value, index))
return value2index
def load_dict(path, sep=None):
dic = {}
with open(path, "r") as f:
for line in f:
try:
[item, index] = line.split(sep)
dic[item] = int(index)
except:
print("WARN - skipping invalid line: {}".format(line), sys.exc_info())
return dic
def save_count(values, path):
counts = Counter(values)
with open(path, "w") as f:
for w, count in counts.most_common():
f.write("%s %d\n" % (w, count))
return counts
def load_count(path, sep=None, dtypeKey=""):
counts = Counter()
with open(path, "r") as f:
for line in f:
try:
[w, count] = line.strip().split(sep)
if dtypeKey == "int":
w = int(w)
counts[w] = int(count)
except:
print("WARN - skipping invalid line: {}".format(line), sys.exc_info())
return counts
def reverse_key(key_value):
return {v: k for k, v in key_value.items()}
def parse_sentence(sentence, opinion, aspect):
stemmer = PorterStemmer()
sentence = re.sub(
re.compile("(^| )({})".format(opinion)), r"\1<o>\2</o>", sentence, 1
)
if not OPINION_EXP.match(sentence):
sentence = re.sub(
re.compile("(^| )({})".format(stemmer.stem(opinion))),
r"\1<o>\2</o>",
sentence,
1,
)
sentence = re.sub(
re.compile("(^| )({})".format(aspect)), r"\1<f>\2</f>", sentence, 1
)
if not ASPECT_EXP.match(sentence):
sentence = re.sub(
re.compile("(^| )({})".format(stemmer.stem(aspect))),
r"\1<f>\2</f>",
sentence,
1,
)
sentence = re.sub(
re.compile("<o>{}</o>".format(opinion)),
"<o>{}</o>".format("_".join(opinion.split(" "))),
sentence,
)
sentence = re.sub(
re.compile("<f>{}</f>".format(aspect)),
"<f>{}</f>".format("_".join(aspect.split(" "))),
sentence,
)
sentence = re.sub(r"(<\w?>[ \w]+)(</\w?>)([-\w]+)", r"\1\3\2", sentence)
sentence = re.sub(r"\(\d+\)$", "", sentence).strip().lower()
opinion_pos = None
aspect_pos = None
opinion_segments = OPINION_EXP.match(sentence)
if opinion_segments is not None:
opinion_pos = len(
word_tokenize(re.sub(TAGGED_EXP, r"\1", opinion_segments.group(1)))
)
opinion = opinion_segments.group(2)
aspect_segments = ASPECT_EXP.match(sentence)
if aspect_segments is not None:
aspect_pos = len(
word_tokenize(re.sub(TAGGED_EXP, r"\1", aspect_segments.group(1)))
)
aspect = aspect_segments.group(2)
tokens = word_tokenize(re.sub(TAGGED_EXP, r"\1", sentence))
sentence_len = len(tokens)
sentence = " ".join(tokens)
return sentence, sentence_len, opinion_pos, opinion, aspect_pos, aspect
def to_one_hot(idx, size, value=1.0):
one_hot = np.zeros(size).astype(np.float32)
one_hot[int(float(idx))] = value
return one_hot
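# Example: to_one_hot(2, 5) -> array([0., 0., 1., 0., 0.], dtype=float32)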
def flatten_json(json_content):
csv_content = {}
for k, v in json_content.items():
if not isinstance(v, dict):
csv_content[k] = v
else:
for k1, v1 in v.items():
csv_content["{}_{}".format(k, k1)] = v1
return csv_content
def dict_to_csv(json_content, path):
with open(path, "w") as f:
writer = csv.DictWriter(f, fieldnames=list(json_content.keys()))
writer.writeheader()
writer.writerow(json_content)
def dump_json(json_content, path):
with open(path, "w") as f:
json.dump(json_content, f)
def load_json(path):
with open(path, "r") as f:
return json.load(f)
def export_spare_matrix(M, path, sep="\t"):
assert len(M.shape) == 2
(d1, d2) = M.shape
with open(path, "w") as f:
f.write("{}\t{}\t{}\n".format(d1, d2, np.count_nonzero(M)))
for i in range(d1):
for j in range(d2):
if M[i, j] != 0:
f.write("{}\t{}\t{}\n".format(i, j, M[i, j]))
def export_dense_matrix(M, path):
assert len(M.shape) == 2
(d1, d2) = M.shape
with open(path, "w") as f:
f.write("Dimension: {} x {}\n".format(d1, d2))
for i in range(d1):
f.write("[{}]\n".format("\t".join([str(j) for j in M[i]])))
def load_sparse_matrix(path):
with open(path, "r") as f:
line = f.readline()
tokens = line.strip().split()
assert len(tokens) == 3
r, c, n = int(tokens[0]), int(tokens[1]), int(tokens[2])
matrix = np.zeros((r, c))
for i in range(n):
line = f.readline()
tokens = line.strip().split()
assert len(tokens) == 3
matrix[int(tokens[0])][int(tokens[1])] = float(tokens[2])
return matrix
def load_dense_matrix(path):
result = []
with open(path, "r") as f:
tokens = f.readline().split(":")[1].split("x")
assert len(tokens) == 2
r, c = int(tokens[0]), int(tokens[1])
for i in range(r):
tokens = f.readline().strip()[1:-1].split()
assert len(tokens) == c
values = [float(v) for v in tokens]
result.append(values)
return np.array(result)
def export_dense_tensor(T, path):
assert len(T.shape) == 3
(d1, d2, d3) = T.shape
with open(path, "w") as f:
f.write("Dimension: {} x {} x {}\n".format(d1, d2, d3))
for i in range(d1):
f.write(
"{}\n".format(
",".join(
["[{}]".format("\t".join([str(k) for k in j])) for j in T[i]]
)
)
)
def load_dense_tensor(path):
result = []
with open(path, "r") as f:
tokens = f.readline().split(":")[1].split("x")
assert len(tokens) == 3
d1, d2, d3 = int(tokens[0]), int(tokens[1]), int(tokens[2])
for i in range(d1):
lst = f.readline().strip().split(",")
arr = []
for j in range(d2):
values = [float(v) for v in lst[j][1:-1].split()]
arr.append(values)
result.append(arr)
return np.array(result)
def empty_file(path):
with open(path, "w") as f:
f.write("")
def frequent_score(cnt, N):
return 1 + (N - 1) * (2 / (1 + exp(-cnt)) - 1)
def sentiment_score(sentiment, N):
return 1 + (N - 1) / (1 + exp(-sentiment))
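# Both helpers squash a raw count/sentiment onto a 1..N scale, e.g.
# frequent_score(0, 5) == 1.0 and sentiment_score(0, 5) == 3.0.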
def lcs(a, b):
lengths = [[0 for j in range(len(b) + 1)] for i in range(len(a) + 1)]
# row 0 and column 0 are initialized to 0 already
for i, x in enumerate(a):
for j, y in enumerate(b):
if x == y:
lengths[i + 1][j + 1] = lengths[i][j] + 1
else:
lengths[i + 1][j + 1] = max(lengths[i + 1][j], lengths[i][j + 1])
# read the subsequence out from the matrix
result = []
x, y = len(a), len(b)
while x != 0 and y != 0:
if lengths[x][y] == lengths[x - 1][y]:
x -= 1
elif lengths[x][y] == lengths[x][y - 1]:
y -= 1
else:
assert a[x - 1] == b[y - 1]
result.append(a[x - 1])
x -= 1
y -= 1
return result[::-1]
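# Example: lcs("ABCD", "AXCD") -> ['A', 'C', 'D'] (the elements of one longest
# common subsequence, in their original order).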
def array2string(x):
assert len(np.shape(x)) <= 2
if len(np.shape(x)) == 1:
return ",".join([str(i) for i in x])
elif len(np.shape(x)) == 2:
return ";".join([array2string(i) for i in x])
def string2array(x):
if len(x.split(";")) > 1:
return [[j for j in i.split(",")] for i in x.split(";")]
return [i for i in x.split(",")]
def substitute_word(sentence, new_word, position):
sentence = sentence.split()
sentence[position] = new_word
return " ".join(sentence)
def convert_str_to_list(cell):
return literal_eval(cell)
|
[
"re.compile",
"gzip.open",
"collections.Counter",
"numpy.array",
"nltk.stem.porter.PorterStemmer",
"ast.literal_eval",
"numpy.zeros",
"numpy.count_nonzero",
"sys.exc_info",
"json.load",
"re.sub",
"numpy.shape",
"json.dump",
"math.exp"
] |
[((256, 290), 're.compile', 're.compile', (['"""(.*)<o>(.*?)</o>(.*)"""'], {}), "('(.*)<o>(.*?)</o>(.*)')\n", (266, 290), False, 'import re\n'), ((305, 339), 're.compile', 're.compile', (['"""(.*)<f>(.*?)</f>(.*)"""'], {}), "('(.*)<f>(.*?)</f>(.*)')\n", (315, 339), False, 'import re\n'), ((354, 384), 're.compile', 're.compile', (['"""<\\\\w>(.*?)</\\\\w>"""'], {}), "('<\\\\w>(.*?)</\\\\w>')\n", (364, 384), False, 'import re\n'), ((397, 419), 're.compile', 're.compile', (['"""\\\\[.*\\\\]"""'], {}), "('\\\\[.*\\\\]')\n", (407, 419), False, 'import re\n'), ((1515, 1530), 'collections.Counter', 'Counter', (['values'], {}), '(values)\n', (1522, 1530), False, 'from collections import Counter\n'), ((1730, 1739), 'collections.Counter', 'Counter', ([], {}), '()\n', (1737, 1739), False, 'from collections import Counter\n'), ((2238, 2253), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2251, 2253), False, 'from nltk.stem.porter import PorterStemmer\n'), ((3202, 3268), 're.sub', 're.sub', (['"""(<\\\\w?>[ \\\\w]+)(</\\\\w?>)([-\\\\w]+)"""', '"""\\\\1\\\\3\\\\2"""', 'sentence'], {}), "('(<\\\\w?>[ \\\\w]+)(</\\\\w?>)([-\\\\w]+)', '\\\\1\\\\3\\\\2', sentence)\n", (3208, 3268), False, 'import re\n'), ((6437, 6453), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6445, 6453), True, 'import numpy as np\n'), ((9018, 9036), 'ast.literal_eval', 'literal_eval', (['cell'], {}), '(cell)\n', (9030, 9036), False, 'from ast import literal_eval\n'), ((455, 476), 'gzip.open', 'gzip.open', (['path', '"""rt"""'], {}), "(path, 'rt')\n", (464, 476), False, 'import gzip\n'), ((3894, 3929), 're.sub', 're.sub', (['TAGGED_EXP', '"""\\\\1"""', 'sentence'], {}), "(TAGGED_EXP, '\\\\1', sentence)\n", (3900, 3929), False, 'import re\n'), ((4790, 4816), 'json.dump', 'json.dump', (['json_content', 'f'], {}), '(json_content, f)\n', (4799, 4816), False, 'import json\n'), ((4886, 4898), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4895, 4898), False, 'import json\n'), ((5772, 5788), 'numpy.zeros', 'np.zeros', (['(r, c)'], {}), '((r, c))\n', (5780, 5788), True, 'import numpy as np\n'), ((7402, 7418), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (7410, 7418), True, 'import numpy as np\n'), ((4124, 4138), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (4132, 4138), True, 'import numpy as np\n'), ((8491, 8502), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8499, 8502), True, 'import numpy as np\n'), ((8520, 8531), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8528, 8531), True, 'import numpy as np\n'), ((5074, 5093), 'numpy.count_nonzero', 'np.count_nonzero', (['M'], {}), '(M)\n', (5090, 5093), True, 'import numpy as np\n'), ((7642, 7657), 'math.exp', 'exp', (['(-sentiment)'], {}), '(-sentiment)\n', (7645, 7657), False, 'from math import exp\n'), ((8597, 8608), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8605, 8608), True, 'import numpy as np\n'), ((3279, 3314), 're.sub', 're.sub', (['"""\\\\(\\\\d+\\\\)$"""', '""""""', 'sentence'], {}), "('\\\\(\\\\d+\\\\)$', '', sentence)\n", (3285, 3314), False, 'import re\n'), ((1439, 1453), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1451, 1453), False, 'import sys\n'), ((2063, 2077), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2075, 2077), False, 'import sys\n'), ((7559, 7568), 'math.exp', 'exp', (['(-cnt)'], {}), '(-cnt)\n', (7562, 7568), False, 'from math import exp\n')]
|
# Copyright (c) 2012-2020 Jicamarca Radio Observatory
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
"""Base class to create plot operations
"""
import os
import sys
import zmq
import time
import numpy
import datetime
from collections import deque
from functools import wraps
from threading import Thread
import matplotlib
if 'BACKEND' in os.environ:
matplotlib.use(os.environ['BACKEND'])
elif 'linux' in sys.platform:
matplotlib.use("TkAgg")
elif 'darwin' in sys.platform:
matplotlib.use('MacOSX')
else:
from schainpy.utils import log
log.warning('Using default Backend="Agg"', 'INFO')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import FuncFormatter, LinearLocator, MultipleLocator
from schainpy.model.data.jrodata import PlotterData
from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
from schainpy.utils import log
jet_values = matplotlib.pyplot.get_cmap('jet', 100)(numpy.arange(100))[10:90]
blu_values = matplotlib.pyplot.get_cmap(
'seismic_r', 20)(numpy.arange(20))[10:15]
ncmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'jro', numpy.vstack((blu_values, jet_values)))
matplotlib.pyplot.register_cmap(cmap=ncmap)
CMAPS = [plt.get_cmap(s) for s in ('jro', 'jet', 'viridis',
'plasma', 'inferno', 'Greys', 'seismic', 'bwr', 'coolwarm')]
EARTH_RADIUS = 6.3710e3
def ll2xy(lat1, lon1, lat2, lon2):
p = 0.017453292519943295
a = 0.5 - numpy.cos((lat2 - lat1) * p)/2 + numpy.cos(lat1 * p) * \
numpy.cos(lat2 * p) * (1 - numpy.cos((lon2 - lon1) * p)) / 2
r = 12742 * numpy.arcsin(numpy.sqrt(a))
theta = numpy.arctan2(numpy.sin((lon2-lon1)*p)*numpy.cos(lat2*p), numpy.cos(lat1*p)
* numpy.sin(lat2*p)-numpy.sin(lat1*p)*numpy.cos(lat2*p)*numpy.cos((lon2-lon1)*p))
theta = -theta + numpy.pi/2
return r*numpy.cos(theta), r*numpy.sin(theta)
def km2deg(km):
'''
Convert distance in km to degrees
'''
return numpy.rad2deg(km/EARTH_RADIUS)
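# Example: km2deg(EARTH_RADIUS) ~ 57.3, i.e. one Earth radius of arc equals one radian.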
def figpause(interval):
backend = plt.rcParams['backend']
if backend in matplotlib.rcsetup.interactive_bk:
figManager = matplotlib._pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
if canvas.figure.stale:
canvas.draw()
try:
canvas.start_event_loop(interval)
except:
pass
return
def popup(message):
'''
'''
fig = plt.figure(figsize=(12, 8), facecolor='r')
text = '\n'.join([s.strip() for s in message.split(':')])
fig.text(0.01, 0.5, text, ha='left', va='center',
size='20', weight='heavy', color='w')
fig.show()
figpause(1000)
class Throttle(object):
'''
Decorator that prevents a function from being called more than once every
time period.
    To create a function that cannot be called more than once a minute (extra
    calls within the period are simply skipped):
@Throttle(minutes=1)
def foo():
pass
for i in range(10):
foo()
print "This function has run %s times." % i
'''
def __init__(self, seconds=0, minutes=0, hours=0):
self.throttle_period = datetime.timedelta(
seconds=seconds, minutes=minutes, hours=hours
)
self.time_of_last_call = datetime.datetime.min
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
coerce = kwargs.pop('coerce', None)
if coerce:
self.time_of_last_call = datetime.datetime.now()
return fn(*args, **kwargs)
else:
now = datetime.datetime.now()
time_since_last_call = now - self.time_of_last_call
time_left = self.throttle_period - time_since_last_call
if time_left > datetime.timedelta(seconds=0):
return
self.time_of_last_call = datetime.datetime.now()
return fn(*args, **kwargs)
return wrapper
def apply_throttle(value):
@Throttle(seconds=value)
def fnThrottled(fn):
fn()
return fnThrottled
@MPDecorator
class Plot(Operation):
"""Base class for Schain plotting operations
    This class should never be used directly; you must subclass it to create a new
    operation. Child classes must be defined as follows:
ExamplePlot(Plot):
CODE = 'code'
colormap = 'jet'
plot_type = 'pcolor' # options are ('pcolor', 'pcolorbuffer', 'scatter', 'scatterbuffer')
def setup(self):
pass
def plot(self):
pass
"""
CODE = 'Figure'
colormap = 'jet'
bgcolor = 'white'
buffering = True
__missing = 1E30
__attrs__ = ['show', 'save', 'ymin', 'ymax', 'zmin', 'zmax', 'title',
'showprofile']
def __init__(self):
Operation.__init__(self)
self.isConfig = False
self.isPlotConfig = False
self.save_time = 0
self.sender_time = 0
self.data = None
self.firsttime = True
self.sender_queue = deque(maxlen=10)
self.plots_adjust = {'left': 0.125, 'right': 0.9, 'bottom': 0.15, 'top': 0.9, 'wspace': 0.2, 'hspace': 0.2}
def __fmtTime(self, x, pos):
'''
'''
return '{}'.format(self.getDateTime(x).strftime('%H:%M'))
def __setup(self, **kwargs):
'''
Initialize variables
'''
self.figures = []
self.axes = []
self.cb_axes = []
self.localtime = kwargs.pop('localtime', True)
self.show = kwargs.get('show', True)
self.save = kwargs.get('save', False)
self.save_period = kwargs.get('save_period', 0)
self.colormap = kwargs.get('colormap', self.colormap)
self.colormap_coh = kwargs.get('colormap_coh', 'jet')
self.colormap_phase = kwargs.get('colormap_phase', 'RdBu_r')
self.colormaps = kwargs.get('colormaps', None)
self.bgcolor = kwargs.get('bgcolor', self.bgcolor)
self.showprofile = kwargs.get('showprofile', False)
self.title = kwargs.get('wintitle', self.CODE.upper())
self.cb_label = kwargs.get('cb_label', None)
self.cb_labels = kwargs.get('cb_labels', None)
self.labels = kwargs.get('labels', None)
self.xaxis = kwargs.get('xaxis', 'frequency')
self.zmin = kwargs.get('zmin', None)
self.zmax = kwargs.get('zmax', None)
self.zlimits = kwargs.get('zlimits', None)
self.xmin = kwargs.get('xmin', None)
self.xmax = kwargs.get('xmax', None)
self.xrange = kwargs.get('xrange', 12)
self.xscale = kwargs.get('xscale', None)
self.ymin = kwargs.get('ymin', None)
self.ymax = kwargs.get('ymax', None)
self.yscale = kwargs.get('yscale', None)
self.xlabel = kwargs.get('xlabel', None)
self.attr_time = kwargs.get('attr_time', 'utctime')
self.attr_data = kwargs.get('attr_data', 'data_param')
self.decimation = kwargs.get('decimation', None)
self.showSNR = kwargs.get('showSNR', False)
self.oneFigure = kwargs.get('oneFigure', True)
self.width = kwargs.get('width', None)
self.height = kwargs.get('height', None)
self.colorbar = kwargs.get('colorbar', True)
self.factors = kwargs.get('factors', [1, 1, 1, 1, 1, 1, 1, 1])
self.channels = kwargs.get('channels', None)
self.titles = kwargs.get('titles', [])
self.polar = False
self.type = kwargs.get('type', 'iq')
self.grid = kwargs.get('grid', False)
self.pause = kwargs.get('pause', False)
self.save_code = kwargs.get('save_code', self.CODE)
self.throttle = kwargs.get('throttle', 0)
self.exp_code = kwargs.get('exp_code', None)
self.server = kwargs.get('server', False)
self.sender_period = kwargs.get('sender_period', 60)
self.tag = kwargs.get('tag', '')
self.height_index = kwargs.get('height_index', None)
self.__throttle_plot = apply_throttle(self.throttle)
code = self.attr_data if self.attr_data else self.CODE
self.data = PlotterData(self.CODE, self.exp_code, self.localtime)
if self.server:
if not self.server.startswith('tcp://'):
self.server = 'tcp://{}'.format(self.server)
log.success(
'Sending to server: {}'.format(self.server),
self.name
)
def __setup_plot(self):
'''
Common setup for all figures, here figures and axes are created
'''
self.setup()
self.time_label = 'LT' if self.localtime else 'UTC'
if self.width is None:
self.width = 8
self.figures = []
self.axes = []
self.cb_axes = []
self.pf_axes = []
self.cmaps = []
size = '15%' if self.ncols == 1 else '30%'
pad = '4%' if self.ncols == 1 else '8%'
if self.oneFigure:
if self.height is None:
self.height = 1.4 * self.nrows + 1
fig = plt.figure(figsize=(self.width, self.height),
edgecolor='k',
facecolor='w')
self.figures.append(fig)
for n in range(self.nplots):
ax = fig.add_subplot(self.nrows, self.ncols,
n + 1, polar=self.polar)
ax.tick_params(labelsize=8)
ax.firsttime = True
ax.index = 0
ax.press = None
self.axes.append(ax)
if self.showprofile:
cax = self.__add_axes(ax, size=size, pad=pad)
cax.tick_params(labelsize=8)
self.pf_axes.append(cax)
else:
if self.height is None:
self.height = 3
for n in range(self.nplots):
fig = plt.figure(figsize=(self.width, self.height),
edgecolor='k',
facecolor='w')
ax = fig.add_subplot(1, 1, 1, polar=self.polar)
ax.tick_params(labelsize=8)
ax.firsttime = True
ax.index = 0
ax.press = None
self.figures.append(fig)
self.axes.append(ax)
if self.showprofile:
cax = self.__add_axes(ax, size=size, pad=pad)
cax.tick_params(labelsize=8)
self.pf_axes.append(cax)
for n in range(self.nrows):
if self.colormaps is not None:
cmap = plt.get_cmap(self.colormaps[n])
else:
cmap = plt.get_cmap(self.colormap)
cmap.set_bad(self.bgcolor, 1.)
self.cmaps.append(cmap)
def __add_axes(self, ax, size='30%', pad='8%'):
'''
Add new axes to the given figure
'''
divider = make_axes_locatable(ax)
nax = divider.new_horizontal(size=size, pad=pad)
ax.figure.add_axes(nax)
return nax
def fill_gaps(self, x_buffer, y_buffer, z_buffer):
'''
Create a masked array for missing data
'''
if x_buffer.shape[0] < 2:
return x_buffer, y_buffer, z_buffer
deltas = x_buffer[1:] - x_buffer[0:-1]
x_median = numpy.median(deltas)
index = numpy.where(deltas > 5 * x_median)
if len(index[0]) != 0:
z_buffer[::, index[0], ::] = self.__missing
z_buffer = numpy.ma.masked_inside(z_buffer,
0.99 * self.__missing,
1.01 * self.__missing)
return x_buffer, y_buffer, z_buffer
def decimate(self):
# dx = int(len(self.x)/self.__MAXNUMX) + 1
dy = int(len(self.y) / self.decimation) + 1
# x = self.x[::dx]
x = self.x
y = self.y[::dy]
z = self.z[::, ::, ::dy]
return x, y, z
def format(self):
'''
Set min and max values, labels, ticks and titles
'''
for n, ax in enumerate(self.axes):
if ax.firsttime:
if self.xaxis != 'time':
xmin = self.xmin
xmax = self.xmax
else:
xmin = self.tmin
xmax = self.tmin + self.xrange*60*60
ax.xaxis.set_major_formatter(FuncFormatter(self.__fmtTime))
ax.xaxis.set_major_locator(LinearLocator(9))
ymin = self.ymin if self.ymin is not None else numpy.nanmin(self.y[numpy.isfinite(self.y)])
ymax = self.ymax if self.ymax is not None else numpy.nanmax(self.y[numpy.isfinite(self.y)])
ax.set_facecolor(self.bgcolor)
if self.xscale:
ax.xaxis.set_major_formatter(FuncFormatter(
lambda x, pos: '{0:g}'.format(x*self.xscale)))
if self.yscale:
ax.yaxis.set_major_formatter(FuncFormatter(
lambda x, pos: '{0:g}'.format(x*self.yscale)))
if self.xlabel is not None:
ax.set_xlabel(self.xlabel)
if self.ylabel is not None:
ax.set_ylabel(self.ylabel)
if self.showprofile:
self.pf_axes[n].set_ylim(ymin, ymax)
self.pf_axes[n].set_xlim(self.zmin, self.zmax)
self.pf_axes[n].set_xlabel('dB')
self.pf_axes[n].grid(b=True, axis='x')
[tick.set_visible(False)
for tick in self.pf_axes[n].get_yticklabels()]
if self.colorbar:
ax.cbar = plt.colorbar(
ax.plt, ax=ax, fraction=0.05, pad=0.02, aspect=10)
ax.cbar.ax.tick_params(labelsize=8)
ax.cbar.ax.press = None
if self.cb_label:
ax.cbar.set_label(self.cb_label, size=8)
elif self.cb_labels:
ax.cbar.set_label(self.cb_labels[n], size=8)
else:
ax.cbar = None
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.firsttime = False
if self.grid:
ax.grid(True)
if not self.polar:
ax.set_title('{} {} {}'.format(
self.titles[n],
self.getDateTime(self.data.max_time).strftime(
'%Y-%m-%d %H:%M:%S'),
self.time_label),
size=8)
else:
ax.set_title('{}'.format(self.titles[n]), size=8)
ax.set_ylim(0, 90)
ax.set_yticks(numpy.arange(0, 90, 20))
ax.yaxis.labelpad = 40
if self.firsttime:
for n, fig in enumerate(self.figures):
fig.subplots_adjust(**self.plots_adjust)
self.firsttime = False
def clear_figures(self):
'''
Reset axes for redraw plots
'''
for ax in self.axes+self.pf_axes+self.cb_axes:
ax.clear()
ax.firsttime = True
if hasattr(ax, 'cbar') and ax.cbar:
ax.cbar.remove()
def __plot(self):
'''
Main function to plot, format and save figures
'''
self.plot()
self.format()
for n, fig in enumerate(self.figures):
if self.nrows == 0 or self.nplots == 0:
log.warning('No data', self.name)
fig.text(0.5, 0.5, 'No Data', fontsize='large', ha='center')
fig.canvas.manager.set_window_title(self.CODE)
continue
fig.canvas.manager.set_window_title('{} - {}'.format(self.title,
self.getDateTime(self.data.max_time).strftime('%Y/%m/%d')))
fig.canvas.draw()
if self.show:
fig.show()
figpause(0.01)
if self.save:
self.save_figure(n)
if self.server:
self.send_to_server()
def __update(self, dataOut, timestamp):
'''
'''
metadata = {
'yrange': dataOut.heightList,
'interval': dataOut.timeInterval,
'channels': dataOut.channelList
}
data, meta = self.update(dataOut)
metadata.update(meta)
self.data.update(data, timestamp, metadata)
def save_figure(self, n):
'''
'''
if (self.data.max_time - self.save_time) <= self.save_period:
return
self.save_time = self.data.max_time
fig = self.figures[n]
figname = os.path.join(
self.save,
self.save_code,
'{}_{}.png'.format(
self.save_code,
self.getDateTime(self.data.max_time).strftime(
'%Y%m%d_%H%M%S'
),
)
)
log.log('Saving figure: {}'.format(figname), self.name)
if not os.path.isdir(os.path.dirname(figname)):
os.makedirs(os.path.dirname(figname))
fig.savefig(figname)
if self.throttle == 0:
figname = os.path.join(
self.save,
'{}_{}.png'.format(
self.save_code,
self.getDateTime(self.data.min_time).strftime(
'%Y%m%d'
),
)
)
fig.savefig(figname)
def send_to_server(self):
'''
'''
        if self.exp_code is None:
            log.warning('Missing `exp_code` skipping sending to server...')
            return
last_time = self.data.max_time
interval = last_time - self.sender_time
if interval < self.sender_period:
return
self.sender_time = last_time
attrs = ['titles', 'zmin', 'zmax', 'tag', 'ymin', 'ymax']
for attr in attrs:
value = getattr(self, attr)
if value:
if isinstance(value, (numpy.float32, numpy.float64)):
value = round(float(value), 2)
self.data.meta[attr] = value
if self.colormap == 'jet':
self.data.meta['colormap'] = 'Jet'
elif 'RdBu' in self.colormap:
self.data.meta['colormap'] = 'RdBu'
else:
self.data.meta['colormap'] = 'Viridis'
self.data.meta['interval'] = int(interval)
self.sender_queue.append(last_time)
while True:
try:
tm = self.sender_queue.popleft()
except IndexError:
break
msg = self.data.jsonify(tm, self.save_code, self.plot_type)
self.socket.send_string(msg)
socks = dict(self.poll.poll(2000))
if socks.get(self.socket) == zmq.POLLIN:
reply = self.socket.recv_string()
if reply == 'ok':
log.log("Response from server ok", self.name)
time.sleep(0.1)
continue
else:
log.warning(
"Malformed reply from server: {}".format(reply), self.name)
else:
log.warning(
"No response from server, retrying...", self.name)
self.sender_queue.appendleft(tm)
self.socket.setsockopt(zmq.LINGER, 0)
self.socket.close()
self.poll.unregister(self.socket)
self.socket = self.context.socket(zmq.REQ)
self.socket.connect(self.server)
self.poll.register(self.socket, zmq.POLLIN)
break
def setup(self):
'''
This method should be implemented in the child class, the following
attributes should be set:
self.nrows: number of rows
self.ncols: number of cols
self.nplots: number of plots (channels or pairs)
self.ylabel: label for Y axes
self.titles: list of axes title
'''
raise NotImplementedError
def plot(self):
'''
Must be defined in the child class, the actual plotting method
'''
raise NotImplementedError
def update(self, dataOut):
'''
Must be defined in the child class, update self.data with new data
'''
data = {
self.CODE: getattr(dataOut, 'data_{}'.format(self.CODE))
}
meta = {}
return data, meta
def run(self, dataOut, **kwargs):
'''
Main plotting routine
'''
if self.isConfig is False:
self.__setup(**kwargs)
if self.localtime:
self.getDateTime = datetime.datetime.fromtimestamp
else:
self.getDateTime = datetime.datetime.utcfromtimestamp
self.data.setup()
self.isConfig = True
if self.server:
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
self.socket.connect(self.server)
self.poll = zmq.Poller()
self.poll.register(self.socket, zmq.POLLIN)
tm = getattr(dataOut, self.attr_time)
if self.data and 'time' in self.xaxis and (tm - self.tmin) >= self.xrange*60*60:
self.save_time = tm
self.__plot()
self.tmin += self.xrange*60*60
self.data.setup()
self.clear_figures()
self.__update(dataOut, tm)
if self.isPlotConfig is False:
self.__setup_plot()
self.isPlotConfig = True
if self.xaxis == 'time':
dt = self.getDateTime(tm)
if self.xmin is None:
self.tmin = tm
self.xmin = dt.hour
minutes = (self.xmin-int(self.xmin)) * 60
seconds = (minutes - int(minutes)) * 60
self.tmin = (dt.replace(hour=int(self.xmin), minute=int(minutes), second=int(seconds)) -
datetime.datetime(1970, 1, 1)).total_seconds()
if self.localtime:
self.tmin += time.timezone
if self.xmin is not None and self.xmax is not None:
self.xrange = self.xmax - self.xmin
if self.throttle == 0:
self.__plot()
else:
self.__throttle_plot(self.__plot)#, coerce=coerce)
def close(self):
if self.data and not self.data.flagNoData:
self.save_time = self.data.max_time
self.__plot()
if self.data and not self.data.flagNoData and self.pause:
figpause(10)
|
[
"numpy.sqrt",
"time.sleep",
"zmq.Poller",
"numpy.isfinite",
"numpy.sin",
"datetime.timedelta",
"numpy.arange",
"datetime.datetime",
"collections.deque",
"matplotlib.ticker.FuncFormatter",
"numpy.where",
"functools.wraps",
"matplotlib._pylab_helpers.Gcf.get_active",
"numpy.vstack",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.rad2deg",
"numpy.ma.masked_inside",
"matplotlib.ticker.LinearLocator",
"matplotlib.use",
"os.path.dirname",
"numpy.cos",
"matplotlib.pyplot.register_cmap",
"schainpy.model.data.jrodata.PlotterData",
"schainpy.model.proc.jroproc_base.Operation.__init__",
"matplotlib.pyplot.get_cmap",
"numpy.median",
"schainpy.utils.log.warning",
"schainpy.utils.log.log",
"matplotlib.pyplot.colorbar",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"zmq.Context"
] |
[((1323, 1366), 'matplotlib.pyplot.register_cmap', 'matplotlib.pyplot.register_cmap', ([], {'cmap': 'ncmap'}), '(cmap=ncmap)\n', (1354, 1366), False, 'import matplotlib\n'), ((395, 432), 'matplotlib.use', 'matplotlib.use', (["os.environ['BACKEND']"], {}), "(os.environ['BACKEND'])\n", (409, 432), False, 'import matplotlib\n'), ((1283, 1321), 'numpy.vstack', 'numpy.vstack', (['(blu_values, jet_values)'], {}), '((blu_values, jet_values))\n', (1295, 1321), False, 'import numpy\n'), ((1377, 1392), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['s'], {}), '(s)\n', (1389, 1392), True, 'import matplotlib.pyplot as plt\n'), ((2161, 2193), 'numpy.rad2deg', 'numpy.rad2deg', (['(km / EARTH_RADIUS)'], {}), '(km / EARTH_RADIUS)\n', (2174, 2193), False, 'import numpy\n'), ((2688, 2730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)', 'facecolor': '"""r"""'}), "(figsize=(12, 8), facecolor='r')\n", (2698, 2730), True, 'import matplotlib.pyplot as plt\n'), ((467, 490), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (481, 490), False, 'import matplotlib\n'), ((1059, 1097), 'matplotlib.pyplot.get_cmap', 'matplotlib.pyplot.get_cmap', (['"""jet"""', '(100)'], {}), "('jet', 100)\n", (1085, 1097), False, 'import matplotlib\n'), ((1098, 1115), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (1110, 1115), False, 'import numpy\n'), ((1137, 1180), 'matplotlib.pyplot.get_cmap', 'matplotlib.pyplot.get_cmap', (['"""seismic_r"""', '(20)'], {}), "('seismic_r', 20)\n", (1163, 1180), False, 'import matplotlib\n'), ((1186, 1202), 'numpy.arange', 'numpy.arange', (['(20)'], {}), '(20)\n', (1198, 1202), False, 'import numpy\n'), ((2330, 2372), 'matplotlib._pylab_helpers.Gcf.get_active', 'matplotlib._pylab_helpers.Gcf.get_active', ([], {}), '()\n', (2370, 2372), False, 'import matplotlib\n'), ((3409, 3474), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds', 'minutes': 'minutes', 'hours': 'hours'}), '(seconds=seconds, minutes=minutes, hours=hours)\n', (3427, 3474), False, 'import datetime\n'), ((3591, 3600), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (3596, 3600), False, 'from functools import wraps\n'), ((5084, 5108), 'schainpy.model.proc.jroproc_base.Operation.__init__', 'Operation.__init__', (['self'], {}), '(self)\n', (5102, 5108), False, 'from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator\n'), ((5312, 5328), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (5317, 5328), False, 'from collections import deque\n'), ((8384, 8437), 'schainpy.model.data.jrodata.PlotterData', 'PlotterData', (['self.CODE', 'self.exp_code', 'self.localtime'], {}), '(self.CODE, self.exp_code, self.localtime)\n', (8395, 8437), False, 'from schainpy.model.data.jrodata import PlotterData\n'), ((11241, 11264), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (11260, 11264), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((11649, 11669), 'numpy.median', 'numpy.median', (['deltas'], {}), '(deltas)\n', (11661, 11669), False, 'import numpy\n'), ((11687, 11721), 'numpy.where', 'numpy.where', (['(deltas > 5 * x_median)'], {}), '(deltas > 5 * x_median)\n', (11698, 11721), False, 'import numpy\n'), ((526, 550), 'matplotlib.use', 'matplotlib.use', (['"""MacOSX"""'], {}), "('MacOSX')\n", (540, 550), False, 'import matplotlib\n'), ((596, 646), 'schainpy.utils.log.warning', 'log.warning', (['"""Using default Backend="Agg\\""""', '"""INFO"""'], {}), 
'(\'Using default Backend="Agg"\', \'INFO\')\n', (607, 646), False, 'from schainpy.utils import log\n'), ((651, 672), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (665, 672), False, 'import matplotlib\n'), ((1784, 1797), 'numpy.sqrt', 'numpy.sqrt', (['a'], {}), '(a)\n', (1794, 1797), False, 'import numpy\n'), ((1825, 1853), 'numpy.sin', 'numpy.sin', (['((lon2 - lon1) * p)'], {}), '((lon2 - lon1) * p)\n', (1834, 1853), False, 'import numpy\n'), ((1850, 1869), 'numpy.cos', 'numpy.cos', (['(lat2 * p)'], {}), '(lat2 * p)\n', (1859, 1869), False, 'import numpy\n'), ((2040, 2056), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (2049, 2056), False, 'import numpy\n'), ((2060, 2076), 'numpy.sin', 'numpy.sin', (['theta'], {}), '(theta)\n', (2069, 2076), False, 'import numpy\n'), ((4150, 4173), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4171, 4173), False, 'import datetime\n'), ((9345, 9420), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.width, self.height)', 'edgecolor': '"""k"""', 'facecolor': '"""w"""'}), "(figsize=(self.width, self.height), edgecolor='k', facecolor='w')\n", (9355, 9420), True, 'import matplotlib.pyplot as plt\n'), ((11833, 11911), 'numpy.ma.masked_inside', 'numpy.ma.masked_inside', (['z_buffer', '(0.99 * self.__missing)', '(1.01 * self.__missing)'], {}), '(z_buffer, 0.99 * self.__missing, 1.01 * self.__missing)\n', (11855, 11911), False, 'import numpy\n'), ((18205, 18268), 'schainpy.utils.log.warning', 'log.warning', (['"""Missing `exp_code` skipping sending to server..."""'], {}), "('Missing `exp_code` skipping sending to server...')\n", (18216, 18268), False, 'from schainpy.utils import log\n'), ((1629, 1657), 'numpy.cos', 'numpy.cos', (['((lat2 - lat1) * p)'], {}), '((lat2 - lat1) * p)\n', (1638, 1657), False, 'import numpy\n'), ((1869, 1888), 'numpy.cos', 'numpy.cos', (['(lat1 * p)'], {}), '(lat1 * p)\n', (1878, 1888), False, 'import numpy\n'), ((1915, 1934), 'numpy.sin', 'numpy.sin', (['(lat2 * p)'], {}), '(lat2 * p)\n', (1924, 1934), False, 'import numpy\n'), ((1969, 1997), 'numpy.cos', 'numpy.cos', (['((lon2 - lon1) * p)'], {}), '((lon2 - lon1) * p)\n', (1978, 1997), False, 'import numpy\n'), ((3751, 3774), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3772, 3774), False, 'import datetime\n'), ((3858, 3881), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3879, 3881), False, 'import datetime\n'), ((10200, 10275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.width, self.height)', 'edgecolor': '"""k"""', 'facecolor': '"""w"""'}), "(figsize=(self.width, self.height), edgecolor='k', facecolor='w')\n", (10210, 10275), True, 'import matplotlib.pyplot as plt\n'), ((10925, 10956), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['self.colormaps[n]'], {}), '(self.colormaps[n])\n', (10937, 10956), True, 'import matplotlib.pyplot as plt\n'), ((10998, 11025), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['self.colormap'], {}), '(self.colormap)\n', (11010, 11025), True, 'import matplotlib.pyplot as plt\n'), ((15970, 16003), 'schainpy.utils.log.warning', 'log.warning', (['"""No data"""', 'self.name'], {}), "('No data', self.name)\n", (15981, 16003), False, 'from schainpy.utils import log\n'), ((17630, 17654), 'os.path.dirname', 'os.path.dirname', (['figname'], {}), '(figname)\n', (17645, 17654), False, 'import os\n'), ((17681, 17705), 'os.path.dirname', 'os.path.dirname', (['figname'], {}), '(figname)\n', (17696, 17705), False, 'import os\n'), 
((19872, 19934), 'schainpy.utils.log.warning', 'log.warning', (['"""No response from server, retrying..."""', 'self.name'], {}), "('No response from server, retrying...', self.name)\n", (19883, 19934), False, 'from schainpy.utils import log\n'), ((21610, 21623), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (21621, 21623), False, 'import zmq\n'), ((21760, 21772), 'zmq.Poller', 'zmq.Poller', ([], {}), '()\n', (21770, 21772), False, 'import zmq\n'), ((1662, 1681), 'numpy.cos', 'numpy.cos', (['(lat1 * p)'], {}), '(lat1 * p)\n', (1671, 1681), False, 'import numpy\n'), ((1694, 1713), 'numpy.cos', 'numpy.cos', (['(lat2 * p)'], {}), '(lat2 * p)\n', (1703, 1713), False, 'import numpy\n'), ((1721, 1749), 'numpy.cos', 'numpy.cos', (['((lon2 - lon1) * p)'], {}), '((lon2 - lon1) * p)\n', (1730, 1749), False, 'import numpy\n'), ((1933, 1952), 'numpy.sin', 'numpy.sin', (['(lat1 * p)'], {}), '(lat1 * p)\n', (1942, 1952), False, 'import numpy\n'), ((1951, 1970), 'numpy.cos', 'numpy.cos', (['(lat2 * p)'], {}), '(lat2 * p)\n', (1960, 1970), False, 'import numpy\n'), ((4054, 4083), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (4072, 4083), False, 'import datetime\n'), ((14101, 14164), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ax.plt'], {'ax': 'ax', 'fraction': '(0.05)', 'pad': '(0.02)', 'aspect': '(10)'}), '(ax.plt, ax=ax, fraction=0.05, pad=0.02, aspect=10)\n', (14113, 14164), True, 'import matplotlib.pyplot as plt\n'), ((15184, 15207), 'numpy.arange', 'numpy.arange', (['(0)', '(90)', '(20)'], {}), '(0, 90, 20)\n', (15196, 15207), False, 'import numpy\n'), ((19588, 19633), 'schainpy.utils.log.log', 'log.log', (['"""Response from server ok"""', 'self.name'], {}), "('Response from server ok', self.name)\n", (19595, 19633), False, 'from schainpy.utils import log\n'), ((19654, 19669), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (19664, 19669), False, 'import time\n'), ((12776, 12805), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['self.__fmtTime'], {}), '(self.__fmtTime)\n', (12789, 12805), False, 'from matplotlib.ticker import FuncFormatter, LinearLocator, MultipleLocator\n'), ((12854, 12870), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(9)'], {}), '(9)\n', (12867, 12870), False, 'from matplotlib.ticker import FuncFormatter, LinearLocator, MultipleLocator\n'), ((12955, 12977), 'numpy.isfinite', 'numpy.isfinite', (['self.y'], {}), '(self.y)\n', (12969, 12977), False, 'import numpy\n'), ((13063, 13085), 'numpy.isfinite', 'numpy.isfinite', (['self.y'], {}), '(self.y)\n', (13077, 13085), False, 'import numpy\n'), ((22726, 22755), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (22743, 22755), False, 'import datetime\n')]
|
import gym
import numpy as np
from copy import deepcopy
from gym_fabrikatioRL.envs.core import Core
from gym_fabrikatioRL.envs.core_state import State
from gym_fabrikatioRL.envs.interface_input import Input
from gym_fabrikatioRL.envs.env_utils import UndefinedOptimizerConfiguration
from gym_fabrikatioRL.envs.env_utils import UndefinedOptimizerTargetMode
from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall
from gym_fabrikatioRL.envs.env_utils import IllegalAction
class FabricatioRL(gym.Env):
def __init__(self, scheduling_inputs, seeds='', logfile_path='',
return_transformer=None, selectable_optimizers=None):
# SEED DEFINITION
if bool(seeds):
self.__seeds_remaining = seeds[1:]
self.__seeds_used = [seeds[0]]
init_seed = seeds[0]
else:
self.__seeds_remaining = []
self.__seeds_used = []
init_seed = -1
# SETUP DEFINITION
self.__parameters = Input(scheduling_inputs, init_seed, logfile_path)
# CORE
self.__core = Core(deepcopy(self.__parameters))
# INTERFACE OBJECTS
# return transformer
self.__return_transformer = return_transformer
        # optimizers
self.__optimizer_configuration = -1 # becomes value in {0, .., 11}
self.__sequencing_optimizers = None
self.__transport_optimizers = None
self.__setup_optimizers(selectable_optimizers
if selectable_optimizers is not None else [])
        # needed when sequencing and transport decisions are made by the same agent
self.__transport_decision_offset = None
# action and state space
self.action_space = None
self.observation_space = None
self.__get_action_space()
self.__get_observation_space()
# <editor-fold desc="Environment Interface">
def step(self, action: int):
try:
direct_action = self.__transform_action(action)
except IllegalAction:
state_repr = self.__return_transformer.transform_state(
self.__core.state)
return state_repr, -1, True, {}
state, done = self.__core.step(direct_action)
if self.__return_transformer is None:
# something other than RL is using this simulation
return state, None, done, {}
state_repr = self.__return_transformer.transform_state(state)
reward = self.__return_transformer.transform_reward(state)
while self.autoplay() and not done:
state_repr, reward, done, _ = self.step(0)
return state_repr, reward, done, {}
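    # Illustrative interaction sketch (editor addition, not part of the original
    # module); `scheduling_inputs` and `transformer` are hypothetical placeholders
    # for a concrete problem definition and return transformer:
    #     env = FabricatioRL(scheduling_inputs, return_transformer=transformer)
    #     obs = env.reset()
    #     obs, reward, done, info = env.step(env.action_space.sample())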
def autoplay(self):
return ((self.__core.state.scheduling_mode == 0 and
self.__optimizer_configuration in {4, 6, 7}) or
(self.__core.state.scheduling_mode == 1 and
self.__optimizer_configuration in {2, 6, 10}))
def reset(self) -> State:
# seed cycling if seeds were passed
if bool(self.__seeds_remaining) or bool(self.__seeds_used):
if len(self.__seeds_remaining) > 0:
seed = self.__seeds_remaining.pop(0)
self.__seeds_used.append(seed)
else:
self.__seeds_remaining = self.__seeds_used[1:]
seed = self.__seeds_used[0]
self.__seeds_used = [seed]
self.__parameters = Input(self.__parameters.scheduling_inputs,
seed, self.__parameters.logfile_path)
else:
self.__parameters = Input(
self.__parameters.scheduling_inputs,
logfile_path=self.__parameters.logfile_path)
self.__core = Core(self.__parameters)
if self.__return_transformer is not None:
return self.__return_transformer.transform_state(self.__core.state)
else:
return self.__core.state
def render(self, mode='dummy'):
raise NotImplementedError
def get_legal_actions(self):
"""
Returns a list of legal actions for each simulation mode and optimizer
mode combination.
:return: The legal actions in this state.
"""
# TODO: implement masking
toffs = self.__transport_decision_offset
n_to = self.__transport_optimizers.shape[0]
if self.__optimizer_configuration == 0:
if self.__core.state.scheduling_mode == 0:
return self.__core.state.legal_actions
else:
return [a + toffs - 1 for a in self.__core.state.legal_actions]
elif self.__optimizer_configuration in {1, 2}:
if self.__core.state.scheduling_mode == 0:
return self.__core.state.legal_actions
else:
raise UndefinedLegalActionCall(
self.__optimizer_configuration,
self.__core.state.scheduling_mode)
elif self.__optimizer_configuration == 3:
if self.__core.state.scheduling_mode == 0:
return self.__core.state.legal_actions
else:
return [toffs + i for i in range(n_to)]
elif self.__optimizer_configuration == 4:
if self.__core.state.scheduling_mode == 0:
raise UndefinedLegalActionCall(
self.__optimizer_configuration,
self.__core.state.scheduling_mode)
else:
return self.__core.state.legal_actions
elif self.__optimizer_configuration in {5, 6}:
raise UndefinedLegalActionCall(
self.__optimizer_configuration,
self.__core.state.scheduling_mode)
elif self.__optimizer_configuration == 7:
if self.__core.state.scheduling_mode == 0:
raise UndefinedLegalActionCall(
self.__optimizer_configuration,
self.__core.state.scheduling_mode)
else:
return list(range(n_to))
elif self.__optimizer_configuration == 8:
if self.__core.state.scheduling_mode == 0:
return list(range(toffs))
else:
return [a + toffs - 1 for a in self.__core.state.legal_actions]
elif self.__optimizer_configuration in {9, 10}:
if self.__core.state.scheduling_mode == 0:
return list(range(len(self.sequencing_optimizers)))
else:
raise UndefinedLegalActionCall(
self.__optimizer_configuration,
self.__core.state.scheduling_mode)
else: # self.__optimizer_configuration == 11:
if self.__core.state.scheduling_mode == 0:
return list(range(toffs))
else:
return [toffs + i for i in range(n_to)]
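    # Worked example (editor addition): in configuration 0 the transport offset is
    # n * o + 1, so a core transport action m (machines are 1-indexed) is exposed
    # to the agent as m + offset - 1; __transform_a_direct_action_run reverses this
    # with core_action = agent_action - offset + 1.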
def make_deterministic(self):
"""
Purges all stochasticity from the simulation.
This breaks the environment in that one cannot recover the initial
stochastic events purged by this method.
:return: None.
"""
self.__core.make_deterministic()
def seed(self, seed=-1):
self.__seeds_remaining = seed
self.__seeds_used = []
# </editor-fold>
# <editor-fold desc="Optimizer Configuration">
def __setup_optimizers(self, selectable_opt: list):
"""
Splits the transport and sequencing optimizers according to their type
parameter, and initializes the optimizer_configuration parameter
defining the action space definition and action selection schemes.
:param selectable_opt: The list of optimizers.
:return: None
"""
seq_opt, tra_opt = [], []
for optimizer in selectable_opt:
if optimizer.target_mode == 'sequencing':
seq_opt.append(optimizer)
elif optimizer.target_mode == 'transport':
tra_opt.append(optimizer)
else:
raise UndefinedOptimizerTargetMode()
self.__sequencing_optimizers = np.array(seq_opt)
self.__transport_optimizers = np.array(tra_opt)
self.__setup_optimizer_config()
def __is_sequencing_only_simulation(self):
"""
If all types can be executed on exactly one machine, and the operation
ordering is sequential, then there is no transport decision to be made,
since jobs have only one downstream machine to be routed to. In such a
case, return True.
:return: True, if no transport decisions need to be made.
"""
type_to_machine = self.__parameters.matrices_m.machine_capabilities_dt
prec_list = self.__parameters.matrices_j.operation_precedence_l
for _, eligible_machines in type_to_machine.items():
if len(eligible_machines) > 1:
return False
for node_to_neighbor_map in prec_list:
for _, neighbors in node_to_neighbor_map.items():
if len(neighbors) > 1:
return False
return True
def __setup_optimizer_config(self):
"""
Initializes the optimizer_configuration parameter influencing the action
space definition and action translation to one of 11 integer values
defined as follows:
0: Direct sequencing action and direct transport action
1: Direct sequencing action (sequencing only simulation)
2: Direct sequencing action and fixed transport optimizer
3: Selectable sequencing optimizer and selectable transport optimizer
4: Fixed sequencing optimizer and direct transport action
5: Fixed sequencing optimizer run (sequencing only simulation)
6: Fixed sequencing and routing optimizer run
7: Fixed sequencing and selectable transport optimizer
8: Selectable sequencing optimizer and direct transport action
9: Selectable sequencing optimizer (sequencing only simulation)
10: Selectable sequencing optimizer and fixed transport optimizer
11: Selectable sequencing and transport optimizers
:return: None
"""
n_to = self.__transport_optimizers.shape[0]
n_so = self.__sequencing_optimizers.shape[0]
if n_so == 0 and n_to == 0: # direct actions only
if not self.__is_sequencing_only_simulation():
self.__optimizer_configuration = 0
else:
self.__optimizer_configuration = 1
elif n_so == 0 and n_to == 1:
self.__optimizer_configuration = 2
elif n_so == 0 and n_to > 1:
self.__optimizer_configuration = 3
elif n_so == 1 and n_to == 0:
if not self.__is_sequencing_only_simulation():
self.__optimizer_configuration = 4
else:
self.__optimizer_configuration = 5
elif n_so == 1 and n_to == 1:
self.__optimizer_configuration = 6
elif n_so == 1 and n_to > 1:
self.__optimizer_configuration = 7
elif n_so > 1 and n_to == 0:
if not self.__is_sequencing_only_simulation():
self.__optimizer_configuration = 8
else:
self.__optimizer_configuration = 9
elif n_so > 1 and n_to == 1:
self.__optimizer_configuration = 10
else: # n_so > 1 and n_to > 1:
self.__optimizer_configuration = 11
# </editor-fold>
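    # Illustrative mapping examples (editor addition), assuming the instance is not
    # sequencing-only:
    #     n_so == 0, n_to == 0  ->  0 (direct sequencing and transport actions)
    #     n_so == 1, n_to == 2  ->  7 (fixed sequencing, selectable transport)
    #     n_so == 3, n_to == 1  -> 10 (selectable sequencing, fixed transport)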
# <editor-fold desc="Action and Observation Space Setup">
def __get_action_space(self):
"""
Initializes the action space parameter based on the
optimizer_configuration. The following scheme is applied:
1.) The agent action vector contains sequencing actions first,
then transport, except when there are no sequencing actions,
in which case only the transport options are actions
2.) For direct sequencing action, the total number of *visible*
operation indices constitute the actions + 1 for the wait signal
3.) For direct transport the number of machines in the system + 1 for
the wait signal constitute the actions
4.) For indirect optimizer actions the index of the respective optimizer
represents the action (here too 1. applies)
5.) If both routing and scheduling actions come from the agent, an
offset scalar (number of possible agent sequencing actions, n_s)
is kept to distinguish between the two, e.g. for agent action n
in transport mode transport action = n - n_s
:return: None
"""
assert -1 < self.__optimizer_configuration <= 11
n = self.__core.state.params.n_jobs
o = self.__core.state.params.max_n_operations
m = self.__core.state.params.n_machines
n_so = self.__sequencing_optimizers.shape[0]
n_to = self.__transport_optimizers.shape[0]
self.__transport_decision_offset = None
if self.__optimizer_configuration == 0:
self.__transport_decision_offset = n * o + 1
self.action_space = gym.spaces.Discrete(n * o + 1 + m + 1)
elif self.__optimizer_configuration in {1, 2}:
self.action_space = gym.spaces.Discrete(n * o + 1)
elif self.__optimizer_configuration == 3:
self.__transport_decision_offset = n * o + 1
self.action_space = gym.spaces.Discrete(n * o + 1 + n_to)
elif self.__optimizer_configuration == 4:
self.action_space = gym.spaces.Discrete(m + 1)
elif self.__optimizer_configuration in {5, 6}:
return # not RL; leave action space None
elif self.__optimizer_configuration == 7:
self.action_space = gym.spaces.Discrete(n_to)
elif self.__optimizer_configuration == 8:
self.__transport_decision_offset = n_so
self.action_space = gym.spaces.Discrete(n_so + m + 1)
elif self.__optimizer_configuration in {9, 10}:
self.action_space = gym.spaces.Discrete(n_so)
else: # self.__optimizer_configuration == 11:
self.__transport_decision_offset = n_so
self.action_space = gym.spaces.Discrete(n_so + n_to)
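    # Sizing example (editor addition): in configuration 0 with n = 2 jobs,
    # o = 3 operations and m = 4 machines, the action space is
    # Discrete(n * o + 1 + m + 1) = Discrete(12); actions 0..6 are sequencing
    # choices (including the wait signal), actions 7..11 are transport choices,
    # and transport_decision_offset = 7.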
def __get_observation_space(self):
"""
Initializes the observation space required by gym to a Box object as
defined by gym.
The observation (i.e. state) space dimension is inferred from the state
representation returned by the state_transformer on the initial state.
:return: None
"""
if self.__return_transformer is None:
# something other than RL is using this simulation
return
state_repr = self.__return_transformer.transform_state(
self.__core.state)
self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf,
shape=state_repr.shape)
# </editor-fold>
# <editor-fold desc="Getters">
@property
def parameters(self):
return self.__parameters
@property
def core(self):
return self.__core
@property
def sequencing_optimizers(self):
return self.__sequencing_optimizers
@property
def transport_optimizers(self):
return self.__transport_optimizers
@property
def optimizer_configuration(self):
return self.__optimizer_configuration
# </editor-fold>
# <editor-fold desc="Action Transformation">
def __transform_action(self, agent_action):
"""
        Switches between the 12 available decision interfaces and transforms the
agent action accordingly into an environment core compatible decision.
:param agent_action: The action as chosen by the agent.
:return: The action compatible with the core.
"""
if self.__optimizer_configuration in {0, 1}:
# both routing and sequencing direct actions
return self.__transform_a_direct_action_run(agent_action)
elif self.__optimizer_configuration == 2:
return self.__transform_a_direct_sequencing_fixed_transport(
agent_action)
elif self.__optimizer_configuration == 3:
return self.__transform_a_direct_sequencing_selectable_transport(
agent_action)
elif self.__optimizer_configuration == 4:
return self.__transform_a_fixed_sequencing_direct_transport(
agent_action)
elif self.__optimizer_configuration in {5, 6}:
return self.__transform_a_fixed_optimizer_run()
elif self.__optimizer_configuration == 7:
return self.__transform_a_fixed_sequencing_selectable_transport(
agent_action)
elif self.__optimizer_configuration in {8, 9}:
return self.__transform_a_selectable_sequencing_direct_transport(
agent_action)
elif self.__optimizer_configuration == 10:
return self.__transform_a_selectable_sequencing_fixed_transport(
agent_action)
elif self.__optimizer_configuration == 11:
return self.__transform_action_fully_selectable_optimizer_run(
agent_action)
else: # should not be possible at this point;
raise UndefinedOptimizerConfiguration()
def __transform_a_selectable_sequencing_direct_transport(
self, action: int) -> int:
"""
Translates an agent action into a simulation core action when sequencing
decisions (mode 0) are made indirectly through optimizers and transport
decisions (mode 1) are taken directly by the agent.
This function ensures that:
1. No transport action is taken in sequencing mode
(action > transport decision offset)
2. No transport decisions are made at all, if the simulation
instance only needs sequencing decisions (transport decision offset
is None)
3. The raw transport action passed by the agent is legal, as
perceived by the simulation core.
:param action: The action selected by the agent.
:return: The corresponding simulation core action.
"""
if self.__core.state.scheduling_mode == 0: # sequencing
if self.__transport_decision_offset is None:
# no transport decisions available
return self.__sequencing_optimizers[action].get_action(
self.__core.state)
elif action >= self.__transport_decision_offset:
# picked a transport action in sequencing mode
raise IllegalAction()
else:
                # all good :)
return self.__sequencing_optimizers[action].get_action(
self.__core.state)
else:
core_action = action - self.__transport_decision_offset + 1
if (action < self.__transport_decision_offset or
core_action not in self.__core.state.legal_actions):
raise IllegalAction()
# m starts from 1!
return core_action
def __transform_a_direct_sequencing_selectable_transport(
self, action: int) -> int:
if self.__core.state.scheduling_mode == 0:
if (action >= self.__transport_decision_offset or
action not in self.__core.state.legal_actions):
raise IllegalAction()
return action
else:
if action < self.__transport_decision_offset:
raise IllegalAction()
return self.__transport_optimizers[
action - self.__transport_decision_offset].get_action(
self.__core.state)
def __transform_a_fixed_optimizer_run(self) -> int:
# pure optimizer run. action space not relevant
# illegal actions not possible
if self.__core.state.scheduling_mode == 0:
direct_core_action = self.__sequencing_optimizers[0].get_action(
self.__core.state)
else:
direct_core_action = self.__transport_optimizers[0].get_action(
self.__core.state)
return direct_core_action
def __transform_a_selectable_sequencing_fixed_transport(
self, agent_action: int) -> int:
# illegal actions not possible
if self.__core.state.scheduling_mode == 0:
return self.__sequencing_optimizers[agent_action].get_action(
self.__core.state)
else:
return self.__transport_optimizers[0].get_action(self.__core.state)
def __transform_a_direct_sequencing_fixed_transport(
self, agent_action: int) -> int:
if self.__core.state.scheduling_mode == 0:
if agent_action not in self.__core.state.legal_actions:
raise IllegalAction()
return agent_action
else:
return self.__transport_optimizers[0].get_action(
self.__core.state)
def __transform_a_fixed_sequencing_selectable_transport(
self, agent_action: int) -> int:
if self.__core.state.scheduling_mode == 0:
return self.__sequencing_optimizers[0].get_action(
self.__core.state)
else:
# illegal actions not possible
return self.__transport_optimizers[agent_action].get_action(
self.__core.state)
def __transform_a_fixed_sequencing_direct_transport(
self, agent_action: int) -> int:
if self.__core.state.scheduling_mode == 0:
return self.__sequencing_optimizers[0].get_action(
self.__core.state)
else:
            # translate to a core action (1-indexed) and check that it is legal
core_action = agent_action + 1
if core_action not in self.__core.state.legal_actions:
raise IllegalAction()
return core_action
def __transform_a_direct_action_run(self, agent_action: int) -> int:
if self.__core.state.scheduling_mode == 0:
if self.__transport_decision_offset is None:
if agent_action not in self.__core.state.legal_actions:
raise IllegalAction()
elif (agent_action >= self.__transport_decision_offset or
agent_action not in self.__core.state.legal_actions):
raise IllegalAction()
return agent_action
else:
core_action = agent_action - self.__transport_decision_offset + 1
if (agent_action < self.__transport_decision_offset or
core_action not in self.__core.state.legal_actions):
raise IllegalAction()
return core_action
def __transform_action_fully_selectable_optimizer_run(
self, agent_action: int) -> int:
"""
Transforms action in the selectable routing and sequencing mode
        (opt_conf==11).
When the core is in sequencing mode, the agent action
designates a sequencing optimizer index. When in routing mode, the agent
action designates a transport optimizer index. The first
self.__transport_decision_offset optimizers designate sequencing
optimizers while the next indices pertain to transport optimizers.
The get_action method of the optimizer selected by the agent is called
with the core state to return the core compatible action.
:param agent_action: The transport or sequencing optimizer index.
:return: The core compatible action.
"""
# Selectable Indirect Transport Action &
# Selectable Indirect Sequencing Action
if self.__core.state.scheduling_mode == 0:
if agent_action >= self.__transport_decision_offset:
raise IllegalAction()
direct_core_action = self.__sequencing_optimizers[
agent_action].get_action(self.__core.state)
else:
if agent_action < self.__transport_decision_offset:
raise IllegalAction()
direct_core_action = self.__transport_optimizers[
agent_action - self.__transport_decision_offset].get_action(
self.__core.state)
return direct_core_action
# </editor-fold>
|
[
"gym_fabrikatioRL.envs.env_utils.UndefinedOptimizerTargetMode",
"gym.spaces.Discrete",
"gym.spaces.Box",
"gym_fabrikatioRL.envs.interface_input.Input",
"numpy.array",
"gym_fabrikatioRL.envs.env_utils.UndefinedOptimizerConfiguration",
"copy.deepcopy",
"gym_fabrikatioRL.envs.env_utils.IllegalAction",
"gym_fabrikatioRL.envs.core.Core",
"gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall"
] |
[((999, 1048), 'gym_fabrikatioRL.envs.interface_input.Input', 'Input', (['scheduling_inputs', 'init_seed', 'logfile_path'], {}), '(scheduling_inputs, init_seed, logfile_path)\n', (1004, 1048), False, 'from gym_fabrikatioRL.envs.interface_input import Input\n'), ((3739, 3762), 'gym_fabrikatioRL.envs.core.Core', 'Core', (['self.__parameters'], {}), '(self.__parameters)\n', (3743, 3762), False, 'from gym_fabrikatioRL.envs.core import Core\n'), ((8102, 8119), 'numpy.array', 'np.array', (['seq_opt'], {}), '(seq_opt)\n', (8110, 8119), True, 'import numpy as np\n'), ((8158, 8175), 'numpy.array', 'np.array', (['tra_opt'], {}), '(tra_opt)\n', (8166, 8175), True, 'import numpy as np\n'), ((14891, 14955), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'state_repr.shape'}), '(low=-np.inf, high=np.inf, shape=state_repr.shape)\n', (14905, 14955), False, 'import gym\n'), ((1091, 1118), 'copy.deepcopy', 'deepcopy', (['self.__parameters'], {}), '(self.__parameters)\n', (1099, 1118), False, 'from copy import deepcopy\n'), ((3431, 3516), 'gym_fabrikatioRL.envs.interface_input.Input', 'Input', (['self.__parameters.scheduling_inputs', 'seed', 'self.__parameters.logfile_path'], {}), '(self.__parameters.scheduling_inputs, seed, self.__parameters.logfile_path\n )\n', (3436, 3516), False, 'from gym_fabrikatioRL.envs.interface_input import Input\n'), ((3596, 3688), 'gym_fabrikatioRL.envs.interface_input.Input', 'Input', (['self.__parameters.scheduling_inputs'], {'logfile_path': 'self.__parameters.logfile_path'}), '(self.__parameters.scheduling_inputs, logfile_path=self.__parameters.\n logfile_path)\n', (3601, 3688), False, 'from gym_fabrikatioRL.envs.interface_input import Input\n'), ((13174, 13212), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(n * o + 1 + m + 1)'], {}), '(n * o + 1 + m + 1)\n', (13193, 13212), False, 'import gym\n'), ((13300, 13330), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(n * o + 1)'], {}), '(n * o + 1)\n', (13319, 13330), False, 'import gym\n'), ((19163, 19178), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (19176, 19178), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((19546, 19561), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (19559, 19561), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((19682, 19697), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (19695, 19697), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((20970, 20985), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (20983, 20985), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((21999, 22014), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (22012, 22014), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((22810, 22825), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (22823, 22825), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((23940, 23955), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (23953, 23955), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((24179, 24194), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (24192, 24194), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((4825, 4921), 
'gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall', 'UndefinedLegalActionCall', (['self.__optimizer_configuration', 'self.__core.state.scheduling_mode'], {}), '(self.__optimizer_configuration, self.__core.state.\n scheduling_mode)\n', (4849, 4921), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall\n'), ((8032, 8062), 'gym_fabrikatioRL.envs.env_utils.UndefinedOptimizerTargetMode', 'UndefinedOptimizerTargetMode', ([], {}), '()\n', (8060, 8062), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedOptimizerTargetMode\n'), ((13470, 13507), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(n * o + 1 + n_to)'], {}), '(n * o + 1 + n_to)\n', (13489, 13507), False, 'import gym\n'), ((18745, 18760), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (18758, 18760), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((22326, 22341), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (22339, 22341), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((22508, 22523), 'gym_fabrikatioRL.envs.env_utils.IllegalAction', 'IllegalAction', ([], {}), '()\n', (22521, 22523), False, 'from gym_fabrikatioRL.envs.env_utils import IllegalAction\n'), ((13590, 13616), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(m + 1)'], {}), '(m + 1)\n', (13609, 13616), False, 'import gym\n'), ((5319, 5415), 'gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall', 'UndefinedLegalActionCall', (['self.__optimizer_configuration', 'self.__core.state.scheduling_mode'], {}), '(self.__optimizer_configuration, self.__core.state.\n scheduling_mode)\n', (5343, 5415), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall\n'), ((5598, 5694), 'gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall', 'UndefinedLegalActionCall', (['self.__optimizer_configuration', 'self.__core.state.scheduling_mode'], {}), '(self.__optimizer_configuration, self.__core.state.\n scheduling_mode)\n', (5622, 5694), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall\n'), ((13808, 13833), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['n_to'], {}), '(n_to)\n', (13827, 13833), False, 'import gym\n'), ((5850, 5946), 'gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall', 'UndefinedLegalActionCall', (['self.__optimizer_configuration', 'self.__core.state.scheduling_mode'], {}), '(self.__optimizer_configuration, self.__core.state.\n scheduling_mode)\n', (5874, 5946), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall\n'), ((13968, 14001), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(n_so + m + 1)'], {}), '(n_so + m + 1)\n', (13987, 14001), False, 'import gym\n'), ((14090, 14115), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['n_so'], {}), '(n_so)\n', (14109, 14115), False, 'import gym\n'), ((14255, 14287), 'gym.spaces.Discrete', 'gym.spaces.Discrete', (['(n_so + n_to)'], {}), '(n_so + n_to)\n', (14274, 14287), False, 'import gym\n'), ((6506, 6602), 'gym_fabrikatioRL.envs.env_utils.UndefinedLegalActionCall', 'UndefinedLegalActionCall', (['self.__optimizer_configuration', 'self.__core.state.scheduling_mode'], {}), '(self.__optimizer_configuration, self.__core.state.\n scheduling_mode)\n', (6530, 6602), False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedLegalActionCall\n'), ((17375, 17408), 'gym_fabrikatioRL.envs.env_utils.UndefinedOptimizerConfiguration', 'UndefinedOptimizerConfiguration', ([], {}), '()\n', (17406, 17408), 
False, 'from gym_fabrikatioRL.envs.env_utils import UndefinedOptimizerConfiguration\n')]
|
# -*- coding: utf-8 -*-
"""
main program for IMRT QA PDF report parser
Created on Thu May 30 2019
@author: <NAME>, PhD
"""
from os.path import isdir, join, splitext, normpath
from os import walk, listdir
import zipfile
from datetime import datetime
from dateutil.parser import parse as date_parser
import numpy as np
import codecs
DELIMITER = ',' # delimiter for the csv output file for process_files
ALTERNATE = '^'  # replace the delimiter character with this so as not to confuse csv file parsing
def are_all_strings_in_text(text, list_of_strings):
"""
:param text: output from convert_pdf_to_text
:type text: list of str
:param list_of_strings: a list of strings used to identify document type
:type list_of_strings: list of str
:return: Will return true if every string in list_of_strings is found in the text data
:rtype: bool
"""
for str_to_find in list_of_strings:
if str_to_find not in text:
return False
return True
#############################################################
# CSV related functions
#############################################################
def get_csv(data, columns):
"""
Convert a dictionary of data into a row for a csv file
:param data: a dictionary with values with str representations
:type data: dict
:param columns: a list of keys dictating the order of the csv
:type columns: list
:return: a csv string delimited by DELIMITER
:rtype: str
"""
clean_csv = [str(data[column]).replace(DELIMITER, ALTERNATE) for column in columns]
return DELIMITER.join(clean_csv)
def load_csv_file(file_path):
with codecs.open(file_path, 'r', encoding='utf-8', errors='ignore') as doc:
return [line.split(',') for line in doc]
def import_csv(file_path, day_first=False):
raw_data = load_csv_file(file_path)
keys = raw_data.pop(0) # remove column header row
keys = [key.strip() for key in keys if key.strip()] + ['file_name']
data = {key: [] for key in keys}
for row in raw_data:
for col, key in enumerate(keys):
data[key].append(row[col])
sorted_data = {key: [] for key in keys}
sorted_data['date_time_obj'] = []
date_time_objs = get_date_times(data, day_first=day_first)
for i in get_sorted_indices(date_time_objs):
for key in keys:
sorted_data[key].append(data[key][i])
sorted_data['date_time_obj'].append(date_time_objs[i])
return sorted_data
def get_file_names_from_csv_file(file_path):
raw_data = load_csv_file(file_path)
column_headers = raw_data.pop(0) # remove column header row
fp_start = len(column_headers)
file_names = []
for row in raw_data:
file_name_fields = [value for value in row[fp_start:]]
file_name = ','.join(file_name_fields)
file_names.append(normpath(file_name.strip()))
return file_names
#############################################################
# Plotting and Stat related functions
#############################################################
def collapse_into_single_dates(x, y):
"""
Function used for a time plot to convert multiple values into one value, while retaining enough information
to perform a moving average over time
:param x: a list of dates in ascending order
    :param y: a list of values, aligned with x, that supports the '+' operator
:return: a unique list of dates, sum of y for that date, and number of original points for that date
:rtype: dict
"""
    # sum the daily data and keep track of points per day (averaging happens in moving_avg)
x_collapsed = [x[0]]
y_collapsed = [y[0]]
w_collapsed = [1]
for n in range(1, len(x)):
if x[n] == x_collapsed[-1]:
y_collapsed[-1] = (y_collapsed[-1] + y[n])
w_collapsed[-1] += 1
else:
x_collapsed.append(x[n])
y_collapsed.append(y[n])
w_collapsed.append(1)
return {'x': x_collapsed, 'y': y_collapsed, 'w': w_collapsed}
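# Worked example (editor addition): with x = [d1, d1, d2] and y = [1, 3, 5] the
# function returns {'x': [d1, d2], 'y': [4, 5], 'w': [2, 1]}, i.e. per-date sums
# plus counts, so a daily mean can be recovered later as y / w.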
def moving_avg(xyw, avg_len):
"""
Calculate a moving average for a given averaging length
:param xyw: output from collapse_into_single_dates
:type xyw: dict
    :param avg_len: number of points to average over, i.e., the look-back window
:type avg_len: int
:return: list of x values, list of y values
:rtype: tuple
"""
cumsum, moving_aves, x_final = [0], [], []
for i, y in enumerate(xyw['y'], 1):
cumsum.append(cumsum[i - 1] + y / xyw['w'][i - 1])
if i >= avg_len:
moving_ave = (cumsum[i] - cumsum[i - avg_len]) / avg_len
moving_aves.append(moving_ave)
x_final = [xyw['x'][i] for i in range(avg_len - 1, len(xyw['x']))]
return x_final, moving_aves
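# Worked example (editor addition), continuing the example above: with
# xyw = {'x': [d1, d2, d3], 'y': [4, 5, 6], 'w': [2, 1, 1]} the daily means are
# [2.0, 5.0, 6.0], and moving_avg(xyw, avg_len=2) returns ([d2, d3], [3.5, 5.5]).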
def get_sorted_indices(some_list):
try:
return [i[0] for i in sorted(enumerate(some_list), key=lambda x: x[1])]
except TypeError: # can't sort if a mix of str and float
try:
temp_data = [[value, -float('inf')][value == 'None'] for value in some_list]
return [i[0] for i in sorted(enumerate(temp_data), key=lambda x: x[1])]
except TypeError:
temp_data = [str(value) for value in some_list]
return [i[0] for i in sorted(enumerate(temp_data), key=lambda x: x[1])]
def get_date_times(data, datetime_key='Plan Date', row_id_key='Patient ID', day_first=False):
dates = []
for i, date_str in enumerate(data[datetime_key]):
try:
dates.append(date_parser(date_str, dayfirst=day_first).date())
except ValueError:
print('ERROR: Could not parse the following into a date: %s' % date_str)
print("\tPatient ID: %s" % data[row_id_key][i])
print("\tUsing today's date instead")
dates.append(datetime.today().date())
return dates
def get_control_limits(y):
"""
Calculate control limits for Control Chart
:param y: data
:type y: list
:return: center line, upper control limit, and lower control limit
"""
y = np.array(y)
center_line = np.mean(y)
avg_moving_range = np.mean(np.absolute(np.diff(y)))
scalar_d = 1.128
ucl = center_line + 3 * avg_moving_range / scalar_d
lcl = center_line - 3 * avg_moving_range / scalar_d
return center_line, ucl, lcl
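# Illustrative example (editor addition) with hypothetical gamma pass-rate data:
#     cl, ucl, lcl = get_control_limits([99.1, 98.7, 99.4, 98.9, 99.0])
# cl is the mean (99.02) and ucl/lcl are cl +/- 3 * mean(|diff(y)|) / 1.128,
# roughly 100.15 and 97.89 here; 1.128 is the standard d2 constant for a
# moving range of length 2.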
#############################################################
# File related functions
#############################################################
def extract_files_from_zipped_files(init_directory, extract_to_path, extension='.pdf'):
"""
Function to extract .pdf files from zipped files
:param init_directory: initial top-level directory to walk through
:type init_directory: str
:param extract_to_path: directory to extract pdfs into
:type extract_to_path: str
:param extension: file extension of file type to extract, set to None to extract all files
:type extension: str or None
"""
for dirName, subdirList, fileList in walk(init_directory): # iterate through files and all sub-directories
for fileName in fileList:
            if splitext(fileName)[1].lower() == '.zip':
zip_file_path = join(dirName, fileName)
with zipfile.ZipFile(zip_file_path, 'r') as z:
for file_name in z.namelist():
                        if not isdir(file_name) and (extension is None or splitext(file_name)[1].lower() == extension):
temp_path = join(extract_to_path)
z.extract(file_name, path=temp_path)
def find_latest_results(init_directory, no_recursive_search=False):
"""
Find the most recent IQDM results csv file within the provided directory
:param init_directory: initial scan directory
:type init_directory: str
:param no_recursive_search: set to True to ignore subdirectories
:type no_recursive_search: bool
:return: a dictionary like {report_type: {'time_stamp': datetime, 'file_path': str}}
:rtype: dict
"""
results = {}
if no_recursive_search:
process_result_csvs(listdir(init_directory), results)
else:
for dirName, subdirList, fileList in walk(init_directory): # iterate through files and all sub-directories
process_result_csvs(fileList, results, directory_name=dirName)
return results
def process_result_csvs(file_list, results, directory_name=None):
"""
Parse each file for report type and time stamp, edit results with the latest file_path for each report_type
:param file_list: files to be parsed
:type file_list: list
:param results: results dict from find_latest_results()
:type results: dict
:param directory_name: optionally specify the directory
:type directory_name: str
"""
for file_name in file_list:
fn = splitext(file_name)[0].lower()
ext = splitext(file_name)[1].lower()
if ext == '.csv' and '_results_' in fn:
try:
result_info = file_name.split('_')
report_type = result_info[0]
time_stamp = result_info[2].replace(ext, '')
time_stamp = datetime.strptime(time_stamp[:-7], '%Y-%m-%d %H-%M-%S')
if report_type and report_type not in results.keys() \
or results[report_type]['time_stamp'] < time_stamp:
if directory_name is None:
file_path = file_name
else:
file_path = join(directory_name, file_name)
results[report_type] = {'time_stamp': time_stamp, 'file_path': file_path}
except Exception:
continue
def get_processed_files(init_directory, no_recursive_search=False):
processed = []
if no_recursive_search:
get_file_names_from_result_csvs(listdir(init_directory), processed)
else:
for dirName, subdirList, fileList in walk(init_directory): # iterate through files and all sub-directories
get_file_names_from_result_csvs(fileList, processed, directory_name=dirName)
return list(set(processed))
def get_file_names_from_result_csvs(file_list, processed, directory_name=None):
for file_name in file_list:
fn = splitext(file_name)[0].lower()
ext = splitext(file_name)[1].lower()
if ext == '.csv' and '_results_' in fn:
if directory_name is None:
file_path = file_name
else:
file_path = join(directory_name, file_name)
try:
file_names = get_file_names_from_csv_file(file_path)
processed.extend(file_names)
except Exception:
continue
def is_file_name_found_in_processed_files(file_name, directory, processed_files):
for processed_file in processed_files:
if normpath(file_name) in processed_file or normpath(join(directory, file_name)) in processed_files:
return True
return False
|
[
"numpy.mean",
"dateutil.parser.parse",
"os.listdir",
"zipfile.ZipFile",
"datetime.datetime.strptime",
"os.path.join",
"numpy.diff",
"os.path.splitext",
"os.path.normpath",
"numpy.array",
"os.path.isdir",
"datetime.datetime.today",
"codecs.open",
"os.walk"
] |
[((6035, 6046), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (6043, 6046), True, 'import numpy as np\n'), ((6066, 6076), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (6073, 6076), True, 'import numpy as np\n'), ((6970, 6990), 'os.walk', 'walk', (['init_directory'], {}), '(init_directory)\n', (6974, 6990), False, 'from os import walk, listdir\n'), ((1650, 1712), 'codecs.open', 'codecs.open', (['file_path', '"""r"""'], {'encoding': '"""utf-8"""', 'errors': '"""ignore"""'}), "(file_path, 'r', encoding='utf-8', errors='ignore')\n", (1661, 1712), False, 'import codecs\n'), ((8160, 8180), 'os.walk', 'walk', (['init_directory'], {}), '(init_directory)\n', (8164, 8180), False, 'from os import walk, listdir\n'), ((9922, 9942), 'os.walk', 'walk', (['init_directory'], {}), '(init_directory)\n', (9926, 9942), False, 'from os import walk, listdir\n'), ((6120, 6130), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (6127, 6130), True, 'import numpy as np\n'), ((8071, 8094), 'os.listdir', 'listdir', (['init_directory'], {}), '(init_directory)\n', (8078, 8094), False, 'from os import walk, listdir\n'), ((9831, 9854), 'os.listdir', 'listdir', (['init_directory'], {}), '(init_directory)\n', (9838, 9854), False, 'from os import walk, listdir\n'), ((7161, 7184), 'os.path.join', 'join', (['dirName', 'fileName'], {}), '(dirName, fileName)\n', (7165, 7184), False, 'from os.path import isdir, join, splitext, normpath\n'), ((9134, 9189), 'datetime.datetime.strptime', 'datetime.strptime', (['time_stamp[:-7]', '"""%Y-%m-%d %H-%M-%S"""'], {}), "(time_stamp[:-7], '%Y-%m-%d %H-%M-%S')\n", (9151, 9189), False, 'from datetime import datetime\n'), ((10488, 10519), 'os.path.join', 'join', (['directory_name', 'file_name'], {}), '(directory_name, file_name)\n', (10492, 10519), False, 'from os.path import isdir, join, splitext, normpath\n'), ((10844, 10863), 'os.path.normpath', 'normpath', (['file_name'], {}), '(file_name)\n', (10852, 10863), False, 'from os.path import isdir, join, splitext, normpath\n'), ((7206, 7241), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file_path', '"""r"""'], {}), "(zip_file_path, 'r')\n", (7221, 7241), False, 'import zipfile\n'), ((8807, 8826), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (8815, 8826), False, 'from os.path import isdir, join, splitext, normpath\n'), ((8852, 8871), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (8860, 8871), False, 'from os.path import isdir, join, splitext, normpath\n'), ((10241, 10260), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (10249, 10260), False, 'from os.path import isdir, join, splitext, normpath\n'), ((10286, 10305), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (10294, 10305), False, 'from os.path import isdir, join, splitext, normpath\n'), ((10894, 10920), 'os.path.join', 'join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (10898, 10920), False, 'from os.path import isdir, join, splitext, normpath\n'), ((5488, 5529), 'dateutil.parser.parse', 'date_parser', (['date_str'], {'dayfirst': 'day_first'}), '(date_str, dayfirst=day_first)\n', (5499, 5529), True, 'from dateutil.parser import parse as date_parser\n'), ((7090, 7108), 'os.path.splitext', 'splitext', (['fileName'], {}), '(fileName)\n', (7098, 7108), False, 'from os.path import isdir, join, splitext, normpath\n'), ((9493, 9524), 'os.path.join', 'join', (['directory_name', 'file_name'], {}), '(directory_name, file_name)\n', (9497, 9524), False, 'from os.path import isdir, join, 
splitext, normpath\n'), ((5785, 5801), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (5799, 5801), False, 'from datetime import datetime\n'), ((7457, 7478), 'os.path.join', 'join', (['extract_to_path'], {}), '(extract_to_path)\n', (7461, 7478), False, 'from os.path import isdir, join, splitext, normpath\n'), ((7330, 7346), 'os.path.isdir', 'isdir', (['file_name'], {}), '(file_name)\n', (7335, 7346), False, 'from os.path import isdir, join, splitext, normpath\n'), ((7373, 7392), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (7381, 7392), False, 'from os.path import isdir, join, splitext, normpath\n')]
|
import numpy as np
from PIL import Image
from retina.retina import warp_image
class DatasetGenerator(object):
def __init__(self, data, output_dim=28, scenario=1, noise_var=None, common_dim=200):
""" DatasetGenerator initialization.
:param data: original dataset, MNIST
:param output_dim: the dimensionality for the first transformation
:param scenario: one of the paradigm proposed [1, 2, 4]
:param noise_var: useful in paradigm 1, 4
:param common_dim: dimensionality of output for scenario 4
"""
self.data = data
self.output_dim = output_dim
self.scenario = scenario
if noise_var is None:
noise_var = 2e-1
self.noise_var = noise_var
n_samples, dim1, dim2 = self.data.shape
# here we want to split
self.n_samples = n_samples
self.dim1 = dim1
self.dim2 = dim2
self.common_dim = common_dim # we upscale and then add noise
self.edge = int((self.output_dim - self.dim1) / 2)
if self.scenario == 4:
self.edge = int((self.common_dim - self.output_dim) / 2)
self.output = None
def add_noise_and_std(self):
""" Add noise to the original image and standardize the entire image.
The pixels for this image are between values [0,1].
        We generate the larger, noise-padded image, where the noise is kept
        positive by taking the absolute value. We then standardize every image so
        that its pixel distribution has zero mean and unit variance.
"""
out = self.noise_var * np.abs(np.random.randn(self.n_samples,
2 * self.edge + self.dim1,
2 * self.edge + self.dim2))
out[:, self.edge:self.edge+self.dim1, self.edge:self.edge+self.dim2] = self.data
out_std = np.zeros_like(out)
mean_ = np.mean(out, axis=(1, 2))
std_ = np.std(out, axis=(1, 2))
for k_, (m_, s_) in enumerate(zip(mean_, std_)):
out_std[k_] = (out[k_] - m_) / s_
self.output = out_std
return self
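    # Shape example (editor addition): for 28x28 MNIST input with output_dim = 36,
    # edge = int((36 - 28) / 2) = 4, so self.output has shape (n_samples, 36, 36)
    # with the original digit centred in the noise-padded frame.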
def upscale_std(self):
"""
Automatic PIL upscale of the image with standardization.
"""
new_x = np.zeros((self.n_samples, self.output_dim, self.output_dim))
for n_, old_image_ in enumerate(self.data):
image = Image.fromarray(old_image_)
tmp_x = image.resize(size=(self.output_dim, self.output_dim))
tmp_std_x = (tmp_x - np.mean(tmp_x)) / np.std(tmp_x)
new_x[n_] = tmp_std_x
self.output = new_x
return self
def _upscale_no_std(self):
""" Upscale for experiment 4 wo standardization
"""
new_x = np.zeros((self.n_samples, self.output_dim, self.output_dim))
for n_, old_image_ in enumerate(self.data):
image = Image.fromarray(old_image_)
new_x[n_] = image.resize(size=(self.output_dim, self.output_dim))
self.dim1 = self.output_dim
self.dim2 = self.output_dim
return new_x
def upscale_add_noise_std(self):
upscaled_mnist = self._upscale_no_std()
self.data = upscaled_mnist
self.add_noise_and_std()
def foveation(self):
""" In the original implementation, the image is rescaled to a smaller dimension
and then lifted to the original dimensions. We do not want to lose information.
To prevent this we keep the scaling factor as it is, and we stick to the implementation:
https://github.com/dicarlolab/retinawarp
We assume here that the image has square dimension.
        The foveated images are stored in self.output (nothing is returned).
"""
ret_img = np.zeros_like(self.data)
for n_ in range(self.n_samples):
ret_img[n_] = warp_image(self.data[n_], output_size=self.dim1, input_size=self.dim1)
self.output = ret_img
def run(self):
if self.scenario == 1:
self.add_noise_and_std()
elif self.scenario == 2:
self.upscale_std()
elif self.scenario == 4:
self.upscale_add_noise_std()
else:
            raise ValueError('Unknown scenario: expected one of {1, 2, 4}')
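    # Illustrative usage (editor addition; `mnist_images` is a hypothetical
    # (n, 28, 28) array of digits):
    #     gen = DatasetGenerator(mnist_images, output_dim=36, scenario=1)
    #     gen.run()
    #     transformed = gen.output  # noise-padded, standardized images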
|
[
"numpy.mean",
"PIL.Image.fromarray",
"numpy.zeros",
"numpy.random.randn",
"retina.retina.warp_image",
"numpy.std",
"numpy.zeros_like"
] |
[((1868, 1886), 'numpy.zeros_like', 'np.zeros_like', (['out'], {}), '(out)\n', (1881, 1886), True, 'import numpy as np\n'), ((1903, 1928), 'numpy.mean', 'np.mean', (['out'], {'axis': '(1, 2)'}), '(out, axis=(1, 2))\n', (1910, 1928), True, 'import numpy as np\n'), ((1944, 1968), 'numpy.std', 'np.std', (['out'], {'axis': '(1, 2)'}), '(out, axis=(1, 2))\n', (1950, 1968), True, 'import numpy as np\n'), ((2255, 2315), 'numpy.zeros', 'np.zeros', (['(self.n_samples, self.output_dim, self.output_dim)'], {}), '((self.n_samples, self.output_dim, self.output_dim))\n', (2263, 2315), True, 'import numpy as np\n'), ((2756, 2816), 'numpy.zeros', 'np.zeros', (['(self.n_samples, self.output_dim, self.output_dim)'], {}), '((self.n_samples, self.output_dim, self.output_dim))\n', (2764, 2816), True, 'import numpy as np\n'), ((3748, 3772), 'numpy.zeros_like', 'np.zeros_like', (['self.data'], {}), '(self.data)\n', (3761, 3772), True, 'import numpy as np\n'), ((2389, 2416), 'PIL.Image.fromarray', 'Image.fromarray', (['old_image_'], {}), '(old_image_)\n', (2404, 2416), False, 'from PIL import Image\n'), ((2889, 2916), 'PIL.Image.fromarray', 'Image.fromarray', (['old_image_'], {}), '(old_image_)\n', (2904, 2916), False, 'from PIL import Image\n'), ((3840, 3910), 'retina.retina.warp_image', 'warp_image', (['self.data[n_]'], {'output_size': 'self.dim1', 'input_size': 'self.dim1'}), '(self.data[n_], output_size=self.dim1, input_size=self.dim1)\n', (3850, 3910), False, 'from retina.retina import warp_image\n'), ((1565, 1654), 'numpy.random.randn', 'np.random.randn', (['self.n_samples', '(2 * self.edge + self.dim1)', '(2 * self.edge + self.dim2)'], {}), '(self.n_samples, 2 * self.edge + self.dim1, 2 * self.edge +\n self.dim2)\n', (1580, 1654), True, 'import numpy as np\n'), ((2542, 2555), 'numpy.std', 'np.std', (['tmp_x'], {}), '(tmp_x)\n', (2548, 2555), True, 'import numpy as np\n'), ((2524, 2538), 'numpy.mean', 'np.mean', (['tmp_x'], {}), '(tmp_x)\n', (2531, 2538), True, 'import numpy as np\n')]
|
from FeatureProcess import *
import pandas as pd
import numpy as np
fsd = FeaturesStandard()
data = [[0, 0], [0, 0], [1, 1], [1, 1]]
scr = fsd.fit(data)
print(scr.mean_)
print(scr.transform(data))
print('--------------------')
fe = FeaturesEncoder(handle_unknown='ignore')
X = [['Male', 1], ['Female', 3], ['Female', 2]]
enc =fe.fit(X)
print(enc.categories_)
print(enc.transform([['Female', 1], ['Male', 4]]).toarray())
print(enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]]))
print(enc.get_feature_names(['gender', 'group']))
fd = FeaturesDecomposition(n_components=2)
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
p = fd.fit(X)
print(p.explained_variance_ratio_)
print(p.singular_values_)
fs = FeaturesSelection(threshold=(.8 * (1 - .8)))
X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
fs.fit(X)
X = fs.transform(X)
print(X)
|
[
"numpy.array"
] |
[((586, 650), 'numpy.array', 'np.array', (['[[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]'], {}), '([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n', (594, 650), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import ptitprince as pt
# ----------
# Loss Plots
# ----------
def save_loss_plot(path, loss_function, v_path=None, show=True):
df = pd.read_csv(path)
if v_path is not None:
vdf = pd.read_csv(v_path)
else:
vdf = None
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_loss.png')
fig, ax = plot_loss(df, vdf=vdf, x_lab='Iteration', y_lab=loss_function, save=out_path, show=show)
def plot_loss(df, vdf=None, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
x = df['Unnamed: 0'].values
y = df['loss'].values
epochs = len(df['epoch'].unique())
no_batches = int(len(x) / epochs)
epoch_ends = np.array([((i + 1) * no_batches) - 1 for i in range(epochs)])
epoch_end_x = x[epoch_ends]
epoch_end_y = y[epoch_ends]
fig, ax = plt.subplots()
leg = ['loss',]
ax.plot(x, y, linewidth=2)
ax.scatter(epoch_end_x, epoch_end_y)
title = 'Training loss'
if vdf is not None:
if len(vdf) > epochs:
vy = vdf.groupby('batch_id').mean()['validation_loss'].values
vx = vdf['batch_id'].unique()
else:
vy = vdf['validation_loss'].values
vx = epoch_end_x
title = title + ' with validation loss'
leg.append('validation loss')
if len(vdf) > epochs:
#vy_err = v_df.groupby('batch_id').sem()['validation_loss'].values
#ax.errorbar(vx, vy, vy_err, marker='.')
ax.plot(vx, vy, linewidth=2, marker='o')
else:
ax.plot(vx, vy, linewidth=2, marker='o')
ax.set(xlabel=x_lab, ylabel=y_lab)
ax.set_title(title)
ax.legend(leg)
fig.set_size_inches(13, 9)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, ax
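# Input format note (editor addition): plot_loss expects the training CSV to carry
# 'Unnamed: 0' (iteration index), 'loss' and 'epoch' columns, and the optional
# validation CSV to carry 'validation_loss' plus, when logged per batch, a
# 'batch_id' column.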
def save_channel_loss_plot(path, show=True):
df = pd.read_csv(path)
p = Path(path)
n = p.stem
d = p.parents[0]
out_path = os.path.join(d, n + '_channel-loss.png')
fig, ax = plot_channel_losses(df, save=out_path, show=show)
def plot_channel_losses(df, x_lab='Iteration', y_lab='BCE Loss', save=None, show=True):
cols = list(df.columns)
x = df['Unnamed: 0'].values
non_channel_cols = ['Unnamed: 0', 'epoch', 'batch_num', 'loss', 'data_id']
channel_losses = [col for col in cols if col not in non_channel_cols]
fig, axs = plt.subplots(2, 2)
zs, ys, xs, cs = [], [], [], []
for col in channel_losses:
y = df[col].values
if col.startswith('z'):
ls = _get_linestyle(zs)
axs[0, 0].plot(x, y, linewidth=1, linestyle=ls)
zs.append(col)
if col.startswith('y'):
ls = _get_linestyle(ys)
axs[0, 1].plot(x, y, linewidth=1, linestyle=ls)
ys.append(col)
if col.startswith('x'):
ls = _get_linestyle(xs)
axs[1, 0].plot(x, y, linewidth=1, linestyle=ls)
xs.append(col)
if col.startswith('centre'):
ls = _get_linestyle(cs)
axs[1, 1].plot(x, y, linewidth=1, linestyle=ls)
cs.append(col)
axs[0, 0].set_title('Z affinities losses')
axs[0, 0].legend(zs)
axs[0, 1].set_title('Y affinities losses')
axs[0, 1].legend(ys)
axs[1, 0].set_title('X affinities losses')
axs[1, 0].legend(xs)
axs[1, 1].set_title('Centreness losses')
axs[1, 1].legend(cs)
for ax in axs.flat:
ax.set(xlabel=x_lab, ylabel=y_lab)
fig.set_size_inches(13, 9)
if save is not None:
plt.savefig(save, dpi=300)
if show:
plt.show()
return fig, axs
def _get_linestyle(lis):
if len(lis) == 0:
ls = '-'
elif len(lis) == 1:
ls = '--'
else:
ls = ':'
return ls
# --------
# VI Plots
# --------
def VI_plot(
path,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
lab="",
save=False,
show=True):
df = pd.read_csv(path)
overseg = df[cond_ent_over].values
o_groups = [cond_ent_over] * len(overseg)
underseg = df[cond_ent_under].values
u_groups = [cond_ent_under] * len(underseg)
groups = o_groups + u_groups
x = 'Variation of information'
y = 'Conditional entropy'
data = {
x : groups,
y : np.concatenate([overseg, underseg])
}
data = pd.DataFrame(data)
o = 'h'
pal = 'Set2'
sigma = .2
f, ax = plt.subplots(figsize=(12, 10))
pt.RainCloud(x = x, y = y, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax, orient = o)
p = Path(path)
plt.title(p.stem)
if save:
save_path = os.path.join(p.parents[0], p.stem + lab + '_VI_rainclout_plot.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
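# Illustrative sketch (the temp path is an assumption): VI_plot expects a CSV whose
# columns match cond_ent_over / cond_ent_under, so a minimal round trip writes a
# throw-away file before plotting.
def _example_VI_plot(tmp_csv='/tmp/vi_example.csv'):
    df = pd.DataFrame({
        'GT | Output': np.random.rand(50),
        'Output | GT': np.random.rand(50),
    })
    df.to_csv(tmp_csv, index=False)
    VI_plot(tmp_csv, lab='_example', save=False, show=False)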
def experiment_VI_plots(
paths,
names,
title,
out_name,
out_dir,
cond_ent_over="GT | Output",
cond_ent_under="Output | GT",
    save=True,
    show=True,
    ):
groups = []
ce0 = []
ce1 = []
for i, p in enumerate(paths):
df = pd.read_csv(p)
ce0.append(df[cond_ent_over].values)
ce1.append(df[cond_ent_under].values)
groups += [names[i]] * len(df)
x = 'Experiment'
data = {
x : groups,
cond_ent_over : np.concatenate(ce0),
cond_ent_under : np.concatenate(ce1)
}
data = pd.DataFrame(data)
    f, axs = plt.subplots(1, 2, figsize=(12, 10))
    # plt.subplots(1, 2) returns a 1-D array of axes
    ax0 = axs[0]
    ax1 = axs[1]
o = 'h'
pal = 'Set2'
sigma = .2
pt.RainCloud(x = x, y = cond_ent_over, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax0, orient = o)
pt.RainCloud(x = x, y = cond_ent_under, data = data, palette = pal, bw = sigma,
width_viol = .6, ax = ax1, orient = o)
plt.title(title)
if save:
save_path = os.path.join(out_dir, '_VI_rainclould_plots.png')
plt.savefig(save_path, bbox_inches='tight')
if show:
plt.show()
if __name__ == '__main__':
#name = 'loss_z-1_z-2_y-1_y-2_y-3_x-1_x-2_x-3_c_cl.csv'
name = 'loss_210401_150158_z-1_y-1_x-1__wBCE2-1-1.csv'
#dir_ = '/Users/amcg0011/Data/pia-tracking/cang_training/210331_training_0'
dir_ = '/Users/amcg0011/Data/pia-tracking/cang_training/210401_150158_z-1_y-1_x-1__wBCE2-1-1'
path = os.path.join(dir_, name)
save_channel_loss_plot(path)
#v_name = 'validation-loss_z-1_z-2_y-1_y-2_y-3_x-1_x-2_x-3_c_cl.csv'
v_name = 'validation-loss_210401_150158_z-1_y-1_x-1__wBCE2-1-1.csv'
v_path = os.path.join(dir_, v_name)
loss_function = 'Weighted BCE Loss (2, 1, 1)'
save_loss_plot(path, loss_function, v_path)
|
[
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"pathlib.Path",
"ptitprince.RainCloud",
"os.path.join",
"numpy.concatenate",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((245, 262), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (256, 262), True, 'import pandas as pd\n'), ((361, 371), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (365, 371), False, 'from pathlib import Path\n'), ((423, 455), 'os.path.join', 'os.path.join', (['d', "(n + '_loss.png')"], {}), "(d, n + '_loss.png')\n", (435, 455), False, 'import os\n'), ((942, 956), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (954, 956), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2003), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1997, 2003), True, 'import pandas as pd\n'), ((2012, 2022), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2016, 2022), False, 'from pathlib import Path\n'), ((2074, 2114), 'os.path.join', 'os.path.join', (['d', "(n + '_channel-loss.png')"], {}), "(d, n + '_channel-loss.png')\n", (2086, 2114), False, 'import os\n'), ((2498, 2516), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (2510, 2516), True, 'import matplotlib.pyplot as plt\n'), ((4114, 4131), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (4125, 4131), True, 'import pandas as pd\n'), ((4507, 4525), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4519, 4525), True, 'import pandas as pd\n'), ((4582, 4612), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (4594, 4612), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4711), 'ptitprince.RainCloud', 'pt.RainCloud', ([], {'x': 'x', 'y': 'y', 'data': 'data', 'palette': 'pal', 'bw': 'sigma', 'width_viol': '(0.6)', 'ax': 'ax', 'orient': 'o'}), '(x=x, y=y, data=data, palette=pal, bw=sigma, width_viol=0.6, ax\n =ax, orient=o)\n', (4629, 4711), True, 'import ptitprince as pt\n'), ((4747, 4757), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (4751, 4757), False, 'from pathlib import Path\n'), ((4762, 4779), 'matplotlib.pyplot.title', 'plt.title', (['p.stem'], {}), '(p.stem)\n', (4771, 4779), True, 'import matplotlib.pyplot as plt\n'), ((5556, 5574), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (5568, 5574), True, 'import pandas as pd\n'), ((5588, 5624), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 10)'}), '(1, 2, figsize=(12, 10))\n', (5600, 5624), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5819), 'ptitprince.RainCloud', 'pt.RainCloud', ([], {'x': 'x', 'y': 'cond_ent_over', 'data': 'data', 'palette': 'pal', 'bw': 'sigma', 'width_viol': '(0.6)', 'ax': 'ax0', 'orient': 'o'}), '(x=x, y=cond_ent_over, data=data, palette=pal, bw=sigma,\n width_viol=0.6, ax=ax0, orient=o)\n', (5725, 5819), True, 'import ptitprince as pt\n'), ((5852, 5959), 'ptitprince.RainCloud', 'pt.RainCloud', ([], {'x': 'x', 'y': 'cond_ent_under', 'data': 'data', 'palette': 'pal', 'bw': 'sigma', 'width_viol': '(0.6)', 'ax': 'ax1', 'orient': 'o'}), '(x=x, y=cond_ent_under, data=data, palette=pal, bw=sigma,\n width_viol=0.6, ax=ax1, orient=o)\n', (5864, 5959), True, 'import ptitprince as pt\n'), ((5992, 6008), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6001, 6008), True, 'import matplotlib.pyplot as plt\n'), ((6514, 6538), 'os.path.join', 'os.path.join', (['dir_', 'name'], {}), '(dir_, name)\n', (6526, 6538), False, 'import os\n'), ((6730, 6756), 'os.path.join', 'os.path.join', (['dir_', 'v_name'], {}), '(dir_, v_name)\n', (6742, 6756), False, 'import os\n'), ((304, 323), 'pandas.read_csv', 'pd.read_csv', (['v_path'], {}), '(v_path)\n', 
(315, 323), True, 'import pandas as pd\n'), ((1851, 1877), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {'dpi': '(300)'}), '(save, dpi=300)\n', (1862, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1907, 1909), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3679), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {'dpi': '(300)'}), '(save, dpi=300)\n', (3664, 3679), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3709, 3711), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4485), 'numpy.concatenate', 'np.concatenate', (['[overseg, underseg]'], {}), '([overseg, underseg])\n', (4464, 4485), True, 'import numpy as np\n'), ((4813, 4880), 'os.path.join', 'os.path.join', (['p.parents[0]', "(p.stem + lab + '_VI_rainclout_plot.png')"], {}), "(p.parents[0], p.stem + lab + '_VI_rainclout_plot.png')\n", (4825, 4880), False, 'import os\n'), ((4889, 4932), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'bbox_inches': '"""tight"""'}), "(save_path, bbox_inches='tight')\n", (4900, 4932), True, 'import matplotlib.pyplot as plt\n'), ((4954, 4964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4962, 4964), True, 'import matplotlib.pyplot as plt\n'), ((5248, 5262), 'pandas.read_csv', 'pd.read_csv', (['p'], {}), '(p)\n', (5259, 5262), True, 'import pandas as pd\n'), ((5472, 5491), 'numpy.concatenate', 'np.concatenate', (['ce0'], {}), '(ce0)\n', (5486, 5491), True, 'import numpy as np\n'), ((5519, 5538), 'numpy.concatenate', 'np.concatenate', (['ce1'], {}), '(ce1)\n', (5533, 5538), True, 'import numpy as np\n'), ((6042, 6091), 'os.path.join', 'os.path.join', (['out_dir', '"""_VI_rainclould_plots.png"""'], {}), "(out_dir, '_VI_rainclould_plots.png')\n", (6054, 6091), False, 'import os\n'), ((6100, 6143), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'bbox_inches': '"""tight"""'}), "(save_path, bbox_inches='tight')\n", (6111, 6143), True, 'import matplotlib.pyplot as plt\n'), ((6165, 6175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6173, 6175), True, 'import matplotlib.pyplot as plt\n')]
|
################################################################################
#
# test_xtram.py - testing the pyfeat xtram class
#
# author: <NAME> <<EMAIL>>
# author: <NAME> <<EMAIL>>
#
################################################################################
from nose.tools import assert_raises, assert_true
from pyfeat.estimator import XTRAM
from pytram import ExpressionError, NotConvergedWarning
import numpy as np
#XTRAM testing
def test_expression_error_None():
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ),None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),None, np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), None )
def test_expression_error_dim():
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(3), dtype=np.intc) )
def test_expression_error_markov():
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,4), dtype=np.intc) )
def test_expression_error_therm():
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(3,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) )
assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(3,4), dtype=np.intc) )
|
[
"numpy.ones"
] |
[((531, 570), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (538, 570), True, 'import numpy as np\n'), ((578, 610), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (585, 610), True, 'import numpy as np\n'), ((616, 648), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (623, 648), True, 'import numpy as np\n'), ((653, 689), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (660, 689), True, 'import numpy as np\n'), ((736, 775), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (743, 775), True, 'import numpy as np\n'), ((777, 817), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (784, 817), True, 'import numpy as np\n'), ((826, 858), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (833, 858), True, 'import numpy as np\n'), ((864, 900), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (871, 900), True, 'import numpy as np\n'), ((947, 986), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (954, 986), True, 'import numpy as np\n'), ((988, 1028), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (995, 1028), True, 'import numpy as np\n'), ((1031, 1063), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1038, 1063), True, 'import numpy as np\n'), ((1074, 1110), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (1081, 1110), True, 'import numpy as np\n'), ((1157, 1196), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1164, 1196), True, 'import numpy as np\n'), ((1198, 1238), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (1205, 1238), True, 'import numpy as np\n'), ((1241, 1273), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1248, 1273), True, 'import numpy as np\n'), ((1278, 1310), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1285, 1310), True, 'import numpy as np\n'), ((1400, 1436), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (1407, 1436), True, 'import numpy as np\n'), ((1439, 1479), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (1446, 1479), True, 'import numpy as np\n'), ((1483, 1515), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1490, 1515), True, 'import numpy as np\n'), ((1521, 1553), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1528, 1553), True, 'import numpy as np\n'), ((1559, 1595), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (1566, 1595), True, 'import numpy as np\n'), ((1642, 1681), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 
'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1649, 1681), True, 'import numpy as np\n'), ((1683, 1718), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.float64'}), '(shape=10, dtype=np.float64)\n', (1690, 1718), True, 'import numpy as np\n'), ((1725, 1757), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1732, 1757), True, 'import numpy as np\n'), ((1763, 1795), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1770, 1795), True, 'import numpy as np\n'), ((1801, 1837), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (1808, 1837), True, 'import numpy as np\n'), ((1884, 1923), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (1891, 1923), True, 'import numpy as np\n'), ((1925, 1965), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (1932, 1965), True, 'import numpy as np\n'), ((1969, 2001), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (1976, 2001), True, 'import numpy as np\n'), ((2007, 2039), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2014, 2039), True, 'import numpy as np\n'), ((2045, 2076), 'numpy.ones', 'np.ones', ([], {'shape': '(3)', 'dtype': 'np.intc'}), '(shape=3, dtype=np.intc)\n', (2052, 2076), True, 'import numpy as np\n'), ((2163, 2202), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (2170, 2202), True, 'import numpy as np\n'), ((2204, 2244), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), dtype=np.float64)\n', (2211, 2244), True, 'import numpy as np\n'), ((2247, 2279), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2254, 2279), True, 'import numpy as np\n'), ((2284, 2316), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2291, 2316), True, 'import numpy as np\n'), ((2322, 2358), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 4)', 'dtype': 'np.intc'}), '(shape=(2, 4), dtype=np.intc)\n', (2329, 2358), True, 'import numpy as np\n'), ((2440, 2479), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (2447, 2479), True, 'import numpy as np\n'), ((2481, 2521), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 10)', 'dtype': 'np.float64'}), '(shape=(3, 10), dtype=np.float64)\n', (2488, 2521), True, 'import numpy as np\n'), ((2524, 2556), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2531, 2556), True, 'import numpy as np\n'), ((2561, 2593), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2568, 2593), True, 'import numpy as np\n'), ((2599, 2635), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3), dtype=np.intc)\n', (2606, 2635), True, 'import numpy as np\n'), ((2682, 2721), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3, 3)', 'dtype': 'np.intc'}), '(shape=(2, 3, 3), dtype=np.intc)\n', (2689, 2721), True, 'import numpy as np\n'), ((2723, 2763), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 10)', 'dtype': 'np.float64'}), '(shape=(2, 10), 
dtype=np.float64)\n', (2730, 2763), True, 'import numpy as np\n'), ((2766, 2798), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2773, 2798), True, 'import numpy as np\n'), ((2803, 2835), 'numpy.ones', 'np.ones', ([], {'shape': '(10)', 'dtype': 'np.intc'}), '(shape=10, dtype=np.intc)\n', (2810, 2835), True, 'import numpy as np\n'), ((2841, 2877), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 4)', 'dtype': 'np.intc'}), '(shape=(3, 4), dtype=np.intc)\n', (2848, 2877), True, 'import numpy as np\n')]
|
import numpy as np
from os.path import join
from os import listdir
from .utils import *
from sklearn.preprocessing import normalize
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from scipy.signal import resample
from scipy.signal import decimate
import warnings
def load_data():
"""
    Data generator: yields (subject data, label, age) one subject at a time.
    Trims recordings with measurement defects (cf. data exploration)."""
data_path=join("..","PaHaW","PaHaW_public")#/00026/00026__1_1.svc"
folder_path=listdir(data_path)
folder_path.sort()
meta_path=join("data","PaHaW","corpus_PaHaW.csv")
meta_data=np.loadtxt(meta_path,dtype=str,skiprows=1,delimiter=";")#skip the first line == headers
labels=list(map(lambda x: 1 if x =="ON" else 0, meta_data[:,4]))
ages=meta_data[:,5].astype(int)
#Subjects 46 (control), 60 (PD) and 66 (control) didn't perform the spiral !
#data=[]
for i,folder in enumerate(folder_path):
subject=[]
task_path=listdir(join(data_path,folder))
task_path.sort()
if len(task_path)!=8:#subject didn't perform the spiral
#so we discard it
continue
#subject.append([])#add an empty array so that all tasks are on the same column number
for task_name in task_path:
path=join(data_path,folder,task_name)
#load data as float (not int because we will need to standardize it afterwards)
#and throw out the first line == number of lines in the file
task=np.loadtxt(path, dtype=float, skiprows=1,delimiter=" ")
if task[0][measure2index["button_status"]]!=1:#exam starts in air
for k,timestep in enumerate(task):
if(timestep[measure2index["button_status"]]==1):#wait for on paper button status
break
#then trims the data
task=task[k:]
elif any(task[:,measure2index["timestamp"]]>1e7):#defect of recording (see data exploration)
task=task[:-12]
subject.append(task)
yield subject,labels[i],ages[i]
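# Illustrative usage sketch (assumes the PaHaW files exist at the paths hard-coded in
# load_data): iterate the generator and collect the task count, label and age per subject.
def _example_load_data(max_subjects=2):
    summaries = []
    for subject, label, age in load_data():
        summaries.append((len(subject), label, age))
        if len(summaries) >= max_subjects:
            break
    return summaries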
## augmentation
def flip(task,axis_i):
    warnings.warn("flip should be deprecated and replaced by -x or -y depending on axis")
    if axis_i != 0 and axis_i != 1:
raise ValueError("expected 0 or 1 for value of axis_i, got {}".format(axis_i))
axis=task[0][axis_i]
for i,point in enumerate(task[:,axis_i]):
if point < axis:
task[i][axis_i]=axis+(axis-point)
else:
task[i][axis_i]=axis-(point-axis)
return task
def rotate(task, delta_rotate):
x0=task[0][0]#angle starts here
y0=task[0][1]
for i, (y,x) in enumerate(task[:,:2]):
vector=[x-x0,y-y0]
norm=np.linalg.norm(vector)
angle=np.angle(vector[0]+vector[1]*1j)#*1j to add imaginary part to y-coordinate
task[i][1]=np.cos(angle+delta_rotate)*norm#new x
task[i][0]=np.sin(angle+delta_rotate)*norm#new y
return scale(task,axis=0)#recenters the task
#rotated=rotate_(task.copy(),np.pi/10)
"""
h_flip=horizontal_flip(task.copy())
v_flip=vertical_flip(task.copy())
double_flip=horizontal_flip(v_flip.copy())
translation=np.random.rand()-0.5#because the std is one
translated=task.copy()
translated[:,0]+=translation
translated[:,1]+=translation
#~ match the translation scale
#as the standardized data ranges ~ from -2 to 2
zoom_factor=np.random.uniform(0.8,1.2)
zoomed=task.copy()
zoomed[:,0]*=zoom_factor
zoomed[:,1]*=zoom_factor"""
## preprocessing
def compute_movement(data):
"""Compute movement
Transforms data as Zhang et al. (cf Report #5)"""
print("computing movement\n")
button_i=measure2index["button_status"]
for i,task in enumerate(data):
for t in range(len(task)-1):
button=task[t+1][button_i]*task[t][button_i]
data[i][t]=task[t+1]-task[t]
data[i][t][button_i]=button
data[i]=data[i][:-1]#throw out the last point
return data
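# Illustrative sketch (assumes measure2index["button_status"] indexes one of the 7 columns
# of a PaHaW recording): compute_movement replaces each timestep by the difference to the
# next one and multiplies consecutive button states, so a task of length T becomes T-1.
def _example_compute_movement():
    toy = [np.arange(21, dtype=float).reshape(3, 7),
           np.arange(14, dtype=float).reshape(2, 7)]
    out = compute_movement(toy)
    return [t.shape for t in out]  # -> [(2, 7), (1, 7)]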
def task_selection(data,task_i,newhandpd=False):
"""set `task_i` to None if you want to train the model on all tasks at once (i.e. early fusion)
Else set `task_i` to the desired task index (cf. task2index)
"""
if task_i is not None:
print("\ntask index, name")
print(task_i,index2task[task_i])
#keep only one task
data=[subject[task_i] for subject in data]
#keep only one measure
#data=[[[raw[i][task][j][6]] for j in range(len(raw[i][task])) ] for i,subject in enumerate(raw) if len(raw[i][task])!=0]#discard the subjects that didn't perform spiral
elif newhandpd:
print("setting task_i to -1")
task_i=-1
else:
print("task_i is None so we will use all tasks to train the model")
print("len(data), len(data[0]) :")
print(len(data),len(data[0]))
return data
def compute_speed_accel(data):
"""on single task training, concatenates the instantaneous speed and acceleration to each timestep of the data.
Thus the data is 2 timesteps shorter (we discard the first 2)"""
print("computing speed and acceleration")
for i,task in enumerate(data):
speed=np.zeros((len(task)-1,1))
for t in range(len(task)-1):
            # norm of the displacement vector [y(t+1)-y(t), x(t+1)-x(t)]
            speed[t][0]=np.linalg.norm(task[t+1][:2]-task[t][:2])
accel=np.zeros((len(speed)-1,1))
for t in range(len(speed)-1):
accel[t][0]=speed[t+1]-speed[t]
#discard the 1st speed point
speed_accel=np.concatenate((speed[1:],accel),axis=1)
#discard the 2 firsts timesteps
data[i]=np.concatenate((task[2:],speed_accel),axis=1)
return data
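# Illustrative sketch: compute_speed_accel appends per-timestep speed and acceleration
# (derived from the first two columns) and drops the first two timesteps, so an input
# task of shape (T, D) comes back as (T-2, D+2).
def _example_compute_speed_accel():
    toy = [np.random.rand(10, 7)]
    out = compute_speed_accel(toy)
    return out[0].shape  # -> (8, 9)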
last_stroke_in_air_index=[[],#spiral
[4, 36, 71],#l
[11, 14, 16, 42],#le
[1, 13, 14, 20, 54]#les
]
non_letters_indexes=[[],#spiral
[(22,1), (26,2), (36,5), (37,1), (41,4), (46,4), (48,1),(3,4),
(3,2),(6,5), (6,3), (14,6), (14,4),(14,2), (16,6), (16,4), (16,2), (21,5), (71,6), (71,2)],#l
[(3,4), (6,5), (6,4), (6,2), (9,4), (9,3), (11,5), (12,1), (13, 1),
(14, 6), (14, 1), (16, 5), (18, 3), (18, 2), (18, 1), (20, 3), (26, 2),
(26, 1), (27, 4), (41, 5), (41, 2), (42, 7), (42, 5), (42, 3), (65, 5), (65, 3)],#le
[(1, 7),(1, 6),(3, 4),(6, 4),(6, 1),(9, 1),(13, 5),(14, 10), (14, 9), (14, 8), (14, 7),(14, 4),(14, 2),
(18, 4), (18, 3), (18, 2), (18, 1),(20, 8),(20, 6),(20, 4),(20, 2),(23, 4),(26, 4),(26, 1),(38, 3),
(48, 4),(50, 4),(54, 9),(54, 7),(54, 5),(54, 3),(54, 1),(62, 4),(65, 6),(65, 4),(65, 1)]#les
]
too_many_letters_indexes=[[],#spiral
[12, 21, 23, 44, 67],#l
[],#le
[1,37,62]#les
]
def LetterSplit(data,task_i):
print("Merging strokes into letters")
for j in range(len(data)):
tmp=[]
for i in range(0,len(data[j]),2):
try :
data[j][i+1]
except IndexError:
tmp.append(data[j][i])
else:
tmp.append(np.concatenate((data[j][i],data[j][i+1]),axis=0))
data[j]=tmp
def pop(i,j):
data[i][j-1]=np.concatenate((data[i][j-1],data[i][j]))
data[i].pop(j)
for i,j in non_letters_indexes[task_i]:
pop(i,j)
for i in too_many_letters_indexes[task_i]:
data[i].pop()
assert [i for i,s in enumerate(data) if len(s) != 5]==[]
return data
def DiscardNonLetters(data,task_i):
print("discarding non letters from stroke list")
for i,j in non_letters_indexes[task_i]:
if 2*j+1<len(data[i]):
data[i].pop(2*j+1)
data[i].pop(2*j)
for i in too_many_letters_indexes[task_i]:#did 6 l instead of 5
data[i].pop()
data[i].pop()
for i in last_stroke_in_air_index[task_i]:#in air stroke after last l
data[i].pop()
assert [i for i,s in enumerate(data) if len(s) != 9]==[]
return data
def massage_data(data,task_i,compute_speed_accel_,compute_movement_,downsampling_factor,
window_size,paper_air_split=False,newhandpd=False,max_len=None,letter_split=False,discard_non_letters=False,pad_subs=False,trim=False):
"""
    Returns the preprocessed data.
    Set `task_i` to None if you want to train the model on all tasks at once (i.e. early fusion);
    else set `task_i` to the desired task index (cf. task2index).
    `compute_movement_` transforms the data as Zhang et al. (cf. Report #5).
    Set `downsampling_factor` to `1` if you don't want to downsample.
    Set `window_size` to `None` if you don't want to split data into subsequences of fixed length.
    Set `paper_air_split` to `False` if you don't want to split data into strokes.
"""
data=task_selection(data,task_i,newhandpd)
if compute_speed_accel_:
data=compute_speed_accel(data)
elif compute_movement_:
data=compute_movement(data)
else:
print("\nneither speed nor movement was computed (i.e. data was not transformed)\n")
## Split in subsequence (or not)
#Set `window_size` to `None` if you don't want to split data into subsequence of fixed length
if task_i is not None:
overlap=90
if window_size is not None:
print("\nsplitting data into subsequences")
for i,task in enumerate(data):
data[i]=[task[w:w+window_size] for w in range(0,len(task)-window_size,window_size-overlap)]
print("len(data), data[0].shape, total n° of subsequences (i.e. training examples) :")
print(len(data),",",len(data[0]),len(data[0][0]),len(data[0][0][0]),",",sum([len(subs) for subs in data]))
elif paper_air_split:
print("\nsplitting data into strokes")
for j, task in enumerate(data):
changes = []
for i in range(len(task)-1):
if task[i][measure2index["button_status"]]!=task[i+1][measure2index["button_status"]]:
changes.append(i+1)
task=np.split(task,changes)
data[j]=task
if letter_split:#todo : rename in token split
data=LetterSplit(data,task_i)
elif discard_non_letters:
data=DiscardNonLetters(data,task_i)
print("len(data), data[0].shape, total n° of subsequences (i.e. training examples) :")
print(len(data),",",len(data[0]),len(data[0][0]),len(data[0][0][0]),",",sum([len(subs) for subs in data]))
else:
print("the task is represented as one single sequence (i.e. data was not transformed)")
if window_size is not None or paper_air_split or task_i is None:#subsequences or multiple tasks
print('computing global means')
for i,subject in enumerate(data):
for j,sub in enumerate(subject):
#removes t0 from each timestamps so the time stamp measure represents the length of the exams
data[i][j][:,measure2index["timestamp"]]-=data[i][j][0,measure2index["timestamp"]]
if task_i is None:
#computes overall measures and stds per task
data=np.asarray(data)
means,stds=[],[]
for task in range(data.shape[1]):
flat=flat_list(data[:,task])
means.append(np.mean(flat,axis=0)[measure2index["timestamp"]])
stds.append(np.std(flat,axis=0)[measure2index["timestamp"]])
else:
#computes overall measures and stds
flat=np.asarray(flat_list(flat_list(data)))
means,stds=np.mean(flat,axis=0)[measure2index["timestamp"]],np.std(flat,axis=0)[measure2index["timestamp"]]
print("scaling")
for i,subject in enumerate(data):
for j,sub in enumerate(subject):
data[i][j]=scale(sub,axis=0)
#keep the button_status unscaled
data[i][j][:,[measure2index["button_status"]]]=sub[:,[measure2index["button_status"]]]
#globally scale the timestamp
if task_i is None:
data[i][j][:,[measure2index["timestamp"]]]=(sub[:,[measure2index["timestamp"]]]-means[j])/stds[j]
else:
data[i][j][:,[measure2index["timestamp"]]]=(sub[:,[measure2index["timestamp"]]]-means)/stds
if downsampling_factor != 1:
if i ==0 and j==0:
print("and downsampling")
data[i][j]=decimate(data[i][j], downsampling_factor,axis=0)#then downsample
#rounds the button status because decimate applies a filter
data[i][j][:,[measure2index["button_status"]]]=[[round(b[0])] for b in data[i][j][:,[measure2index["button_status"]]]]
else:
print('computing global means')
for i in range(len(data)):
#removes t0 from each timestamps so the time stamp measure represents the length of the exams
data[i][:,measure2index["timestamp"]]-=data[i][0,measure2index["timestamp"]]
#computes overall measures and stds
flat=np.asarray(flat_list(data))
means,stds=np.mean(flat,axis=0)[measure2index["timestamp"]],np.std(flat,axis=0)[measure2index["timestamp"]]
## Scale then downsample (or not) then concatenate task id (or not)
print("scaling")
for i,subject in enumerate(data):
data[i]=scale(subject,axis=0)
#keep the button_status unscaled
data[i][:,[measure2index["button_status"]]]=subject[:,[measure2index["button_status"]]]
#globally scale the timestamp
data[i][:,[measure2index["timestamp"]]]=(subject[:,[measure2index["timestamp"]]]-means)/stds
if downsampling_factor != 1:
if i ==0:
print("and downsampling")
data[i]=decimate(data[i], downsampling_factor,axis=0)#then downsample
#rounds the button status because decimate applies a filter
data[i][:,[measure2index["button_status"]]]=[[round(b[0])] for b in data[i][:,[measure2index["button_status"]]]]
if max_len is not None:
print("padding data at {} timesteps. Trimming : {} ".format(max_len,trim))
if task_i is None :
for i,subject in enumerate(data):
for j,task in enumerate(subject):#task
if len(task) > max_len[j]:
if trim:
data[i][j]=task[:max_len[j]]
else:
data[i][j]=np.concatenate((task,np.zeros(shape=(max_len[j]-len(task),task.shape[1]))))
elif window_size is not None or paper_air_split :
for i,subject in enumerate(data):
for j,sub in enumerate(subject):#sub
if len(sub) > max_len:
if trim:
data[i][j]=sub[:max_len]
else:
data[i][j]=np.concatenate((sub,np.zeros(shape=(max_len-len(sub),sub.shape[1]))))
if pad_subs:
if i == 0:
print("padding # of subsequences to",max_strokes[task_i])
for _ in range(max_strokes[task_i]-len(subject)):
data[i].append(np.zeros(shape=(max_len,sub.shape[1])))
else:#only one task
for i,task in enumerate(data):
if len(task) > max_len:
if trim:
data[i]=task[:max_len]
else:
data[i]=np.concatenate((task,np.zeros(shape=(max_len-len(task),task.shape[1]))))
print("converting data to numpy array")
data=np.asarray(data)
print("data shape :",data.shape)
return data
|
[
"numpy.mean",
"os.listdir",
"numpy.std",
"numpy.asarray",
"os.path.join",
"numpy.angle",
"scipy.signal.decimate",
"numpy.split",
"numpy.zeros",
"numpy.cos",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.sin",
"numpy.loadtxt",
"sklearn.preprocessing.scale"
] |
[((478, 513), 'os.path.join', 'join', (['""".."""', '"""PaHaW"""', '"""PaHaW_public"""'], {}), "('..', 'PaHaW', 'PaHaW_public')\n", (482, 513), False, 'from os.path import join\n'), ((551, 569), 'os.listdir', 'listdir', (['data_path'], {}), '(data_path)\n', (558, 569), False, 'from os import listdir\n'), ((608, 649), 'os.path.join', 'join', (['"""data"""', '"""PaHaW"""', '"""corpus_PaHaW.csv"""'], {}), "('data', 'PaHaW', 'corpus_PaHaW.csv')\n", (612, 649), False, 'from os.path import join\n'), ((662, 721), 'numpy.loadtxt', 'np.loadtxt', (['meta_path'], {'dtype': 'str', 'skiprows': '(1)', 'delimiter': '""";"""'}), "(meta_path, dtype=str, skiprows=1, delimiter=';')\n", (672, 721), True, 'import numpy as np\n'), ((3047, 3066), 'sklearn.preprocessing.scale', 'scale', (['task'], {'axis': '(0)'}), '(task, axis=0)\n', (3052, 3066), False, 'from sklearn.preprocessing import scale\n'), ((15678, 15694), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (15688, 15694), True, 'import numpy as np\n'), ((2810, 2832), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (2824, 2832), True, 'import numpy as np\n'), ((2847, 2885), 'numpy.angle', 'np.angle', (['(vector[0] + vector[1] * 1.0j)'], {}), '(vector[0] + vector[1] * 1.0j)\n', (2855, 2885), True, 'import numpy as np\n'), ((5627, 5669), 'numpy.concatenate', 'np.concatenate', (['(speed[1:], accel)'], {'axis': '(1)'}), '((speed[1:], accel), axis=1)\n', (5641, 5669), True, 'import numpy as np\n'), ((5724, 5771), 'numpy.concatenate', 'np.concatenate', (['(task[2:], speed_accel)'], {'axis': '(1)'}), '((task[2:], speed_accel), axis=1)\n', (5738, 5771), True, 'import numpy as np\n'), ((7181, 7225), 'numpy.concatenate', 'np.concatenate', (['(data[i][j - 1], data[i][j])'], {}), '((data[i][j - 1], data[i][j]))\n', (7195, 7225), True, 'import numpy as np\n'), ((1039, 1062), 'os.path.join', 'join', (['data_path', 'folder'], {}), '(data_path, folder)\n', (1043, 1062), False, 'from os.path import join\n'), ((1355, 1389), 'os.path.join', 'join', (['data_path', 'folder', 'task_name'], {}), '(data_path, folder, task_name)\n', (1359, 1389), False, 'from os.path import join\n'), ((1570, 1626), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'dtype': 'float', 'skiprows': '(1)', 'delimiter': '""" """'}), "(path, dtype=float, skiprows=1, delimiter=' ')\n", (1580, 1626), True, 'import numpy as np\n'), ((2941, 2969), 'numpy.cos', 'np.cos', (['(angle + delta_rotate)'], {}), '(angle + delta_rotate)\n', (2947, 2969), True, 'import numpy as np\n'), ((2998, 3026), 'numpy.sin', 'np.sin', (['(angle + delta_rotate)'], {}), '(angle + delta_rotate)\n', (3004, 3026), True, 'import numpy as np\n'), ((5316, 5361), 'numpy.linalg.norm', 'np.linalg.norm', (['(task[t + 1][:2] - task[t][:2])'], {}), '(task[t + 1][:2] - task[t][:2])\n', (5330, 5361), True, 'import numpy as np\n'), ((11109, 11125), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (11119, 11125), True, 'import numpy as np\n'), ((13375, 13397), 'sklearn.preprocessing.scale', 'scale', (['subject'], {'axis': '(0)'}), '(subject, axis=0)\n', (13380, 13397), False, 'from sklearn.preprocessing import scale\n'), ((11779, 11797), 'sklearn.preprocessing.scale', 'scale', (['sub'], {'axis': '(0)'}), '(sub, axis=0)\n', (11784, 11797), False, 'from sklearn.preprocessing import scale\n'), ((13115, 13136), 'numpy.mean', 'np.mean', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', (13122, 13136), True, 'import numpy as np\n'), ((13164, 13184), 'numpy.std', 'np.std', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', 
(13170, 13184), True, 'import numpy as np\n'), ((13826, 13872), 'scipy.signal.decimate', 'decimate', (['data[i]', 'downsampling_factor'], {'axis': '(0)'}), '(data[i], downsampling_factor, axis=0)\n', (13834, 13872), False, 'from scipy.signal import decimate\n'), ((7072, 7124), 'numpy.concatenate', 'np.concatenate', (['(data[j][i], data[j][i + 1])'], {'axis': '(0)'}), '((data[j][i], data[j][i + 1]), axis=0)\n', (7086, 7124), True, 'import numpy as np\n'), ((9991, 10014), 'numpy.split', 'np.split', (['task', 'changes'], {}), '(task, changes)\n', (9999, 10014), True, 'import numpy as np\n'), ((11543, 11564), 'numpy.mean', 'np.mean', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', (11550, 11564), True, 'import numpy as np\n'), ((11592, 11612), 'numpy.std', 'np.std', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', (11598, 11612), True, 'import numpy as np\n'), ((12447, 12496), 'scipy.signal.decimate', 'decimate', (['data[i][j]', 'downsampling_factor'], {'axis': '(0)'}), '(data[i][j], downsampling_factor, axis=0)\n', (12455, 12496), False, 'from scipy.signal import decimate\n'), ((11275, 11296), 'numpy.mean', 'np.mean', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', (11282, 11296), True, 'import numpy as np\n'), ((11353, 11373), 'numpy.std', 'np.std', (['flat'], {'axis': '(0)'}), '(flat, axis=0)\n', (11359, 11373), True, 'import numpy as np\n'), ((15275, 15314), 'numpy.zeros', 'np.zeros', ([], {'shape': '(max_len, sub.shape[1])'}), '(shape=(max_len, sub.shape[1]))\n', (15283, 15314), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
def get_mean_stds(data):
return np.mean(data), np.std(data) / np.sqrt(len(data)) * 1.96
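# Illustrative sketch: the second value returned by get_mean_stds is the half-width of a
# normal-approximation 95% confidence interval of the mean (1.96 * the standard error).
def _example_get_mean_stds():
    m, ci = get_mean_stds(np.array([0.2, 0.4, 0.6]))
    return m, ci  # -> (0.4, ~0.185)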
if __name__ == '__main__':
labels = ['OpenTAL', 'EDL', 'SoftMax']
result_folders = ['edl_oshead_iou', 'edl_15kc', 'default']
colors = ['k', 'g', 'm']
split = '0'
tiou_target = 0.3
tidx = 0 # 0-4 for [0,3...,0.7]
items = ['$TP_{u2u}$', '$TP_{k2k}$', '$FP_{u2k}$', '$FP_{k2k}$', '$FP_{k2u}$', '$FP_{bg2u}$', '$FP_{bg2k}$']
fontsize = 18
width = 0.25
fig_path = 'experiments/figs'
os.makedirs(fig_path, exist_ok=True)
xrng = np.arange(len(items))
fig, ax = plt.subplots(1,1, figsize=(8,5))
plt.rcParams["font.family"] = "Arial"
for idx, (folder, label, color) in enumerate(zip(result_folders, labels, colors)):
# load result file
result_file = os.path.join('output', folder, f'split_{split}', 'open_stats.pkl')
with open(result_file, 'rb') as f:
stats = pickle.load(f)
print(label)
all_scores = 1 - np.array(stats['ood_scores'])
mean_scores = np.zeros((7))
std_scores = np.zeros((7))
mean_scores[0], std_scores[0] = get_mean_stds(all_scores[stats['tp_u2u'][tidx] > 0])
mean_scores[1], std_scores[1] = get_mean_stds(all_scores[stats['tp_k2k'][tidx].sum(axis=0) > 0])
mean_scores[2], std_scores[2] = get_mean_stds(all_scores[stats['fp_u2k'][tidx].sum(axis=0) > 0])
mean_scores[3], std_scores[3] = get_mean_stds(all_scores[stats['fp_k2k'][tidx].sum(axis=0) > 0])
mean_scores[4], std_scores[4] = get_mean_stds(all_scores[stats['fp_k2u'][tidx] > 0])
mean_scores[5], std_scores[5] = get_mean_stds(all_scores[stats['fp_bg2u'][tidx] > 0])
mean_scores[6], std_scores[6] = get_mean_stds(all_scores[stats['fp_bg2k'][tidx].sum(axis=0) > 0])
h = ax.bar(xrng + (idx-1) * width, mean_scores, yerr=std_scores, width=width, label=f'{label}', align='center', alpha=0.5, ecolor='black', color=color)
ax.set_ylim(0, 1.2)
ax.set_ylabel('OOD Scores', fontsize=fontsize)
ax.set_xticks(xrng)
ax.set_xticklabels(items, fontsize=fontsize-3)
ax.legend(fontsize=fontsize, loc='upper center', ncol=3)
plt.yticks(fontsize=fontsize)
plt.tight_layout()
plt.savefig(os.path.join(fig_path, 'OOD_Score_compare.png'))
|
[
"numpy.mean",
"os.makedirs",
"os.path.join",
"pickle.load",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"numpy.std",
"matplotlib.pyplot.subplots"
] |
[((595, 631), 'os.makedirs', 'os.makedirs', (['fig_path'], {'exist_ok': '(True)'}), '(fig_path, exist_ok=True)\n', (606, 631), False, 'import os\n'), ((681, 715), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 5)'}), '(1, 1, figsize=(8, 5))\n', (693, 715), True, 'import matplotlib.pyplot as plt\n'), ((2262, 2291), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'fontsize'}), '(fontsize=fontsize)\n', (2272, 2291), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2314), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2312, 2314), True, 'import matplotlib.pyplot as plt\n'), ((112, 125), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (119, 125), True, 'import numpy as np\n'), ((892, 958), 'os.path.join', 'os.path.join', (['"""output"""', 'folder', 'f"""split_{split}"""', '"""open_stats.pkl"""'], {}), "('output', folder, f'split_{split}', 'open_stats.pkl')\n", (904, 958), False, 'import os\n'), ((1135, 1146), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (1143, 1146), True, 'import numpy as np\n'), ((1170, 1181), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (1178, 1181), True, 'import numpy as np\n'), ((2331, 2378), 'os.path.join', 'os.path.join', (['fig_path', '"""OOD_Score_compare.png"""'], {}), "(fig_path, 'OOD_Score_compare.png')\n", (2343, 2378), False, 'import os\n'), ((1022, 1036), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1033, 1036), False, 'import pickle\n'), ((1083, 1112), 'numpy.array', 'np.array', (["stats['ood_scores']"], {}), "(stats['ood_scores'])\n", (1091, 1112), True, 'import numpy as np\n'), ((127, 139), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (133, 139), True, 'import numpy as np\n')]
|
from textwrap import dedent
import glob
import os
import subprocess
import matplotlib
import numpy
import pandas
from wqio.tests import helpers
from wqio.utils import numutils
def _sig_figs(x):
""" Wrapper around `utils.sigFig` (n=3, tex=True) requiring only
argument for the purpose of easily "apply"-ing it to a pandas
dataframe.
"""
return numutils.sigFigs(x, n=3, tex=True)
def refresh_index(df):
""" gets around weird pandas block manager bugs that rise with
deeply nested indexes
"""
if isinstance(df.index, pandas.MultiIndex):
return df.reset_index().set_index(df.index.names)
else:
return df
def get_level_position(df, levelname):
_names = numpy.array(df.index.names)
ri, = numpy.nonzero(_names == levelname)
return ri[0]
def sanitizeTex(texstring):
""" Cleans up overly eager LaTeX renderings from pandas.
Parameters
----------
texstring : string
The string of LaTeX code to be cleaned up
Returns
-------
sanitized : string
Cleaned up LaTeX string.
"""
newstring = (
texstring.replace(r"\\%", r"\%")
.replace(r"\\", r"\tabularnewline")
.replace("\$", "$")
.replace("\_", "_")
.replace("ug/L", "\si[per-mode=symbol]{\micro\gram\per\liter}")
.replace(r"\textbackslashtimes", r"\times")
.replace(r"\textbackslash", "")
.replace(r"\textasciicircum", r"^")
.replace("\{", "{")
.replace("\}", "}")
)
return newstring
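# Illustrative example (assumption about typical pandas output): sanitizeTex undoes the
# escaping pandas applies to underscores and braces.
def _example_sanitizeTex():
    return sanitizeTex(r"col\_name \{x\}")  # -> "col_name {x}"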
def csvToTex(
csvpath,
na_rep="--",
float_format=_sig_figs,
pcols=15,
addmidrules=None,
replaceTBrules=True,
replacestats=True,
):
""" Convert data in CSV format to a LaTeX table
Parameters
----------
csvpath : string
Full name and file path of the input data file.
na_rep : string, default "--"
How NA values should be written.
float_format : callable (default = `_sig_figs`)
Single input function that will return the correct
representation of floating point numbers.
pcols : int (default = 15)
Width of the columns for the LaTeX table.
addmidrules : string or list of strings, optional
(List of) string(s) to be replaced with "\midrule".
replaceTBrules : bool, default = True
When True, replaces "\toprule" and "\bottomrule" with
"\midrule".
replacestats : bool, default = True
When True, the labels of statistics are cleaned up a bit (e.g.,
"75%" -> "75th Percentile")
Returns
-------
None
"""
# read in the data pandas
data = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])
# open a new file and use pandas to dump the latex and close out
# with open(texpath, 'w') as texfile:
latex = data.to_latex(float_format=float_format, na_rep=na_rep, index=False)
if pcols > 0:
lines = []
header, rest_of_file = latex.split("\n", maxsplit=1)
        # create a new header
header_sections = header.split("{")
old_col_def = header_sections[-1][:-1]
new_col_def = ""
for n in range(len(old_col_def)):
if n == 0:
new_col_def = new_col_def + "l"
new_col_def = new_col_def + "x{%smm}" % pcols
lines.append(header.replace(old_col_def, new_col_def))
if replaceTBrules:
rest_of_file = rest_of_file.replace("\\toprule", "\\midrule")
rest_of_file = rest_of_file.replace("\\bottomrule", "\\midrule")
if replacestats:
rest_of_file = rest_of_file.replace("std", "Std. Dev.")
rest_of_file = rest_of_file.replace("50\\%", "Median")
rest_of_file = rest_of_file.replace("25\\%", "25th Percentile")
rest_of_file = rest_of_file.replace("75\\%", "75th Percentile")
rest_of_file = rest_of_file.replace("count", "Count")
rest_of_file = rest_of_file.replace("mean", "Mean")
rest_of_file = rest_of_file.replace("min ", "Min. ")
rest_of_file = rest_of_file.replace("max", "Max.")
# XXX: omg hack
rest_of_file = rest_of_file.replace("AluMin.um", "Aluminum")
if addmidrules is not None:
if hasattr(addmidrules, "append"):
for amr in addmidrules:
rest_of_file = rest_of_file.replace(amr, "\\midrule\n%s" % amr)
else:
            rest_of_file = rest_of_file.replace(addmidrules, "\\midrule\n%s" % addmidrules)
lines.append(rest_of_file)
return sanitizeTex("\n".join(lines))
def csvToXlsx(csvpath, xlsxpath, na_rep="--", float_format=None):
""" Convert data in CSV format to an Excel workbook
Parameters
----------
csvpath : string
Full name and file path of the input data file.
xlsxpath : string
Full name and file path of the output .xlsx file.
na_rep : string (default = "--")
How NA values should be represented.
float_format : callable, optional
Single input function that will return the correct
representation of floating point numbers.
Returns
-------
None
"""
# read in the data pandas
data = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])
# use pandas to dump the excel file and close out
data.to_excel(xlsxpath, float_format=float_format, na_rep=na_rep, index=False)
def makeTexTable(
tablefile, caption, sideways=False, footnotetext=None, clearpage=False, pos="h!"
):
""" Creates a table block for a LaTeX document. Does not add it any
file.
Parameters
----------
tablefile : string
Name of the .tex file that actually contains the table.
caption : string
Caption/title that should be given to the table.
sideways : bool (default = False)
When True, a landscape table block is produced. Otherwise, the
table is in portrait mode.
footnotetext : string, optional
Any text that should be added as a footnote.
clearpage : bool (default = False)
When True, a "\clearpage" command is appended to the end of the
table block.
pos : string (default = "h!")
LaTeX float position specification. Default values tries its
best to place the table where the block appears in the LaTeX
document.
Returns
-------
tablestring : string
The table block text that can be -- but has not been -- added
to a LaTeX document.
"""
if sideways:
tabletype = "sidewaystable"
clearpage = True
else:
tabletype = "table"
if clearpage:
clearpagetext = r"\clearpage"
else:
clearpagetext = ""
if footnotetext is None:
notes = ""
else:
notes = footnotetext
tablestring = (
dedent(
r"""
\begin{%s}[%s]
\rowcolors{1}{CVCWhite}{CVCLightGrey}
\caption{%s}
\centering
\input{%s}
\end{%s}
%s
%s
"""
)
% (tabletype, pos, caption, tablefile, tabletype, notes, clearpagetext)
)
return tablestring
def makeLongLandscapeTexTable(df, caption, label, footnotetext=None, index=False):
""" Create a multi-page landscape label for a LaTeX document.
Parameters
----------
df : pandas.DataFrame
Dataframe to be turned into the table.
caption : string
Caption/title to be given to the table.
label : string
Unique identifier for references to table within LaTeX.
footnotetext : string, optional
Any text that should be added as a footnote.
index : bool (default = False)
Toggles the inclusion of the dataframe's index in to the table.
Default behavior omits it.
Returns
-------
tablestring : string
The table block text that can be -- but has not been -- added
to a LaTeX document.
"""
if footnotetext is None:
notes = ""
else:
notes = footnotetext
tabletexstring = df.to_latex(index=index, float_format=_sig_figs, na_rep="--")
valuelines = tabletexstring.split("\n")[4:-3]
valuestring = "\n".join(valuelines)
def _multicol_format(args):
n, col = args
if n == 0:
align = "l"
else:
align = "p{16mm}"
return r"\multicolumn{1}{%s}{%s}" % (align, col.replace("%", r"\%"))
dfcols = df.columns.tolist()
colalignlist = ["c"] * len(dfcols)
colalignlist[0] = "l"
colalignment = "".join(colalignlist)
col_enum = list(enumerate(dfcols))
columns = " &\n ".join(list(map(_multicol_format, col_enum)))
tablestring = (
dedent(
r"""
\begin{landscape}
\centering
\rowcolors{1}{CVCWhite}{CVCLightGrey}
\begin{longtable}{%s}
\caption{%s} \label{%s} \\
\toprule
%s \\
\toprule
\endfirsthead
\multicolumn{%d}{c}
{{\bfseries \tablename\ \thetable{} -- continued from previous page}} \\
\toprule
%s \\
\toprule
\endhead
\toprule
\rowcolor{CVCWhite}
\multicolumn{%d}{r}{{Continued on next page...}} \\
\bottomrule
\endfoot
\bottomrule
\endlastfoot
%s
\end{longtable}
\end{landscape}
%s
\clearpage
"""
)
% (
colalignment,
caption,
label,
columns,
len(dfcols),
columns,
len(dfcols),
valuestring,
notes,
)
)
return tablestring
def makeTexFigure(figFile, caption, pos="hb", clearpage=True):
""" Create the LaTeX for include a figure in a document. Does not
actually add it to any document.
Parameters
----------
figfile : string
Name of the image (.pdf) file that actually contains the figure.
caption : string
Caption/title that should be given to the table.
sideways : bool (default = False)
When True, a landscape table block is produced. Otherwise, the
table is in portrait mode.
footnotetext : string, optional
Any text that should be added as a footnote.
clearpage : bool (default = False)
When True, a "\clearpage" command is appended to the end of the
table block.
pos : string (default = "h!")
LaTeX float position specification. Default values tries its
best to place the table where the block appears in the LaTeX
document.
Returns
-------
tablestring : string
The table block text that can be -- but has not been -- added
to a LaTeX document.
"""
if clearpage:
clearpagetext = r"\clearpage"
else:
clearpagetext = ""
figurestring = (
dedent(
r"""
\begin{figure}[%s] %% FIGURE
\centering
\includegraphics[scale=1.00]{%s}
\caption{%s}
\end{figure} %% FIGURE
%s
"""
)
% (pos, figFile, caption, clearpagetext)
)
return figurestring
def processFilename(filename):
""" Sanitizes a filename for LaTeX. DON'T feed it a full path.
Parameters
----------
filename : string
The name of the file to be sanitized.
Returns
-------
sanitized : string
Mutated filename without characters that might cause errors in
LaTeX.
Example
-------
>>> processFilename('FigureBenzo/Inzo_1')
'FigureBenzoInzo1'
"""
badchars = [" ", ",", "+", "$", "_", "{", "}", "/", "&"]
fn = filename
for bc in badchars:
fn = fn.replace(bc, "")
return fn
def setMPLStyle(serif=False):
if serif:
fontfamily = "serif"
preamble = [
r"\usepackage{siunitx}",
r"\sisetup{detect-all}",
r"\usepackage{fourier}",
]
else:
fontfamily = "sans-serif"
preamble = [
r"\usepackage{siunitx}",
r"\sisetup{detect-all}",
r"\usepackage{helvet}",
r"\usepackage{sansmath}",
r"\sansmath",
]
style_dict = {
"text.usetex": True,
"font.family": [fontfamily],
"font.serif": ["Utopia", "Palantino"],
"font.sans-serif": ["Helvetica", "Arial"],
"lines.linewidth": 0.5,
"patch.linewidth": 0.5,
"text.latex.preamble": preamble,
"axes.linewidth": 0.5,
"axes.grid": True,
"axes.titlesize": 12,
"axes.labelsize": 10,
"xtick.labelsize": 10,
"xtick.direction": "out",
"ytick.labelsize": 10,
"ytick.direction": "out",
"grid.linewidth": 0.5,
"legend.fancybox": True,
"legend.numpoints": 1,
"legend.fontsize": 8,
"figure.figsize": (6.5, 3.5),
"savefig.dpi": 300,
}
matplotlib.rcParams.update(style_dict)
class LaTeXDirectory(object):
""" Context manager to help compile latex docs from python.
Switches to the latex document's folder and remains there while
inside the manager. The present working directory is restored once
the context manager exits.
Parameters
----------
texpath : string
The LaTeX source file or the directory in which it is found.
"""
def __init__(self, texpath):
self.home = os.getcwd()
if os.path.isfile(texpath):
self.texpath = os.path.dirname(texpath)
else:
self.texpath = texpath
def __enter__(self):
os.chdir(self.texpath)
return self
def __exit__(self, *args):
os.chdir(self.home)
def compile(self, texdoc, clean=False):
""" Compile a LaTeX document inside the context manager
Parameters
----------
texdoc : string
File name of a .tex file in the LaTeX directory
clean : bool (default = False)
            When True, all of the non-PDF files resulting from compilation
are removed. By default, they are left on the file system.
Returns
-------
tex : int or None
The status (1 or 0) of the compilation. If LaTeX is not
available, None is returned.
"""
if helpers.checkdep_tex() is not None:
# use ``pdflatex`` to compile the document
tex = subprocess.call(
["pdflatex", texdoc, "--quiet"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
if clean:
extensions = ["aux", "log", "nav", "out", "snm", "toc"]
for ext in extensions:
junkfiles = glob.glob("*.{}".format(ext))
for junk in junkfiles:
os.remove(junk)
else:
tex = None
return tex
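# Illustrative usage sketch (assumes 'report/report.tex' exists and pdflatex is installed):
# compile a document from inside its own folder so relative \input and figure paths resolve.
def _example_compile(texdoc="report/report.tex"):
    with LaTeXDirectory(texdoc) as latex_dir:
        return latex_dir.compile(os.path.basename(texdoc), clean=True)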
|
[
"textwrap.dedent",
"wqio.utils.numutils.sigFigs",
"pandas.read_csv",
"os.getcwd",
"os.path.isfile",
"numpy.array",
"os.chdir",
"os.path.dirname",
"wqio.tests.helpers.checkdep_tex",
"subprocess.call",
"numpy.nonzero",
"os.remove"
] |
[((338, 372), 'wqio.utils.numutils.sigFigs', 'numutils.sigFigs', (['x'], {'n': '(3)', 'tex': '(True)'}), '(x, n=3, tex=True)\n', (354, 372), False, 'from wqio.utils import numutils\n'), ((687, 714), 'numpy.array', 'numpy.array', (['df.index.names'], {}), '(df.index.names)\n', (698, 714), False, 'import numpy\n'), ((725, 759), 'numpy.nonzero', 'numpy.nonzero', (['(_names == levelname)'], {}), '(_names == levelname)\n', (738, 759), False, 'import numpy\n'), ((2622, 2685), 'pandas.read_csv', 'pandas.read_csv', (['csvpath'], {'parse_dates': '(False)', 'na_values': '[na_rep]'}), '(csvpath, parse_dates=False, na_values=[na_rep])\n', (2637, 2685), False, 'import pandas\n'), ((5234, 5297), 'pandas.read_csv', 'pandas.read_csv', (['csvpath'], {'parse_dates': '(False)', 'na_values': '[na_rep]'}), '(csvpath, parse_dates=False, na_values=[na_rep])\n', (5249, 5297), False, 'import pandas\n'), ((6862, 7048), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{%s}[%s]\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\caption{%s}\n \\\\centering\n \\\\input{%s}\n \\\\end{%s}\n %s\n %s\n """'], {}), '(\n """\n \\\\begin{%s}[%s]\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\caption{%s}\n \\\\centering\n \\\\input{%s}\n \\\\end{%s}\n %s\n %s\n """\n )\n', (6868, 7048), False, 'from textwrap import dedent\n'), ((8726, 9521), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{landscape}\n \\\\centering\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\begin{longtable}{%s}\n \\\\caption{%s} \\\\label{%s} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endfirsthead\n\n \\\\multicolumn{%d}{c}\n {{\\\\bfseries \\\\tablename\\\\ \\\\thetable{} -- continued from previous page}} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endhead\n\n \\\\toprule\n \\\\rowcolor{CVCWhite}\n \\\\multicolumn{%d}{r}{{Continued on next page...}} \\\\\\\\\n \\\\bottomrule\n \\\\endfoot\n\n \\\\bottomrule\n \\\\endlastfoot\n\n %s\n\n \\\\end{longtable}\n \\\\end{landscape}\n %s\n \\\\clearpage\n """'], {}), '(\n """\n \\\\begin{landscape}\n \\\\centering\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\begin{longtable}{%s}\n \\\\caption{%s} \\\\label{%s} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endfirsthead\n\n \\\\multicolumn{%d}{c}\n {{\\\\bfseries \\\\tablename\\\\ \\\\thetable{} -- continued from previous page}} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endhead\n\n \\\\toprule\n \\\\rowcolor{CVCWhite}\n \\\\multicolumn{%d}{r}{{Continued on next page...}} \\\\\\\\\n \\\\bottomrule\n \\\\endfoot\n\n \\\\bottomrule\n \\\\endlastfoot\n\n %s\n\n \\\\end{longtable}\n \\\\end{landscape}\n %s\n \\\\clearpage\n """\n )\n', (8732, 9521), False, 'from textwrap import dedent\n'), ((10962, 11154), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{figure}[%s] %% FIGURE\n \\\\centering\n \\\\includegraphics[scale=1.00]{%s}\n \\\\caption{%s}\n \\\\end{figure} %% FIGURE\n %s\n """'], {}), '(\n """\n \\\\begin{figure}[%s] %% FIGURE\n \\\\centering\n \\\\includegraphics[scale=1.00]{%s}\n \\\\caption{%s}\n \\\\end{figure} %% FIGURE\n %s\n """\n )\n', (10968, 11154), False, 'from textwrap import dedent\n'), ((13521, 13532), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13530, 13532), False, 'import os\n'), ((13544, 13567), 'os.path.isfile', 'os.path.isfile', (['texpath'], {}), '(texpath)\n', (13558, 13567), False, 'import os\n'), ((13704, 13726), 'os.chdir', 'os.chdir', (['self.texpath'], {}), '(self.texpath)\n', (13712, 13726), False, 'import os\n'), ((13787, 13806), 'os.chdir', 'os.chdir', (['self.home'], {}), 
'(self.home)\n', (13795, 13806), False, 'import os\n'), ((13596, 13620), 'os.path.dirname', 'os.path.dirname', (['texpath'], {}), '(texpath)\n', (13611, 13620), False, 'import os\n'), ((14413, 14435), 'wqio.tests.helpers.checkdep_tex', 'helpers.checkdep_tex', ([], {}), '()\n', (14433, 14435), False, 'from wqio.tests import helpers\n'), ((14522, 14635), 'subprocess.call', 'subprocess.call', (["['pdflatex', texdoc, '--quiet']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(False)'}), "(['pdflatex', texdoc, '--quiet'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=False)\n", (14537, 14635), False, 'import subprocess\n'), ((14974, 14989), 'os.remove', 'os.remove', (['junk'], {}), '(junk)\n', (14983, 14989), False, 'import os\n')]
|
import asyncio
import json
import multiprocessing
import random
from functools import partial
from typing import Set, Callable, List, Iterator
import numpy as np
import torch
from torch import nn
import backgammon.game as bg
class RandomAgent(bg.Agent):
"""Random Player."""
def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
return random.choice(list(available_moves))
class NNAgent(bg.Agent):
"""Neural network player."""
def __init__(self, model: nn.Module) -> None:
self.model = model
"""Model, which can predict a quality of state."""
def extract_features(self, board: bg.Board) -> torch.Tensor:
"""Create feature to insert in model.
Generate array of 720 features, 15 features for every position and same for opponent.
"""
def get_features(columns: bg.ColumnCheckersNumber) -> np.ndarray:
features = np.zeros(board.NUM_COLS * board.NUM_CHECKERS)
for col in range(board.NUM_COLS):
if col in columns:
start = col * board.NUM_CHECKERS
end = start + columns[col]
features[start:end] = 1
return features
columns, opp_columns = board.to_schema()
features = np.concatenate((get_features(columns), get_features(opp_columns)))
return torch.from_numpy(features).float().cuda()
def estimate_moves(self, available_moves: List[bg.Moves], board: bg.Board) -> Iterator[float]:
"""Estimate resulting board position for all passed moves."""
for moves in available_moves:
with board.temp_move(*moves) as temp_board:
v = self.estimate(temp_board)
yield v
def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
"""Find and return best action."""
available_moves = list(available_moves)
estimated_moves = list(self.estimate_moves(available_moves, board))
        # the position estimate is interpreted as lower-is-better here, hence argmin
        index_best = int(np.argmin(estimated_moves))
        return available_moves[index_best]
def estimate(self, board):
"""Get a value of specified position."""
features = self.extract_features(board)
v = self.model(features)
return v
def __repr__(self):
return f'{self.__class__.__name__}[model={self.model}]'
@classmethod
def with_model_constructor(cls, model: nn.Module) -> Callable[[], 'NNAgent']:
"""
Create a child of current class with specified model.
:param model: torch model
:return: NNAgent class with specified model
"""
return partial(cls, model=model)
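def _demo_nn_agent() -> NNAgent:
    """Illustrative usage sketch for with_model_constructor (assumption-heavy: the layer sizes
    below are arbitrary; only the 720 input size follows the extract_features docstring).
    """
    value_net = nn.Sequential(nn.Linear(720, 128), nn.ReLU(), nn.Linear(128, 1), nn.Sigmoid())
    make_agent = NNAgent.with_model_constructor(value_net)
    return make_agent()  # equivalent to NNAgent(model=value_net)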
class TCPAgent(bg.Agent):
def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
"""Send a message to the server, wait an answer and use it."""
async def tcp_echo_client(message):
reader, writer = await asyncio.open_connection(self.host, self.port)
writer.write(message.encode())
data = await reader.read(100000)
writer.close()
return json.loads(data.decode())
message = json.dumps(dict(available_moves=available_moves, board=board.to_schema()))
done = asyncio.run(tcp_echo_client(message))
return done
def __init__(self, host: str = None, port: int = None, agent_name: str = None):
self.host = host
self.port = port
self.agent_name = agent_name
def __repr__(self):
information = ''
if self.host:
information += f'[{self.host}]'
if self.port:
information += f'[:{self.port}]'
if self.agent_name:
information += f'[{self.agent_name}]'
return f'{self.__class__.__name__}{information}'
@classmethod
def with_server(
cls,
agent_initializer: Callable[[], bg.Agent],
port: int = None, host: str = None
) -> 'TCPAgent':
"""Run server in child process, return insta"""
if not host and not port:
raise ValueError('Should specified at least host or port.')
pipe = multiprocessing.Pipe(False)
proc = multiprocessing.Process(
target=cls._server_runner,
args=(agent_initializer,),
kwargs=dict(port=port, host=host, pipe=pipe)
)
proc.start()
pipe_out, _ = pipe
agent_name = pipe_out.recv()
return cls(
host=host, port=port, agent_name=agent_name
)
@classmethod
def _server_runner(
cls,
agent_initializer: Callable[[], bg.Agent],
host: str = None,
port: int = None,
pipe: multiprocessing.Pipe = None
) -> None:
"""Create a TCP server, which can receive board and available values and select an action.
:param agent_initializer: function to initialize Agent. Do not pass Agent instance directly, because there are
situations, where we should generate it already in another process.
:param host: host
:param port: port
:param pipe: Pipe. Send name of created agent, if specified.
"""
async def handle(reader, writer):
data = await reader.read(100000)
message = json.loads(data.decode())
move = agent.get_action(
available_moves=message['available_moves'],
board=bg.Board.from_schema(*message['board'])
)
writer.write(json.dumps(move).encode())
await writer.drain()
writer.close()
async def run_server():
server = await asyncio.start_server(handle, host, port)
async with server:
await server.serve_forever()
agent = agent_initializer()
if pipe:
_, pipe_in = pipe
pipe_in.send(str(agent))
asyncio.run(run_server())
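def _demo_tcp_agent(host: str = '127.0.0.1', port: int = 8888) -> TCPAgent:
    """Illustrative usage sketch for TCPAgent.with_server (the host and port values here are
    arbitrary assumptions): start a child-process server wrapping a RandomAgent and return a
    TCPAgent proxy whose get_action() forwards every call to it over TCP.
    """
    return TCPAgent.with_server(RandomAgent, host=host, port=port)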
|
[
"backgammon.game.Board.from_schema",
"asyncio.start_server",
"json.dumps",
"torch.from_numpy",
"numpy.zeros",
"asyncio.open_connection",
"functools.partial",
"numpy.argmin",
"multiprocessing.Pipe"
] |
[((2669, 2694), 'functools.partial', 'partial', (['cls'], {'model': 'model'}), '(cls, model=model)\n', (2676, 2694), False, 'from functools import partial\n'), ((4171, 4198), 'multiprocessing.Pipe', 'multiprocessing.Pipe', (['(False)'], {}), '(False)\n', (4191, 4198), False, 'import multiprocessing\n'), ((937, 982), 'numpy.zeros', 'np.zeros', (['(board.NUM_COLS * board.NUM_CHECKERS)'], {}), '(board.NUM_COLS * board.NUM_CHECKERS)\n', (945, 982), True, 'import numpy as np\n'), ((2043, 2069), 'numpy.argmin', 'np.argmin', (['estimated_moves'], {}), '(estimated_moves)\n', (2052, 2069), True, 'import numpy as np\n'), ((2962, 3007), 'asyncio.open_connection', 'asyncio.open_connection', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (2985, 3007), False, 'import asyncio\n'), ((5681, 5721), 'asyncio.start_server', 'asyncio.start_server', (['handle', 'host', 'port'], {}), '(handle, host, port)\n', (5701, 5721), False, 'import asyncio\n'), ((5455, 5494), 'backgammon.game.Board.from_schema', 'bg.Board.from_schema', (["*message['board']"], {}), "(*message['board'])\n", (5475, 5494), True, 'import backgammon.game as bg\n'), ((1387, 1413), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (1403, 1413), False, 'import torch\n'), ((5534, 5550), 'json.dumps', 'json.dumps', (['move'], {}), '(move)\n', (5544, 5550), False, 'import json\n')]
|
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def log_loss(x, y, eps=1e-6):
x = np.clip(x, eps, 1-eps)
return -(y*np.log(x) + (1-y)*np.log(1-x))
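def _demo_log_loss():
    """Illustrative usage sketch; the numbers are a made-up example.
    sigmoid(0) = 0.5, and the log loss of predicting 0.5 for a positive label is -log(0.5) ~= 0.693.
    """
    p = sigmoid(np.array([0.0, 2.0, -2.0]))  # predicted probabilities, roughly [0.5, 0.88, 0.12]
    y = np.array([1.0, 1.0, 0.0])           # true labels
    return log_loss(p, y)                   # element-wise loss, roughly [0.693, 0.127, 0.127]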
|
[
"numpy.clip",
"numpy.exp",
"numpy.log"
] |
[((109, 133), 'numpy.clip', 'np.clip', (['x', 'eps', '(1 - eps)'], {}), '(x, eps, 1 - eps)\n', (116, 133), True, 'import numpy as np\n'), ((57, 67), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (63, 67), True, 'import numpy as np\n'), ((147, 156), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (153, 156), True, 'import numpy as np\n'), ((165, 178), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (171, 178), True, 'import numpy as np\n')]
|
import random
import _jsonnet, json
import logging
import hashlib
import os
from copy import deepcopy
import pandas as pd
from tqdm import tqdm
import math
from LeapOfThought.resources.teachai_kb import TeachAIKB
from LeapOfThought.common.general import num2words1, bc
from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg
# This is mainly for testing and debugging ...
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
pd.set_option('display.max_colwidth', 200)
pd.set_option("display.colheader_justify","left")
import numpy as np
from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ArtiSet():
def __init__(self, args):
random.seed(17)
np.random.seed(1234)
self._np_seed = np.random.RandomState(17)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), args.config_path) ,'r') as f:
self._config = json.load(f)[self.artiset_name]
if args.__contains__('variant') and len(args.variant) > 0:
self._output_file = args.output_file.replace('.jsonl','_' + args.variant + '.jsonl')
if len(args.experiment_version) > 0:
self._output_file = self._output_file.replace('.jsonl', '_' + args.experiment_version + '.jsonl')
else:
self._output_file = args.output_file
self._split = args.split_by_field
self._incorrect_beliefs = None
if "incorrect_beliefs_file" in args and args.incorrect_beliefs_file:
with open(args.incorrect_beliefs_file, 'r') as file:
self._incorrect_beliefs = [json.loads(line.strip()) for line in file]
self._save_sample = args.save_sample
self.artiset_data = []
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None):
"""append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
(that must contain a "phrase", "answer") and converts it to a BooleanQA format
Args:
example (dict): an example containing question,answer,dist1,dist2 fields
do_print (bool): just for debuging
num_choices (int): number of choices in question (between 2 and 5)
append_to_list (list): a
Returns:
"""
if 'context' not in example:
example['context'] = ''
if 'id' not in example:
example['id'] = self.create_qid(example)
if do_print:
            print('Q:%s || A:%s' % (example['phrase'], example['answer']))
if append_to_list is not None:
append_to_list.append(example)
else:
self.artiset_data.append(example)
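    # Illustrative input (values invented for this note): the smallest dict this method accepts;
    # "context" and "id" are filled in automatically when missing, e.g.
    #     self.append_teachyourai_format_example({'phrase': 'A whale is a mammal.', 'answer': 1})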
@staticmethod
def create_qid(example):
m = hashlib.md5()
m.update(example['phrase'].encode())
m.update(example['context'].encode())
# boolean examples have binary answer (int 0 or 1)
m.update(str(example['answer']).encode())
return m.hexdigest()
def split_by_columns(self):
split_columns = self._split.split(',')
examples = self.examples_meta
indexes = {}
# check the split columns are in the data
if len(set(split_columns) - set(examples.columns)) != 0:
raise (ValueError("split columns used to split dev/test and train set do not exist the examples_meta!"))
all_objs = []
for split_column in split_columns:
all_objs += list(examples[split_column])
#best_train_inds, best_dev_inds, best_test_inds = [], [], []
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
if len(split_columns) > 1:
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
dev_test_examples = examples.iloc[test_inds + dev_inds]
dev_test_objs = []
for split_column in split_columns:
dev_test_objs += list(dev_test_examples[split_column])
dev_test_objs = pd.Series(list(set(dev_test_objs)))
else:
            # We'll choose the test-dev examples from values of the split column that have the lowest number of examples.
            # This ensures we keep the highest number of training examples that are still disjoint on split_columns[0] from dev+test.
split_columns_value_counts = examples[split_columns[0]].value_counts().sort_values().cumsum().reset_index()
start_ind = split_columns_value_counts[split_columns_value_counts[split_columns[0]] > \
sum(self._config['test_dev_size'])].index[0] + 1
dev_test_objs = list(split_columns_value_counts['index'][0:start_ind])
dev_test_examples = examples[examples[split_columns[0]].isin(dev_test_objs)]
inds = list(dev_test_examples.index)
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
for split_column in split_columns:
indexes[split_column] = examples.set_index(split_column)
dev_ids = set()
not_in_train_ids = set()
for split_column in split_columns:
dev_ids = dev_ids & set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
not_in_train_ids = not_in_train_ids | set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
train_examples = examples.loc[~examples['qid'].isin(not_in_train_ids), :]
train_inds = list(train_examples.index)
if len(train_inds) > self._config['max_number_of_examples']:
train_inds = train_inds[0:self._config['max_number_of_examples']]
random.shuffle(train_inds)
print("total dev-test examples available: %d" % (len(dev_test_examples)))
print("split produced %d training examples" % (len(train_inds)))
return train_inds, dev_inds, test_inds
def save_dataset(self):
"""save_dataset() automatically saves the artiset
if the config output_file contains the string _sample.jsonl it will be saved in a more readable format
otherwise it will split the examples in self.artiset_data into train, dev, test and save them in s3
if output_file startswith s3:// otherwise locally. (If output_file is empty, it will not save)
Args:
arg1 (int): Description of arg1
arg2 (str): Description of arg2
Returns:
bool: Description of return value
"""
# Move non-required columns to metadata:
artiset_data_with_metadata = []
for example in self.artiset_data:
if 'metadata' not in example:
new_example = {'metadata':{}}
else:
new_example = {'metadata': example['metadata']}
new_example.update({k:example[k] for k in ['id', 'phrase', 'context', 'answer']})
new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer','metadata'}})
artiset_data_with_metadata.append(new_example)
self.artiset_data = artiset_data_with_metadata
# splitting
if len(self._split) > 0:
train_inds, dev_inds, test_inds = self.split_by_columns()
elif 'split' in self.examples_meta:
test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)
dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)
train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)
random.seed(17)
random.shuffle(train_inds)
#random.shuffle(test_inds)
#random.shuffle(dev_inds)
test_inds = test_inds[0: self._config['test_dev_size'][0]]
dev_inds = dev_inds[0:self._config['test_dev_size'][1]]
train_inds = train_inds[0:self._config['max_number_of_examples']]
else:
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
test_inds = inds[0:self._config['test_dev_size'][0]]
dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]
train_inds = inds[sum(self._config['test_dev_size']):]
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
if self._save_sample:
if 'split' in self.examples_meta.columns:
logger.info(f"size of each split:\n{self.examples_meta['split'].value_counts()}")
random.seed(17)
if len(self.artiset_data) > 100:
self.artiset_data = random.sample(self.artiset_data,100)
save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)
else:
logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds),len(dev_inds),len(train_inds)))
save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])
save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])
save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])
if len(self.examples_meta) > 0:
save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='rows'))
return train_inds, dev_inds, test_inds
def save_single_split(self, split_data, split):
inds = [i for i in range(len(split_data))]
random.seed(17)
random.shuffle(inds)
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = self._output_file.find('_sample') > -1
save_func(self._output_file.replace('.jsonl', '_' + split + '.jsonl'), [split_data[i] for i in inds], sample_indent=si)
def save_aux_data(self, output_file, data):
if output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(output_file) and len(output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = output_file.find('_sample') > -1
save_func(output_file, data, sample_indent=si)
def build_artificial_dataset(self,args):
pass
def resplit(self, args):
logger.error('Not implemented for this artiset')
def build_statement_rule_property_examples(self, examples, split, statement_tag='statement', ablate_same_distractor_fields = 1.0,\
rule_tags=['implicit_rule','property'], distractor_tags = ['distractors'], ablation_list=[], use_shorthand=False, \
nlg_sampling=False, reverse_validity_frac=0):
# computing ID before ablations on the statement and rule tags:
for i, example in enumerate(examples):
m = hashlib.md5()
# note that the tags for ID creation are always the same!
for tag in [statement_tag] + rule_tags:
if tag in example:
if type(example[tag]) == list:
for e in example[tag]:
m.update(e['subject'].encode())
m.update(e['predicate'].encode())
m.update(e['object'].encode())
m.update(e['validity'].encode())
else:
m.update(example[tag]['subject'].encode())
m.update(example[tag]['predicate'].encode())
m.update(example[tag]['object'].encode())
m.update(example[tag]['validity'].encode())
example['id'] = m.hexdigest()
# Ablations
# now that all the examples are ready, we can ablate as needed:
random.seed(17)
for ablation in ablation_list:
if len(ablation) == 3:
fields, fraction, condition = ablation
examples_cands = [e for e in examples if e[condition[0]] in condition[1]]
else:
fields, fraction = ablation
examples_cands = examples
example_to_ablate = random.sample(examples_cands, int(fraction * float(len(examples))))
for e in example_to_ablate:
for field in fields:
if field in e:
del e[field]
# for every field we ablate we must ablate the same field from distractors!
if random.random() < ablate_same_distractor_fields:
for distractor_tag in distractor_tags:
if distractor_tag in e:
if field in e[distractor_tag]:
del e[distractor_tag][field]
random.seed(17)
for i, example in enumerate(examples):
context_rules = []
# adding actual rules
for rule_tag in rule_tags:
if rule_tag in example:
rules = example[rule_tag]
if not type(rules) == list:
rules = [rules]
for rule in rules:
reverse_validity = not rule['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(rule,
is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand, nlg_sampling=nlg_sampling))
# adding distractors
for rule_tag in distractor_tags:
if rule_tag in example:
for field, tag_distractors in example[rule_tag].items():
for rule in tag_distractors:
rule_list = rule
if not type(rule_list) == list:
rule_list = [rule_list]
for r in rule_list:
reverse_validity = not r['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(r, is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand,
nlg_sampling=nlg_sampling))
use_hypothetical_statement = False
if 'is_hypothetical_statement' in example and example['is_hypothetical_statement']:
use_hypothetical_statement = True
answer = 1 if example[statement_tag]['validity'] == 'always true' else 0
if self.variant != 'statement_subject_lang_selectivity':
if random.random() < reverse_validity_frac:
answer = 1 - answer
reverse_validity = True
else:
reverse_validity = False
phrase = TeachAIKB().to_pseudo_language(example[statement_tag], is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling, reverse_validity=reverse_validity)
else:
statement_dict = deepcopy(example[statement_tag])
statement_dict['subject'] = random.sample(['foo','blah','ya','qux','aranglopa','foltopia','cakophon','baz','garply'], 1)[0]
phrase = TeachAIKB().to_pseudo_language(statement_dict, is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling)
# creating a unique set of rules that does not include the statement.
context_rules = list(set(context_rules))
            # set iteration order is not deterministic, so sort first to get a reproducible shuffle
context_rules = sorted(context_rules)
random.shuffle(context_rules)
example.update({'phrase': phrase, \
'answer': answer,
'context': ' '.join(context_rules),
'split': split,
'rules': context_rules})
            # append_teachyourai_format_example() takes an example dict (which must contain
            # "phrase" and "answer") and appends it in BooleanQA format
self.append_teachyourai_format_example(example, do_print=False)
self.examples_meta.append(deepcopy(example))
def print_examples(self, sample):
random.seed(7)
example_inds = random.sample(range(len(self.artiset_data)), sample)
## Printing a sample!
for ind in example_inds:
example = self.artiset_data[ind]
if 'statement' in example:
statement = example['statement']
rules = '\n'.join(example['rules'])
e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{rules} "
e = e.replace(statement['object'], f"{bc.Blue}{statement['object']}{bc.ENDC}")
e = e.replace(statement['predicate'], f"{bc.Green}{statement['predicate']}{bc.ENDC}")
e = e.replace(str(statement['subject']), f"{bc.Magenta}{statement['subject']}{bc.ENDC}")
if 'hypernym' in example:
hypernym = example['hypernym']['object']
e = e.replace(str(hypernym), f"{bc.Cyan}{hypernym}{bc.ENDC}")
e = e.replace('not', f"{bc.Red}not{bc.ENDC}")
e = e.replace('type', f"{bc.Yellow}type{bc.ENDC}")
if 'num_of_instances' in example:
e = e.replace(' ' + num2words1[example['num_of_instances']].lower() + ' ' \
, f"{bc.Red} {num2words1[example['num_of_instances']].lower()} {bc.ENDC}")
for number in 'one', 'two', 'three', 'four', 'five':
e = e.replace(' ' + number + ' ', f"{bc.Cyan} {number} {bc.ENDC}")
else:
e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{example['context']} "
print(e + '\n')
def create_subject_filter_lookup(self, examples, sample_on=None, avoid_mixing=None):
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
# building subject filter lookup:
subject_filter_lookup = {}
rules_to_sample_df = pd.DataFrame(triplets_to_sample_on)
for curr_subject, matching_records in tqdm(rules_to_sample_df.groupby('subject')):
subject_to_filter = {curr_subject}
if avoid_mixing is not None and 'predicates' in avoid_mixing:
subject_to_filter |= set(
rules_to_sample_df[~rules_to_sample_df['predicate'].isin(set(matching_records['predicate']))]['subject'])
if avoid_mixing is not None and 'hyponyms' in avoid_mixing:
subject_to_filter |= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
subject_to_filter |= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in TeachAIKB().sample({'predicate': 'meronym', 'object': curr_subject})}
subject_to_filter |= {e['object'] for e in
TeachAIKB().sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
subject_filter_lookup[curr_subject] = subject_to_filter
return subject_filter_lookup
#@profile
def self_negative_subject_sample(self, examples, sample_on = None, avoid_mixing=None, over_sample = 1.0):
examples = deepcopy(examples)
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
subject_filter_lookup = self.create_subject_filter_lookup(examples, sample_on, avoid_mixing)
output = []
examples_to_gen_from = deepcopy(examples) + random.sample(deepcopy(examples),int((over_sample - 1) * len(examples)))
for i,example in tqdm(enumerate(examples_to_gen_from)):
# sometimes we just want a list of triplets, with no specific dictionary field called "sample_on" ...
if sample_on is not None:
curr_triplet = example[sample_on]
else:
curr_triplet = example
curr_subject = curr_triplet['subject']
if sample_on is not None:
new_edge = deepcopy(
random.sample([e for e in examples if e[sample_on]['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge[sample_on]['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge[sample_on]['object'] = deepcopy(curr_triplet['object'])
new_edge[sample_on]['validity'] = 'never true'
else:
new_edge = deepcopy(
random.sample([e for e in triplets_to_sample_on if e['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge['object'] = deepcopy(curr_triplet['object'])
new_edge['validity'] = 'never true'
output.append(new_edge)
return output
def connect_negative_shuffle_subject(self, shuffle, shuffle_on, tar_tag, avoid_mixing=None):
logger.info(f'connect_negative_shuffle_subject {tar_tag}')
        # We assume shuffle_on is only one field (usually predicate or object).
        # Find "clusters" that may not be shuffled internally when producing negative examples
        # (because they have downward monotone relations).
connect_to = deepcopy(shuffle)
triplets_to_shuffle_df = pd.DataFrame(([e[shuffle_on] for e in shuffle]))
field_to_shuffle_counts = triplets_to_shuffle_df['subject'].value_counts()
subjects_to_shuffle = set(triplets_to_shuffle_df['subject'])
remaining_inds_to_choose = set(triplets_to_shuffle_df.index)
for curr_subject, size in field_to_shuffle_counts.iteritems():
potential_target_inds = deepcopy(remaining_inds_to_choose)
tar_subjects = subjects_to_shuffle - {curr_subject}
tar_subjects -= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
tar_subjects -= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in self.sample({'predicate': 'meronym', 'object': curr_subject})}
tar_subjects -= {e['object'] for e in self.sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
potential_target_inds &= set(triplets_to_shuffle_df[triplets_to_shuffle_df['subject'].isin(tar_subjects)].index)
targets = [e for e in connect_to if e[shuffle_on]['subject'] == curr_subject]
selected_inds = []
for i in random.sample(potential_target_inds, len(potential_target_inds)):
new_edge = {'subject': curr_subject,
'predicate': triplets_to_shuffle_df.loc[i, 'predicate'],
'object': triplets_to_shuffle_df.loc[i, 'object']}
# checking if there is no triplet that is true with the same values:
matching_edges_in_kb = self.lookup(new_edge)
if len(matching_edges_in_kb) == 0:
targets[len(selected_inds)][tar_tag] = new_edge
targets[len(selected_inds)][tar_tag].update({'validity': 'never true'})
selected_inds.append(i)
if len(selected_inds) >= len(targets):
break
if len(selected_inds) < len(targets):
logger.debug(f'did not find enough for {curr_subject}: {len(selected_inds)} found, {len(targets)} required')
else:
logger.debug(f'{curr_subject}: {len(selected_inds)} found.')
remaining_inds_to_choose -= set(selected_inds)
return connect_to
def sample_distractors(self, examples, sample, tar_tag):
# building indexes:
for i, sample_props in enumerate(sample):
src_tag, src_fields, sample, exactly_sample_num, connect, balance_with_statement = sample_props
# creating general indexes
indexes = {}
for field in ['subject', 'predicate', 'object', 'validity']:
indexes[field] = {}
for i, r in enumerate(examples):
if r[src_tag][field] not in indexes[field]:
indexes[field][r[src_tag][field]] = {i}
else:
indexes[field][r[src_tag][field]] |= {i}
# Link the connection to existing tags.
for i, example in tqdm(enumerate(examples), desc=f'adding distractors for {sample_props}'):
cand_inds_signed = {}
# the index helps us get candidates fast from the df of candidate_edges
cand_inds = set(range(len(examples)))
for field in src_fields:
cand_inds &= indexes[field][example[src_tag][field]]
                # making sure cand edges do not contain a duplicate of the current example
same_as_example_inds = indexes['subject'][example[src_tag]['subject']] & \
indexes['predicate'][example[src_tag]['predicate']] & \
indexes['object'][example[src_tag]['object']]
cand_inds -= same_as_example_inds
cand_inds_signed = {'always true':set(), 'never true': set()}
for validity in ['always true', 'never true']:
if validity in indexes['validity']:
cand_inds_signed[validity] |= cand_inds & indexes['validity'][validity]
if exactly_sample_num:
num_to_sample = sample
else:
num_to_sample = random.sample(range(min(len(cand_inds_signed['always true']) + \
len(cand_inds_signed['never true']), sample) + 1), 1)[0]
# Here we choose what is the validity value of the distractor we want to sample
if balance_with_statement is not None:
# balance_with_statement is not None, that means we care about the validity value balancing.
validities_to_sample = {'always true': math.ceil(num_to_sample / 2), 'never true': math.ceil(num_to_sample / 2)}
if balance_with_statement and validities_to_sample[example[src_tag]['validity']] > 0:
validities_to_sample[example[src_tag]['validity']] -= 1
else:
# Here we just randomly sample from a certain validity value (balance_with_statement is None, so it doesn't matter to us)
validities_to_sample = {'always true': 0, 'never true': 0}
validity_value_to_sample = random.sample(['always true', 'never true'],1)[0]
validities_to_sample[validity_value_to_sample] = num_to_sample
balanced_cand_inds = []
for validity, num_to_sample in validities_to_sample.items():
if len(cand_inds_signed[validity]) >= num_to_sample:
balanced_cand_inds += random.sample(cand_inds_signed[validity], num_to_sample)
# now actually sampling the rule we want to add to distractors
if tar_tag not in example:
example[tar_tag] = {}
for ind in balanced_cand_inds:
for tag in connect:
if tag not in example[tar_tag]:
example[tar_tag][tag] = []
example[tar_tag][tag].append(examples[ind][tag])
return examples
def print_stats(self):
for part in ['statement', 'implicit_rule', 'property']:
entities = {'dev': [], 'train': []}
for e in self.examples_meta:
if part in e:
if e['split'] == 'dev':
entities['dev'] += [e[part]['subject'], e[part]['object']]
elif e['split'] == 'train':
entities['train'] += [e[part]['subject'], e[part]['object']]
            if len(entities['dev']) == 0 or len(entities['train']) == 0:
logger.info(f" {part} was not found or ablated.")
continue
entities_intersection_ratio = len(set(entities['dev']) & set(entities['train'])) / \
len(set(entities['dev']) | set(entities['train']))
logger.info(f"Dev/Train entity intersection in {part} :\n{entities_intersection_ratio}\n")
if entities_intersection_ratio > 0.01:
entity_stats = pd.DataFrame(
{'dev': pd.Series(entities['dev']).value_counts(), 'train': pd.Series(entities['train']).value_counts()}).dropna()
entity_stats['min'] = entity_stats[['dev', 'train']].min(axis=1)
logger.info(f"mutual entities stats:\n{entity_stats.sort_values(by='min')}")
if 'statement' in self.examples_meta[0]:
agg = pandas_multi_column_agg(pd.DataFrame([{'predicate': e['statement']['predicate'],'split':e['split'], 'z': 1} \
for e in self.examples_meta]), ['split', 'predicate'])
logger.info(f"Predicate count per split:\n{agg}\n")
examples_meta_df = pd.DataFrame(self.examples_meta)
logger.info(f"Positive vs Negative:\n{pandas_multi_column_agg(examples_meta_df, ['split', 'answer'])}\n")
|
[
"logging.getLogger",
"copy.deepcopy",
"numpy.random.RandomState",
"pandas.set_option",
"numpy.random.seed",
"pandas.DataFrame",
"random.sample",
"hashlib.md5",
"random.shuffle",
"LeapOfThought.common.data_utils.pandas_multi_column_agg",
"os.path.abspath",
"logging.basicConfig",
"pandas.Series",
"math.ceil",
"LeapOfThought.common.file_utils.is_path_creatable",
"random.seed",
"LeapOfThought.resources.teachai_kb.TeachAIKB",
"json.load",
"random.random"
] |
[((413, 451), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (426, 451), True, 'import pandas as pd\n'), ((452, 493), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (465, 493), True, 'import pandas as pd\n'), ((494, 530), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(2000)'], {}), "('display.width', 2000)\n", (507, 530), True, 'import pandas as pd\n'), ((531, 573), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(200)'], {}), "('display.max_colwidth', 200)\n", (544, 573), True, 'import pandas as pd\n'), ((574, 624), 'pandas.set_option', 'pd.set_option', (['"""display.colheader_justify"""', '"""left"""'], {}), "('display.colheader_justify', 'left')\n", (587, 624), True, 'import pandas as pd\n'), ((747, 854), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)\n", (766, 854), False, 'import logging\n'), ((879, 906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (896, 906), False, 'import logging\n'), ((997, 1012), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (1008, 1012), False, 'import random\n'), ((1021, 1041), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1035, 1041), True, 'import numpy as np\n'), ((1066, 1091), 'numpy.random.RandomState', 'np.random.RandomState', (['(17)'], {}), '(17)\n', (1087, 1091), True, 'import numpy as np\n'), ((3095, 3108), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3106, 3108), False, 'import hashlib\n'), ((3965, 3980), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (3976, 3980), False, 'import random\n'), ((3989, 4009), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (4003, 4009), False, 'import random\n'), ((6299, 6325), 'random.shuffle', 'random.shuffle', (['train_inds'], {}), '(train_inds)\n', (6313, 6325), False, 'import random\n'), ((10455, 10470), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (10466, 10470), False, 'import random\n'), ((10479, 10499), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (10493, 10499), False, 'import random\n'), ((12964, 12979), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (12975, 12979), False, 'import random\n'), ((13972, 13987), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (13983, 13987), False, 'import random\n'), ((18091, 18105), 'random.seed', 'random.seed', (['(7)'], {}), '(7)\n', (18102, 18105), False, 'import random\n'), ((20217, 20252), 'pandas.DataFrame', 'pd.DataFrame', (['triplets_to_sample_on'], {}), '(triplets_to_sample_on)\n', (20229, 20252), True, 'import pandas as pd\n'), ((21874, 21892), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (21882, 21892), False, 'from copy import deepcopy\n'), ((23986, 24003), 'copy.deepcopy', 'deepcopy', (['shuffle'], {}), '(shuffle)\n', (23994, 24003), False, 'from copy import deepcopy\n'), ((24037, 24083), 'pandas.DataFrame', 'pd.DataFrame', (['[e[shuffle_on] for e in shuffle]'], {}), '([e[shuffle_on] for e in shuffle])\n', (24049, 24083), True, 'import pandas as pd\n'), ((32403, 32435), 'pandas.DataFrame', 'pd.DataFrame', (['self.examples_meta'], {}), '(self.examples_meta)\n', (32415, 32435), True, 'import pandas as pd\n'), ((4070, 4123), 'random.sample', 
'random.sample', (['inds', "self._config['test_dev_size'][0]"], {}), "(inds, self._config['test_dev_size'][0])\n", (4083, 4123), False, 'import random\n'), ((4199, 4252), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][1]"], {}), "(inds, self._config['test_dev_size'][1])\n", (4212, 4252), False, 'import random\n'), ((5367, 5420), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][0]"], {}), "(inds, self._config['test_dev_size'][0])\n", (5380, 5420), False, 'import random\n'), ((5496, 5549), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][1]"], {}), "(inds, self._config['test_dev_size'][1])\n", (5509, 5549), False, 'import random\n'), ((9419, 9434), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (9430, 9434), False, 'import random\n'), ((12013, 12026), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (12024, 12026), False, 'import hashlib\n'), ((17416, 17445), 'random.shuffle', 'random.shuffle', (['context_rules'], {}), '(context_rules)\n', (17430, 17445), False, 'import random\n'), ((22208, 22226), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (22216, 22226), False, 'from copy import deepcopy\n'), ((24415, 24449), 'copy.deepcopy', 'deepcopy', (['remaining_inds_to_choose'], {}), '(remaining_inds_to_choose)\n', (24423, 24449), False, 'from copy import deepcopy\n'), ((1225, 1237), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1234, 1237), False, 'import _jsonnet, json\n'), ((8230, 8245), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (8241, 8245), False, 'import random\n'), ((8258, 8284), 'random.shuffle', 'random.shuffle', (['train_inds'], {}), '(train_inds)\n', (8272, 8284), False, 'import random\n'), ((8667, 8682), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (8678, 8682), False, 'import random\n'), ((8695, 8715), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (8709, 8715), False, 'import random\n'), ((9053, 9089), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['self._output_file'], {}), '(self._output_file)\n', (9070, 9089), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((9516, 9553), 'random.sample', 'random.sample', (['self.artiset_data', '(100)'], {}), '(self.artiset_data, 100)\n', (9529, 9553), False, 'import random\n'), ((10607, 10643), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['self._output_file'], {}), '(self._output_file)\n', (10624, 10643), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((11108, 11138), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['output_file'], {}), '(output_file)\n', (11125, 11138), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((16643, 16675), 'copy.deepcopy', 'deepcopy', (['example[statement_tag]'], {}), '(example[statement_tag])\n', (16651, 16675), False, 'from copy import deepcopy\n'), ((18025, 18042), 'copy.deepcopy', 'deepcopy', (['example'], {}), '(example)\n', (18033, 18042), False, 'from copy import deepcopy\n'), ((22243, 22261), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (22251, 22261), False, 'from copy import deepcopy\n'), ((22937, 22972), 'copy.deepcopy', 'deepcopy', (["curr_triplet['predicate']"], {}), "(curr_triplet['predicate'])\n", (22945, 22972), False, 'from copy import 
deepcopy\n'), ((23021, 23053), 'copy.deepcopy', 'deepcopy', (["curr_triplet['object']"], {}), "(curr_triplet['object'])\n", (23029, 23053), False, 'from copy import deepcopy\n'), ((23348, 23383), 'copy.deepcopy', 'deepcopy', (["curr_triplet['predicate']"], {}), "(curr_triplet['predicate'])\n", (23356, 23383), False, 'from copy import deepcopy\n'), ((23421, 23453), 'copy.deepcopy', 'deepcopy', (["curr_triplet['object']"], {}), "(curr_triplet['object'])\n", (23429, 23453), False, 'from copy import deepcopy\n'), ((32118, 32238), 'pandas.DataFrame', 'pd.DataFrame', (["[{'predicate': e['statement']['predicate'], 'split': e['split'], 'z': 1} for\n e in self.examples_meta]"], {}), "([{'predicate': e['statement']['predicate'], 'split': e['split'\n ], 'z': 1} for e in self.examples_meta])\n", (32130, 32238), True, 'import pandas as pd\n'), ((16047, 16062), 'random.random', 'random.random', ([], {}), '()\n', (16060, 16062), False, 'import random\n'), ((16720, 16824), 'random.sample', 'random.sample', (["['foo', 'blah', 'ya', 'qux', 'aranglopa', 'foltopia', 'cakophon', 'baz',\n 'garply']", '(1)'], {}), "(['foo', 'blah', 'ya', 'qux', 'aranglopa', 'foltopia',\n 'cakophon', 'baz', 'garply'], 1)\n", (16733, 16824), False, 'import random\n'), ((32482, 32544), 'LeapOfThought.common.data_utils.pandas_multi_column_agg', 'pandas_multi_column_agg', (['examples_meta_df', "['split', 'answer']"], {}), "(examples_meta_df, ['split', 'answer'])\n", (32505, 32544), False, 'from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg\n'), ((1140, 1165), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1155, 1165), False, 'import os\n'), ((13671, 13686), 'random.random', 'random.random', ([], {}), '()\n', (13684, 13686), False, 'import random\n'), ((16264, 16275), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (16273, 16275), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((16841, 16852), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (16850, 16852), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((22772, 22885), 'random.sample', 'random.sample', (["[e for e in examples if e[sample_on]['subject'] not in\n subject_filter_lookup[curr_subject]]", '(1)'], {}), "([e for e in examples if e[sample_on]['subject'] not in\n subject_filter_lookup[curr_subject]], 1)\n", (22785, 22885), False, 'import random\n'), ((23192, 23307), 'random.sample', 'random.sample', (["[e for e in triplets_to_sample_on if e['subject'] not in\n subject_filter_lookup[curr_subject]]", '(1)'], {}), "([e for e in triplets_to_sample_on if e['subject'] not in\n subject_filter_lookup[curr_subject]], 1)\n", (23205, 23307), False, 'import random\n'), ((29254, 29282), 'math.ceil', 'math.ceil', (['(num_to_sample / 2)'], {}), '(num_to_sample / 2)\n', (29263, 29282), False, 'import math\n'), ((29298, 29326), 'math.ceil', 'math.ceil', (['(num_to_sample / 2)'], {}), '(num_to_sample / 2)\n', (29307, 29326), False, 'import math\n'), ((29804, 29851), 'random.sample', 'random.sample', (["['always true', 'never true']", '(1)'], {}), "(['always true', 'never true'], 1)\n", (29817, 29851), False, 'import random\n'), ((30175, 30231), 'random.sample', 'random.sample', (['cand_inds_signed[validity]', 'num_to_sample'], {}), '(cand_inds_signed[validity], num_to_sample)\n', (30188, 30231), False, 'import random\n'), ((24565, 24576), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', 
(24574, 24576), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((20767, 20778), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (20776, 20778), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((20976, 20987), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (20985, 20987), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21146, 21157), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21155, 21157), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21371, 21382), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21380, 21382), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21538, 21549), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21547, 21549), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((24774, 24785), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (24783, 24785), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((24939, 24950), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (24948, 24950), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((14478, 14489), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (14487, 14489), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((31745, 31771), 'pandas.Series', 'pd.Series', (["entities['dev']"], {}), "(entities['dev'])\n", (31754, 31771), True, 'import pandas as pd\n'), ((31797, 31825), 'pandas.Series', 'pd.Series', (["entities['train']"], {}), "(entities['train'])\n", (31806, 31825), True, 'import pandas as pd\n'), ((15369, 15380), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (15378, 15380), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is covered by the LICENSE file in the root of this project.
from __future__ import annotations
import typing
import cv2
import numpy as np
class PiRandomTransform:
"""A transformation that can act on raster and sparse two-dimensional data."""
def resample(self, random: np.random.RandomState):
raise NotImplementedError(f"{type(self)}.resample is not yet implemented.")
def transform_raster(
self, raster: np.ndarray, interpolation: str, fill_value: int
) -> np.ndarray:
"""
Args:
interpolation: One of "nearest", "linear", "cubic", "area".
"""
raise NotImplementedError(
f"{type(self)}.transform_raster is not yet implemented."
)
def transform_points(self, points: np.ndarray) -> np.ndarray:
"""
Args:
points: Shape (N, 2,). Order x, y.
"""
raise NotImplementedError(
f"{type(self)}.transform_points is not yet implemented."
)
class PiRandomAffineTransform(PiRandomTransform):
def __init__(
self,
input_width: int,
input_height: int,
output_width: int,
output_height: int,
flip_x_probability: float,
flip_y_probability: float,
rotation_max: float,
rotation_min: float,
scaling_x_max: float,
scaling_x_min: float,
scaling_y_max: float,
scaling_y_min: float,
shearing_x_max: float,
shearing_x_min: float,
shearing_y_max: float,
shearing_y_min: float,
translation_x_max: float,
translation_x_min: float,
translation_y_max: float,
translation_y_min: float,
probability: float,
**kwargs,
):
super().__init__()
self._input_width = input_width
self._input_height = input_height
self._output_width = output_width
self._output_height = output_height
self._flip_x_probability = flip_x_probability
self._flip_y_probability = flip_y_probability
self._rotation_min = rotation_min
self._rotation_max = rotation_max
self._scaling_x_min = scaling_x_min
self._scaling_x_max = scaling_x_max
self._scaling_y_min = scaling_y_min
self._scaling_y_max = scaling_y_max
self._shearing_x_min = shearing_x_min
self._shearing_x_max = shearing_x_max
self._shearing_y_min = shearing_y_min
self._shearing_y_max = shearing_y_max
self._translate_x_min = translation_x_min
self._translate_x_max = translation_x_max
self._translate_y_min = translation_y_min
self._translate_y_max = translation_y_max
self._probability = probability
self._flip_x = None
self._flip_y = None
self._rotation = None
self._scaling_x = None
self._scaling_y = None
self._shearing_x = None
self._shearing_y = None
self._translate_x = None
self._translate_y = None
self._matrix = None
self._apply = None
def resample(self, random: np.random.RandomState):
self._apply = random.choice(
[True, False], p=[self._probability, 1.0 - self._probability]
)
if not self._apply:
self._flip_x = None
self._flip_y = None
self._rotation = None
self._scaling_x = None
self._scaling_y = None
self._shearing_x = None
self._shearing_y = None
self._translate_x = None
self._translate_y = None
self._matrix = None
return
self._flip_x = random.choice(
[True, False],
p=[self._flip_x_probability, 1.0 - self._flip_x_probability],
replace=False,
)
self._flip_y = random.choice(
[True, False],
p=[self._flip_y_probability, 1.0 - self._flip_y_probability],
replace=False,
)
self._rotation = random.uniform(self._rotation_min, self._rotation_max)
self._scaling_x = random.uniform(self._scaling_x_min, self._scaling_x_max)
self._scaling_y = random.uniform(self._scaling_y_min, self._scaling_y_max)
self._shearing_x = random.uniform(self._shearing_x_min, self._shearing_x_max)
self._shearing_y = random.uniform(self._shearing_y_min, self._shearing_y_max)
self._translate_x = random.uniform(self._translate_x_min, self._translate_x_max)
self._translate_y = random.uniform(self._translate_y_min, self._translate_y_max)
        # construct the transformation matrix
translation_1 = np.eye(3, dtype=np.float)
translation_1[0, 2] = -0.5 * self._input_width
translation_1[1, 2] = -0.5 * self._input_height
scaling = np.eye(3, dtype=np.float)
scaling[0, 0] = self._scaling_x
scaling[1, 1] = self._scaling_y
scaling[0, 1] = self._shearing_x
scaling[1, 0] = self._shearing_y
scaling[2, 2] = 1.0
rotation = np.eye(3, dtype=np.float)
rotation[0, 0] = np.cos(self._rotation)
rotation[1, 1] = np.cos(self._rotation)
rotation[0, 1] = -np.sin(self._rotation)
rotation[1, 0] = np.sin(self._rotation)
rotation[2, 2] = 1.0
translation_2 = np.eye(3, dtype=np.float)
translation_2[0, 2] = self._translate_x
translation_2[1, 2] = self._translate_y
translation_3 = np.eye(3, dtype=np.float)
translation_3[0, 2] = 0.5 * self._output_width
translation_3[1, 2] = 0.5 * self._output_height
self._matrix = (
translation_3 @ translation_2 @ rotation @ scaling @ translation_1
)
def transform_raster(
self,
raster: np.ndarray,
interpolation: str,
fill_value: typing.Union[int, float, np.ndarray],
):
if not self._apply:
return raster
interpolation_flag = {
"nearest": cv2.INTER_NEAREST,
"linear": cv2.INTER_LINEAR,
"cubic": cv2.INTER_CUBIC,
"area": cv2.INTER_AREA,
}[interpolation]
channels = 1 if len(raster.shape) == 2 else raster.shape[2]
if channels not in [1, 3]:
# apply on each channel separately
return np.stack(
[
self.transform_raster(
raster=raster[..., channel],
interpolation=interpolation,
fill_value=fill_value[channel]
if isinstance(fill_value, np.ndarray)
else fill_value,
)
for channel in range(channels)
],
axis=-1,
)
if isinstance(fill_value, np.ndarray) and fill_value.size == 1:
fill_value = fill_value.item()
elif isinstance(fill_value, np.ndarray):
fill_value = tuple(value.item() for value in fill_value)
return cv2.warpAffine(
src=raster,
M=self._matrix[:2, :],
dsize=(self._output_width, self._output_height),
flags=interpolation_flag,
borderMode=cv2.BORDER_CONSTANT,
borderValue=fill_value,
)
def transform_points(self, points: np.ndarray) -> np.ndarray:
if not self._apply:
return points
num_points = points.shape[0]
# using homogeneous coordinates
points = np.stack(
[points[:, 0], points[:, 1], np.ones((num_points,), dtype=np.float)],
axis=-1,
)
return ((self._matrix @ points.T).T)[:, :2]
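def _demo_affine_round_trip():
    """Illustrative usage sketch; every numeric value below is an arbitrary assumption, and it
    assumes a NumPy version where the np.float alias used above is still available.  It resamples
    one random affine transform and applies it to both a raster image and a set of keypoints, so
    dense and sparse annotations stay geometrically aligned.
    """
    transform = PiRandomAffineTransform(
        input_width=64, input_height=64, output_width=64, output_height=64,
        flip_x_probability=0.5, flip_y_probability=0.0,
        rotation_min=-0.1, rotation_max=0.1,
        scaling_x_min=0.9, scaling_x_max=1.1,
        scaling_y_min=0.9, scaling_y_max=1.1,
        shearing_x_min=0.0, shearing_x_max=0.0,
        shearing_y_min=0.0, shearing_y_max=0.0,
        translation_x_min=-2.0, translation_x_max=2.0,
        translation_y_min=-2.0, translation_y_max=2.0,
        probability=1.0,
    )
    transform.resample(np.random.RandomState(0))
    image = np.zeros((64, 64, 3), dtype=np.float32)
    warped = transform.transform_raster(image, interpolation="linear", fill_value=0.0)
    points = transform.transform_points(np.array([[10.0, 20.0], [30.0, 40.0]]))
    return warped, points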
class PiRandomHsvTransform(PiRandomTransform):
def __init__(
self,
hue_min: float,
hue_max: float,
saturation_min: float,
saturation_max: float,
value_min: float,
value_max: float,
probability: float,
channels: typing.List[int],
**kwargs,
):
super().__init__()
if len(channels) != 3:
raise ValueError("Three channel indices expected.")
self._hue_min = hue_min
self._hue_max = hue_max
self._saturation_min = saturation_min
self._saturation_max = saturation_max
self._value_min = value_min
self._value_max = value_max
self._probability = probability
self._channels = channels
self._hue = None
self._saturation = None
self._value = None
self._apply = None
def resample(self, random: np.random.RandomState):
self._apply = random.choice(
[True, False], p=[self._probability, 1.0 - self._probability]
)
if not self._apply:
self._hue = None
self._saturation = None
self._value = None
return
self._hue = random.uniform(low=self._hue_min, high=self._hue_max)
self._saturation = random.uniform(
low=self._saturation_min, high=self._saturation_max
)
self._value = random.uniform(low=self._value_min, high=self._value_max)
def transform_raster(
self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
) -> np.ndarray:
if not self._apply:
return raster
rgb = raster[..., self._channels]
# debug output
# cv2.imshow("input", rgb[..., ::-1])
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
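        # Assumes the selected channels hold float RGB values in [0, 1]: cv2.cvtColor on float
        # input yields H in [0, 360) and S, V in [0, 1], which is why the hue shift below wraps
        # modulo 360 while saturation and value are clipped to [0, 1].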
# hue
hsv[..., 0] = np.remainder(
360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue), 360.0
)
# saturation
hsv[..., 1] = np.clip(hsv[..., 1] + self._saturation, 0.0, 1.0)
# value
hsv[..., 2] = np.clip(hsv[..., 2] + self._value, 0.0, 1.0)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
# debug output
# cv2.imshow("transformed", rgb[..., ::-1])
raster[..., self._channels] = rgb
return raster
def transform_points(self, points: np.ndarray) -> np.ndarray:
if not self._apply:
return points
return points
class PiRandomContrastTransform(PiRandomTransform):
def __init__(
self,
contrast_min: float,
contrast_max: float,
probability: float,
channels: typing.List[int],
**kwargs,
):
super().__init__()
self._contrast_min = contrast_min
self._contrast_max = contrast_max
self._probability = probability
self._channels = channels
self._contrast = None
self._apply = None
def resample(self, random: np.random.RandomState):
self._apply = random.choice(
[True, False], p=[self._probability, 1.0 - self._probability]
)
if not self._apply:
self._contrast = None
return
self._contrast = random.uniform(low=self._contrast_min, high=self._contrast_max)
def transform_raster(
self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
) -> np.ndarray:
if not self._apply:
return raster
rgb = raster[..., self._channels]
# debug output
# cv2.imshow("input", rgb[..., ::-1])
mean = np.mean(rgb.reshape(-1, 3), axis=0).reshape(1, 1, 3)
rgb = np.clip((rgb - mean) * (1.0 + self._contrast) + mean, 0.0, 1.0)
# debug output
# cv2.imshow("transformed", rgb[..., ::-1])
raster[..., self._channels] = rgb
return raster
def transform_points(self, points: np.ndarray) -> np.ndarray:
if not self._apply:
return points
return points
class PiRandomBlurTransform(PiRandomTransform):
def __init__(
self,
blur_min: float,
blur_max: float,
probability: float,
channels: typing.List[int],
**kwargs,
):
super().__init__()
self._blur_min = blur_min
self._blur_max = blur_max
self._probability = probability
self._channels = channels
self._blur = None
def resample(self, random: np.random.RandomState):
self._apply = random.choice(
[True, False], p=[self._probability, 1.0 - self._probability]
)
if not self._apply:
self._blur = None
return
self._blur = random.uniform(low=self._blur_min, high=self._blur_max)
def transform_raster(
self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
) -> np.ndarray:
if not self._apply:
return raster
if self._blur == 0.0:
return raster
rgb = raster[..., self._channels]
rgb = cv2.GaussianBlur(rgb, (0, 0), sigmaX=self._blur)
raster[..., self._channels] = rgb
return raster
def transform_points(self, points: np.ndarray) -> np.ndarray:
if not self._apply:
return points
return points
|
[
"numpy.clip",
"numpy.eye",
"cv2.warpAffine",
"numpy.ones",
"numpy.cos",
"cv2.cvtColor",
"numpy.sin",
"cv2.GaussianBlur",
"numpy.remainder"
] |
[((4723, 4748), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (4729, 4748), True, 'import numpy as np\n'), ((4879, 4904), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (4885, 4904), True, 'import numpy as np\n'), ((5115, 5140), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5121, 5140), True, 'import numpy as np\n'), ((5166, 5188), 'numpy.cos', 'np.cos', (['self._rotation'], {}), '(self._rotation)\n', (5172, 5188), True, 'import numpy as np\n'), ((5214, 5236), 'numpy.cos', 'np.cos', (['self._rotation'], {}), '(self._rotation)\n', (5220, 5236), True, 'import numpy as np\n'), ((5311, 5333), 'numpy.sin', 'np.sin', (['self._rotation'], {}), '(self._rotation)\n', (5317, 5333), True, 'import numpy as np\n'), ((5388, 5413), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5394, 5413), True, 'import numpy as np\n'), ((5535, 5560), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5541, 5560), True, 'import numpy as np\n'), ((7103, 7292), 'cv2.warpAffine', 'cv2.warpAffine', ([], {'src': 'raster', 'M': 'self._matrix[:2, :]', 'dsize': '(self._output_width, self._output_height)', 'flags': 'interpolation_flag', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': 'fill_value'}), '(src=raster, M=self._matrix[:2, :], dsize=(self._output_width,\n self._output_height), flags=interpolation_flag, borderMode=cv2.\n BORDER_CONSTANT, borderValue=fill_value)\n', (7117, 7292), False, 'import cv2\n'), ((9530, 9566), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb', 'cv2.COLOR_RGB2HSV'], {}), '(rgb, cv2.COLOR_RGB2HSV)\n', (9542, 9566), False, 'import cv2\n'), ((9604, 9672), 'numpy.remainder', 'np.remainder', (['(360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue))', '(360.0)'], {}), '(360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue), 360.0)\n', (9616, 9672), True, 'import numpy as np\n'), ((9739, 9788), 'numpy.clip', 'np.clip', (['(hsv[..., 1] + self._saturation)', '(0.0)', '(1.0)'], {}), '(hsv[..., 1] + self._saturation, 0.0, 1.0)\n', (9746, 9788), True, 'import numpy as np\n'), ((9828, 9872), 'numpy.clip', 'np.clip', (['(hsv[..., 2] + self._value)', '(0.0)', '(1.0)'], {}), '(hsv[..., 2] + self._value, 0.0, 1.0)\n', (9835, 9872), True, 'import numpy as np\n'), ((9888, 9924), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (9900, 9924), False, 'import cv2\n'), ((11413, 11476), 'numpy.clip', 'np.clip', (['((rgb - mean) * (1.0 + self._contrast) + mean)', '(0.0)', '(1.0)'], {}), '((rgb - mean) * (1.0 + self._contrast) + mean, 0.0, 1.0)\n', (11420, 11476), True, 'import numpy as np\n'), ((12809, 12857), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['rgb', '(0, 0)'], {'sigmaX': 'self._blur'}), '(rgb, (0, 0), sigmaX=self._blur)\n', (12825, 12857), False, 'import cv2\n'), ((5263, 5285), 'numpy.sin', 'np.sin', (['self._rotation'], {}), '(self._rotation)\n', (5269, 5285), True, 'import numpy as np\n'), ((7634, 7672), 'numpy.ones', 'np.ones', (['(num_points,)'], {'dtype': 'np.float'}), '((num_points,), dtype=np.float)\n', (7641, 7672), True, 'import numpy as np\n')]
|
from __future__ import print_function
import tikzplots as tkz
import argparse
import numpy as np
import re
def parse_data_file(fname):
with open(fname, 'r') as fp:
lines = fp.readlines()
# Read in the first line, and find the comma-separated values
# in the header
hline = lines[0]
for index, h in enumerate(hline):
if h == '=':
hstr = hline[index+1:].split(',')
# Strip away any white space
header = []
for h in hstr:
header.append(h.strip())
data = []
for line in lines[1:]:
dline = []
for entry in line.split():
dline.append(float(entry))
data.append(dline)
return header, np.array(data)
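# Expected input layout (an assumption inferred from the parsing logic above and the column
# names used below, not stated in the files themselves): a single header line such as
#   Variables = nnodes, fval_effectivity, indicator_effectivity
# followed by rows of whitespace-separated numeric values.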
# Create an argument parser to read in arguments from the command line
p = argparse.ArgumentParser()
p.add_argument('--files', nargs='+', type=str, help='List of files')
p.add_argument('--labels', nargs='+', type=str, help='List of labels')
p.add_argument('--outfile', type=str, default='output.tex')
p.add_argument('--plot', type=str, default='effectivity')
args = p.parse_args()
# Set the colors to use for each set of bars
colors = []
for i in range(10):
colors.append('tableau%d'%(i))
tikzcolors = '''
\definecolor{tableau0}{RGB}{31,119,180}
\definecolor{tableau1}{RGB}{255,158,74}
\definecolor{tableau2}{RGB}{103,191,92}
\definecolor{tableau3}{RGB}{237,102,93}
\definecolor{tableau4}{RGB}{148,103,189}
\definecolor{tableau5}{RGB}{168,120,110}
\definecolor{tableau6}{RGB}{237,151,202}
\definecolor{tableau7}{RGB}{162,162,162}
\definecolor{tableau8}{RGB}{205,204,93}
\definecolor{tableau9}{RGB}{109,204,218}
'''
data = []
for fname in args.files:
    try:
        header, dat = parse_data_file(fname)
    except Exception:
        print('Could not parse data file: ', fname)
        continue
    data.append(dat)
# Plot the error on the y-axis
nnodes_index = header.index('nnodes')
fval_eff_index = header.index('fval_effectivity')
indc_eff_index = header.index('indicator_effectivity')
# Find the max value of y
xmin = 1e20
xmax = 0
ymin = 0
ymax = 0
# Look through all the data
for d in data:
xmin = min(xmin, np.min(d[:, nnodes_index]))
xmax = max(xmax, np.max(d[:, nnodes_index]))
if args.plot == 'effectivity':
ymax = max(ymax, np.max(d[:, fval_eff_index]))
ymax = min(ymax, 100)
else:
ymax = max(ymax, np.max(d[:, indc_eff_index]))
ymax = min(ymax, 500)
# Round to the nearest multiple of 10
xmin = int(np.floor(np.log10(xmin)))
xmax = int(np.ceil(np.log10(xmax)))
# Create a range
xticks = np.linspace(xmin, xmax, xmax - xmin + 1)
xtick_labels = []
for exp in range(xmin, xmax + 1, 1):
xtick_labels.append('$10^{%d}$'%(exp))
# Set the positions of the tick locations
if ymax < 2.0:
ymax_int = int(np.ceil(4.0*ymax))
ymax = ymax_int/4.0
yticks = np.linspace(0, ymax, ymax_int+1)
ytick_labels = yticks
elif ymax < 10:
ymax = int(np.ceil(ymax))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(ymax+1)
elif ymax < 20:
ymax = 2*int(np.ceil(ymax/2.0))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(0, ymax+1, 2)
yticks = np.linspace(0, ymax, ymax/2 + 1)
else:
ymax = 5*int(np.ceil(ymax/5.0))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(0, ymax+1, 5)
yticks = np.linspace(0, ymax, ymax/5 + 1)
# The overall dimensions
xdim = 2.0
xscale = xdim/(xmax - xmin)
ydim = 1.75
yscale = ydim/(ymax - ymin)
# Get the header info
s = tkz.get_header()
s += tkz.get_begin_tikz(xdim=1.5, ydim=1.5, xunit='in', yunit='in')
s += tikzcolors
symbols = ['circle', 'square', 'triangle', 'delta', 'diamond']
for k, d in enumerate(data):
xvals = np.log10(d[:, nnodes_index])
if args.plot == 'effectivity':
yvals = d[:, fval_eff_index]
else:
yvals = d[:, indc_eff_index]
s += tkz.get_2d_plot(xvals, yvals,
line_dim='very thick',
color=colors[k % 10],
symbol=symbols[k % 4],
symbol_size=0.035,
xscale=xscale, yscale=yscale,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax)
# Set the labels (lower-right corner)
if args.labels is not None:
for k, label in enumerate(args.labels):
x = xmin + 0.75*(xmax - xmin)
y = ymin + 0.05*(ymax - ymin)*(len(args.labels)-k)
length = 0.035*(xmax - xmin)
s += tkz.get_legend_entry(x, y, length, label=label,
font_size='small',
line_dim='very thick',
color=colors[k % 10], symbol=symbols[k % 4],
symbol_size=0.035,
xscale=xscale, yscale=yscale)
if args.plot == 'effectivity':
title = 'Effectivity'
else:
title = 'Indicator effectivity'
# Plot the axes
s += tkz.get_2d_axes(xmin, xmax, ymin, ymax,
xscale=xscale, yscale=yscale,
xticks=xticks, yticks=yticks,
xtick_labels=xtick_labels,
ytick_labels=ytick_labels,
tick_font='normalsize',
tick_frac=0.01,
xlabel_offset=0.085,
label_font='Large',
xlabel='Number of nodes',
ylabel_offset=0.175,
ylabel=title)
s += tkz.get_end_tikz()
fp = open(args.outfile, 'w')
fp.write(s)
fp.close()
|
[
"numpy.ceil",
"numpy.log10",
"tikzplots.get_legend_entry",
"argparse.ArgumentParser",
"tikzplots.get_2d_plot",
"numpy.max",
"tikzplots.get_2d_axes",
"numpy.array",
"numpy.linspace",
"tikzplots.get_end_tikz",
"tikzplots.get_begin_tikz",
"numpy.min",
"tikzplots.get_header"
] |
[((861, 886), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (884, 886), False, 'import argparse\n'), ((2602, 2642), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(xmax - xmin + 1)'], {}), '(xmin, xmax, xmax - xmin + 1)\n', (2613, 2642), True, 'import numpy as np\n'), ((3535, 3551), 'tikzplots.get_header', 'tkz.get_header', ([], {}), '()\n', (3549, 3551), True, 'import tikzplots as tkz\n'), ((3557, 3619), 'tikzplots.get_begin_tikz', 'tkz.get_begin_tikz', ([], {'xdim': '(1.5)', 'ydim': '(1.5)', 'xunit': '"""in"""', 'yunit': '"""in"""'}), "(xdim=1.5, ydim=1.5, xunit='in', yunit='in')\n", (3575, 3619), True, 'import tikzplots as tkz\n'), ((5010, 5324), 'tikzplots.get_2d_axes', 'tkz.get_2d_axes', (['xmin', 'xmax', 'ymin', 'ymax'], {'xscale': 'xscale', 'yscale': 'yscale', 'xticks': 'xticks', 'yticks': 'yticks', 'xtick_labels': 'xtick_labels', 'ytick_labels': 'ytick_labels', 'tick_font': '"""normalsize"""', 'tick_frac': '(0.01)', 'xlabel_offset': '(0.085)', 'label_font': '"""Large"""', 'xlabel': '"""Number of nodes"""', 'ylabel_offset': '(0.175)', 'ylabel': 'title'}), "(xmin, xmax, ymin, ymax, xscale=xscale, yscale=yscale,\n xticks=xticks, yticks=yticks, xtick_labels=xtick_labels, ytick_labels=\n ytick_labels, tick_font='normalsize', tick_frac=0.01, xlabel_offset=\n 0.085, label_font='Large', xlabel='Number of nodes', ylabel_offset=\n 0.175, ylabel=title)\n", (5025, 5324), True, 'import tikzplots as tkz\n'), ((5543, 5561), 'tikzplots.get_end_tikz', 'tkz.get_end_tikz', ([], {}), '()\n', (5559, 5561), True, 'import tikzplots as tkz\n'), ((2874, 2908), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax_int + 1)'], {}), '(0, ymax, ymax_int + 1)\n', (2885, 2908), True, 'import numpy as np\n'), ((3743, 3771), 'numpy.log10', 'np.log10', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (3751, 3771), True, 'import numpy as np\n'), ((3901, 4099), 'tikzplots.get_2d_plot', 'tkz.get_2d_plot', (['xvals', 'yvals'], {'line_dim': '"""very thick"""', 'color': 'colors[k % 10]', 'symbol': 'symbols[k % 4]', 'symbol_size': '(0.035)', 'xscale': 'xscale', 'yscale': 'yscale', 'xmin': 'xmin', 'xmax': 'xmax', 'ymin': 'ymin', 'ymax': 'ymax'}), "(xvals, yvals, line_dim='very thick', color=colors[k % 10],\n symbol=symbols[k % 4], symbol_size=0.035, xscale=xscale, yscale=yscale,\n xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\n", (3916, 4099), True, 'import tikzplots as tkz\n'), ((2170, 2196), 'numpy.min', 'np.min', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (2176, 2196), True, 'import numpy as np\n'), ((2219, 2245), 'numpy.max', 'np.max', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (2225, 2245), True, 'import numpy as np\n'), ((2522, 2536), 'numpy.log10', 'np.log10', (['xmin'], {}), '(xmin)\n', (2530, 2536), True, 'import numpy as np\n'), ((2558, 2572), 'numpy.log10', 'np.log10', (['xmax'], {}), '(xmax)\n', (2566, 2572), True, 'import numpy as np\n'), ((2818, 2837), 'numpy.ceil', 'np.ceil', (['(4.0 * ymax)'], {}), '(4.0 * ymax)\n', (2825, 2837), True, 'import numpy as np\n'), ((2992, 3022), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3003, 3022), True, 'import numpy as np\n'), ((4526, 4719), 'tikzplots.get_legend_entry', 'tkz.get_legend_entry', (['x', 'y', 'length'], {'label': 'label', 'font_size': '"""small"""', 'line_dim': '"""very thick"""', 'color': 'colors[k % 10]', 'symbol': 'symbols[k % 4]', 'symbol_size': '(0.035)', 'xscale': 'xscale', 'yscale': 'yscale'}), "(x, y, length, label=label, 
font_size='small', line_dim\n ='very thick', color=colors[k % 10], symbol=symbols[k % 4], symbol_size\n =0.035, xscale=xscale, yscale=yscale)\n", (4546, 4719), True, 'import tikzplots as tkz\n'), ((770, 784), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (778, 784), True, 'import numpy as np\n'), ((2308, 2336), 'numpy.max', 'np.max', (['d[:, fval_eff_index]'], {}), '(d[:, fval_eff_index])\n', (2314, 2336), True, 'import numpy as np\n'), ((2403, 2431), 'numpy.max', 'np.max', (['d[:, indc_eff_index]'], {}), '(d[:, indc_eff_index])\n', (2409, 2431), True, 'import numpy as np\n'), ((2964, 2977), 'numpy.ceil', 'np.ceil', (['ymax'], {}), '(ymax)\n', (2971, 2977), True, 'import numpy as np\n'), ((3119, 3149), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3130, 3149), True, 'import numpy as np\n'), ((3200, 3234), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax / 2 + 1)'], {}), '(0, ymax, ymax / 2 + 1)\n', (3211, 3234), True, 'import numpy as np\n'), ((3288, 3318), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3299, 3318), True, 'import numpy as np\n'), ((3369, 3403), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax / 5 + 1)'], {}), '(0, ymax, ymax / 5 + 1)\n', (3380, 3403), True, 'import numpy as np\n'), ((3087, 3106), 'numpy.ceil', 'np.ceil', (['(ymax / 2.0)'], {}), '(ymax / 2.0)\n', (3094, 3106), True, 'import numpy as np\n'), ((3256, 3275), 'numpy.ceil', 'np.ceil', (['(ymax / 5.0)'], {}), '(ymax / 5.0)\n', (3263, 3275), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import os
class TFModel(object):
'''
This class contains the general functions for a tensorflow model
'''
def __init__(self, config):
# Limit the TensorFlow's logs
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
# tf.logging.set_verbosity(tf.logging.ERROR)
self.config = config
self.sess = None
self.saver = None
def initialize_session(self):
"""
Set configurations:
* allow_soft_placement : If True, will allow models trained
on GPU to be deployed unto CPU
* log_device_placement : If True, will print the hardware
and operations that have been placed on it
"""
sess_conf = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
sess_conf.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_conf)
# Save object
        if self.config.save is not None:
self.saver = tf.train.Saver()
# Initialize all variables
self.sess.run(tf.global_variables_initializer())
def save_model(self, fold, timestamp, name):
"""
Save the model and the config file
"""
model_name = name + "_" + timestamp
main_dir = "./checkpoints/" + model_name + "/"
# Check main model dir
if not os.path.exists(main_dir):
os.makedirs(main_dir)
# If using K-Fold Cross Validation, save each model
if self.config.k_folds > 1:
dir = main_dir + "Fold_" + str(fold + 1) + "/"
# Create Fold dir
if not os.path.exists(dir):
os.makedirs(dir)
# Save the model
self.saver.save(self.sess, dir)
else:
self.saver.save(self.sess, main_dir)
return main_dir
def ner_save(self, fold, timestamp, name, ep):
# Save the model
main_dir = self.save_model(fold, timestamp, name)
# Save the corresponding config file
if fold == 0:
np.savez(main_dir + "config",
model=self.config.model,
k_folds=self.config.k_folds,
words=self.config.words,
tags=self.config.tags,
chars=self.config.chars,
use_crf=self.config.use_crf,
epoch=ep+1)
def class_save(self, fold, timestamp, name, ep):
# Save the model
main_dir = self.save_model(fold, timestamp, name)
# Save the config file
if fold == 0:
np.savez(main_dir + "config",
model=self.config.model,
k_folds=self.config.k_folds,
words=self.config.words,
chars=self.config.chars,
epoch=ep+1)
def close_session(self):
self.sess.close()
tf.reset_default_graph()
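# Hedged usage sketch (based only on the methods defined above; `config` is whatever object the
# caller normally supplies, with attributes such as `save`, `k_folds`, `model`, `words`, ...):
#   model = TFModel(config)
#   model.initialize_session()
#   ...train...
#   model.ner_save(fold, timestamp, name, ep)
#   model.close_session()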
|
[
"os.path.exists",
"numpy.savez",
"tensorflow.reset_default_graph",
"os.makedirs",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.ConfigProto"
] |
[((847, 916), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (861, 916), True, 'import tensorflow as tf\n'), ((1022, 1050), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_conf'}), '(config=sess_conf)\n', (1032, 1050), True, 'import tensorflow as tf\n'), ((3074, 3098), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3096, 3098), True, 'import tensorflow as tf\n'), ((1140, 1156), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1154, 1156), True, 'import tensorflow as tf\n'), ((1215, 1248), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1246, 1248), True, 'import tensorflow as tf\n'), ((1517, 1541), 'os.path.exists', 'os.path.exists', (['main_dir'], {}), '(main_dir)\n', (1531, 1541), False, 'import os\n'), ((1555, 1576), 'os.makedirs', 'os.makedirs', (['main_dir'], {}), '(main_dir)\n', (1566, 1576), False, 'import os\n'), ((2212, 2421), 'numpy.savez', 'np.savez', (["(main_dir + 'config')"], {'model': 'self.config.model', 'k_folds': 'self.config.k_folds', 'words': 'self.config.words', 'tags': 'self.config.tags', 'chars': 'self.config.chars', 'use_crf': 'self.config.use_crf', 'epoch': '(ep + 1)'}), "(main_dir + 'config', model=self.config.model, k_folds=self.config.\n k_folds, words=self.config.words, tags=self.config.tags, chars=self.\n config.chars, use_crf=self.config.use_crf, epoch=ep + 1)\n", (2220, 2421), True, 'import numpy as np\n'), ((2759, 2911), 'numpy.savez', 'np.savez', (["(main_dir + 'config')"], {'model': 'self.config.model', 'k_folds': 'self.config.k_folds', 'words': 'self.config.words', 'chars': 'self.config.chars', 'epoch': '(ep + 1)'}), "(main_dir + 'config', model=self.config.model, k_folds=self.config.\n k_folds, words=self.config.words, chars=self.config.chars, epoch=ep + 1)\n", (2767, 2911), True, 'import numpy as np\n'), ((1782, 1801), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1796, 1801), False, 'import os\n'), ((1819, 1835), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (1830, 1835), False, 'import os\n')]
|
import tensorflow as tf
import numpy as np
from interfaces import AbstractSelfAdaptingStrategy
def _get_category_encoding_layer(size):
return lambda feature: tf.one_hot(feature, size + 1) # +1 since classes are labeled from 1
def _prepare_inputs():
all_inputs = tf.keras.Input(shape=(2,), dtype='int32')
encoded_features = []
domain_sizes = [1875, 20]
for idx in range(0, 2):
encoding_layer = _get_category_encoding_layer(domain_sizes[idx])
encoded_col = encoding_layer(all_inputs[:, idx])
encoded_features.append(encoded_col)
return all_inputs, encoded_features
def _create_model(layers_widths):
all_inputs, encoded_features = _prepare_inputs()
last_layer = tf.keras.layers.Concatenate()(encoded_features)
for width in layers_widths:
last_layer = tf.keras.layers.Dense(int(width), activation=tf.keras.activations.relu)(last_layer)
output = tf.keras.layers.Dense(1, tf.keras.activations.exponential)(last_layer)
model = tf.keras.Model(inputs=all_inputs, outputs=output)
learning_rate = tf.keras.experimental.CosineDecay(0.01, 10000000)
model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate), loss=tf.losses.Poisson())
# model.summary()
return model
def _jobs_to_tensors(jobs):
x = list(map(lambda job: [job.exercise_id, job.runtime_id], jobs))
y = list(map(lambda job: [job.duration], jobs))
return tf.convert_to_tensor(x, dtype=tf.int32), tf.convert_to_tensor(y, dtype=tf.float32)
class CategorySelfAdaptingStrategy(AbstractSelfAdaptingStrategy):
"""Uses machine-learning neural-network regression model to predict the job duration.
The model is trained in SA and used by dispatcher (via estimation function interface).
The model is implemented in TensorFlow.
"""
def __init__(self, layers_widths=[64], batch_size=5000, batch_epochs=5, ref_jobs=None):
tf.config.threading.set_inter_op_parallelism_threads(8)
tf.config.threading.set_intra_op_parallelism_threads(8)
# tf.config.set_visible_devices([], 'GPU')
self.layers_widths = layers_widths
self.batch_size = batch_size
self.batch_epochs = batch_epochs
self.ref_jobs = ref_jobs[:] if ref_jobs else None
self.buffer = []
self.model = None
def _advance_ts(self, ts):
while len(self.ref_jobs) > 0 and self.ref_jobs[-1].spawn_ts + self.ref_jobs[-1].duration <= ts:
job = self.ref_jobs.pop()
if job.compilation_ok:
self.buffer.append(job)
def _train_batch(self):
"""Take the job buffer and use it as batch for training."""
if len(self.buffer) > self.batch_size:
x, y = _jobs_to_tensors(self.buffer)
self.model.fit(x, y, batch_size=len(self.buffer), epochs=self.batch_epochs, verbose=False)
self.buffer = [] # reset the job buffer at the end
def init(self, ts, dispatcher, workers):
self.model = _create_model(self.layers_widths)
self._advance_ts(ts)
self._train_batch()
@tf.function
def predict_single(input):
return self.model(input, training=False)[0]
def predictor(job):
x = np.array([[job.exercise_id, job.runtime_id]], dtype='int32')
return predict_single(x).numpy()[0]
dispatcher.set_predictor(predictor)
def do_adapt(self, ts, dispatcher, workers, job=None):
self._advance_ts(ts)
if job and job.compilation_ok:
self.buffer.append(job)
self._train_batch()
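# Hedged usage sketch (argument names follow the signatures above; `dispatcher`, `workers`
# and the job stream come from the surrounding simulation, which is not shown here):
#   sa = CategorySelfAdaptingStrategy(layers_widths=[64], ref_jobs=ref_jobs)
#   sa.init(ts, dispatcher, workers)           # builds the model and registers the predictor
#   sa.do_adapt(ts, dispatcher, workers, job)  # called as finished jobs arrive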
|
[
"tensorflow.one_hot",
"tensorflow.losses.Poisson",
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"tensorflow.keras.layers.Concatenate",
"tensorflow.convert_to_tensor",
"numpy.array",
"tensorflow.keras.experimental.CosineDecay",
"tensorflow.keras.layers.Dense",
"tensorflow.config.threading.set_inter_op_parallelism_threads",
"tensorflow.keras.Input",
"tensorflow.optimizers.Adam",
"tensorflow.keras.Model"
] |
[((275, 316), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)', 'dtype': '"""int32"""'}), "(shape=(2,), dtype='int32')\n", (289, 316), True, 'import tensorflow as tf\n'), ((1006, 1055), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'all_inputs', 'outputs': 'output'}), '(inputs=all_inputs, outputs=output)\n', (1020, 1055), True, 'import tensorflow as tf\n'), ((1076, 1125), 'tensorflow.keras.experimental.CosineDecay', 'tf.keras.experimental.CosineDecay', (['(0.01)', '(10000000)'], {}), '(0.01, 10000000)\n', (1109, 1125), True, 'import tensorflow as tf\n'), ((164, 193), 'tensorflow.one_hot', 'tf.one_hot', (['feature', '(size + 1)'], {}), '(feature, size + 1)\n', (174, 193), True, 'import tensorflow as tf\n'), ((724, 753), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (751, 753), True, 'import tensorflow as tf\n'), ((922, 980), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)', 'tf.keras.activations.exponential'], {}), '(1, tf.keras.activations.exponential)\n', (943, 980), True, 'import tensorflow as tf\n'), ((1432, 1471), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.int32'}), '(x, dtype=tf.int32)\n', (1452, 1471), True, 'import tensorflow as tf\n'), ((1473, 1514), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (1493, 1514), True, 'import tensorflow as tf\n'), ((1918, 1973), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['(8)'], {}), '(8)\n', (1970, 1973), True, 'import tensorflow as tf\n'), ((1982, 2037), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['(8)'], {}), '(8)\n', (2034, 2037), True, 'import tensorflow as tf\n'), ((1154, 1201), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1172, 1201), True, 'import tensorflow as tf\n'), ((1208, 1227), 'tensorflow.losses.Poisson', 'tf.losses.Poisson', ([], {}), '()\n', (1225, 1227), True, 'import tensorflow as tf\n'), ((3245, 3305), 'numpy.array', 'np.array', (['[[job.exercise_id, job.runtime_id]]'], {'dtype': '"""int32"""'}), "([[job.exercise_id, job.runtime_id]], dtype='int32')\n", (3253, 3305), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
# Note: careful as np.multiply does an elementwise multiply on numpy arrays
# asterisk (*) does the same but will perfom matrix multiplication on mat (numpy matrices)
class L1Regularization:
"""
**Lasso Regression (L1Regularization)**
L1Regularization adds sum of the absolute value magnitudes of parameters as
penalty term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
[2] Regression shrinkage and selection via the lasso
* [R Tibshirani, 1996] https://goo.gl/Yh9bBU
* [PDF] https://goo.gl/mQP5mA
[3] Feature selection, L1 vs. L2 regularization, and rotational invariance
* [<NAME>, ] [PDF] https://goo.gl/rbwNCt
Args:
_lambda (float32): controls the weight of the penalty term
"""
def __init__(self, _lambda, **kwargs):
self._lambda = _lambda
def regulate(self, weights):
        return np.multiply(self._lambda, np.linalg.norm(weights, 1))  # L1 norm: sum of absolute values
def derivative(self, weights):
return np.multiply(self._lambda, np.sign(weights))
@property
def regulation_name(self):
return self.__class__.__name__
class L2Regularization:
"""
    **Ridge Regression (L2Regularization)**
    L2Regularization adds sum of the squared magnitudes of parameters as penalty
term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
[2] Regression shrinkage and selection via the lasso
* [R Tibshirani, 1996] https://goo.gl/Yh9bBU
* [PDF] https://goo.gl/mQP5mA
[3] Feature selection, L1 vs. L2 regularization, and rotational invariance
* [<NAME>. Ng, ] [PDF] https://goo.gl/rbwNCt
Args:
_lambda (float32): controls the weight of the penalty term
"""
def __init__(self, _lambda, **kwargs):
self._lambda = _lambda
def regulate(self, weights):
return np.multiply(self._lambda, (0.5 * weights.T.dot(weights)))
def derivative(self, weights):
return np.multiply(self._lambda, weights)
@property
def regulation_name(self):
return self.__class__.__name__
class ElasticNetRegularization:
"""
**Elastic Net Regularization (ElasticNetRegularization)**
ElasticNetRegularization adds both absolute value of magnitude and squared
magnitude of coefficient as penalty term to the loss function
References:
[1] Regularization (mathematics)
* [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
Args:
_lambda (float32): controls the weight of the penalty term
l1_ratio (float32): controls the value l1 penalty as a ratio of total penalty added to the loss function
"""
def __init__(self, _lambda, l1_ratio):
self._lambda = _lambda
self.l1_ratio = l1_ratio
def regulate(self, weights):
return np.multiply(self._lambda, (((self.l1_ratio * 0.5) * weights.T.dot(weights)) + ((1 - self.l1_ratio) * np.linalg.norm(weights))))
def derivative(self, weights):
return np.multiply(self._lambda, (((self.l1_ratio * 0.5) * weights) + ((1 - self.l1_ratio) * np.sign(weights))))
@property
def regulation_name(self):
return self.__class__.__name__
class RegularizationFunction:
_regularizers = {
'l1' : L1Regularization,
'lasso' : L1Regularization,
'l2' : L2Regularization,
'ridge' : L2Regularization,
'elastic' : ElasticNetRegularization,
'elastic_net' : ElasticNetRegularization
}
def __init__(self, name = 'lasso', _lambda = 0.5, l1_ratio = 0.5):
if name not in self._regularizers.keys():
raise Exception('Regularization function must be either one of the following: {}.'.format(', '.join(self._regularizers.keys())))
self.regularization_func = self._regularizers[name](_lambda, l1_ratio = l1_ratio)
@property
def name(self):
        return self.regularization_func.regulation_name
def regulate(self, weights):
return self.regularization_func.regulate(weights)
def derivative(self, weights):
return self.regularization_func.derivative(weights)
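# Hedged usage sketch: pick a regulariser by its registered name and evaluate the penalty and
# its gradient on a small weight vector (the values below are chosen arbitrarily for illustration).
if __name__ == '__main__':
    w = np.array([0.5, -1.0, 2.0])
    reg = RegularizationFunction(name='ridge', _lambda=0.1)
    print(reg.regulate(w))    # 0.1 * 0.5 * ||w||^2
    print(reg.derivative(w))  # 0.1 * w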
|
[
"numpy.multiply",
"numpy.sign",
"numpy.linalg.norm"
] |
[((2270, 2304), 'numpy.multiply', 'np.multiply', (['self._lambda', 'weights'], {}), '(self._lambda, weights)\n', (2281, 2304), True, 'import numpy as np\n'), ((1102, 1125), 'numpy.linalg.norm', 'np.linalg.norm', (['weights'], {}), '(weights)\n', (1116, 1125), True, 'import numpy as np\n'), ((1204, 1220), 'numpy.sign', 'np.sign', (['weights'], {}), '(weights)\n', (1211, 1220), True, 'import numpy as np\n'), ((3253, 3276), 'numpy.linalg.norm', 'np.linalg.norm', (['weights'], {}), '(weights)\n', (3267, 3276), True, 'import numpy as np\n'), ((3418, 3434), 'numpy.sign', 'np.sign', (['weights'], {}), '(weights)\n', (3425, 3434), True, 'import numpy as np\n')]
|
"""
Ref: https://github.com/htwang14/CAT/blob/1152f7095d6ea0026c7344b00fefb9f4990444f2/models/FiLM.py#L35
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.modules.batchnorm import _BatchNorm
class SwitchableLayer1D(nn.Module):
"""1-dimensional switchable layer.
The 1D means the module only requires one dimension variable, like BN.
Args:
module_class (nn.Module): Should a module class which takes `num_features`
as the first arg, and multiple kwargs.
"""
def __init__(self, module_class, max_num_features: int, slim_ratios: list, **kwargs):
super(SwitchableLayer1D, self).__init__()
self.max_num_features = max_num_features
modules = []
slim_ratios = sorted(slim_ratios)
for r in slim_ratios:
w = int(np.ceil(r * max_num_features))
modules.append(module_class(w, **kwargs))
self._switch_modules = nn.ModuleList(modules)
self.current_module_idx = -1
self._slim_ratio = max(slim_ratios)
self.slim_ratios = slim_ratios
self.ignore_model_profiling = True
@property
def slim_ratio(self):
return self._slim_ratio
@slim_ratio.setter
def slim_ratio(self, r):
self.current_module_idx = self.slim_ratios.index(r)
self._slim_ratio = r
def forward(self, x):
y = self._switch_modules[self.current_module_idx](x)
return y
class SlimmableOpMixin(object):
def mix_forward(self, x, mix_num=-1):
if mix_num < 0:
mix_num = int(1/self.slim_ratio)
elif mix_num == 0:
print("WARNING: not mix anything.")
out = 0.
for shift_idx in range(0, mix_num):
out = out + self._forward_with_partial_weight(x, shift_idx)
return out * 1. / mix_num
def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
raise NotImplementedError()
def _compute_slice_bound(self, in_channels, out_channels, slim_bias_idx, out_slim_bias_idx=None):
out_slim_bias_idx = slim_bias_idx if out_slim_bias_idx is None else out_slim_bias_idx
out_idx_bias = out_channels * out_slim_bias_idx if not self.non_slimmable_out else 0
in_idx_bias = in_channels * slim_bias_idx if not self.non_slimmable_in else 0
return out_idx_bias, (out_idx_bias+out_channels), in_idx_bias, (in_idx_bias+in_channels)
class _SlimmableBatchNorm(_BatchNorm, SlimmableOpMixin):
"""
BatchNorm2d shared by all sub-networks in slimmable network.
This won't work according to slimmable net paper.
See implementation in https://github.com/htwang14/CAT/blob/1152f7095d6ea0026c7344b00fefb9f4990444f2/models/slimmable_ops.py#L28
If this is used, we will enforce the tracking to be disabled.
Following https://github.com/dem123456789/HeteroFL-Computation-and-Communication-Efficient-Federated-Learning-for-Heterogeneous-Clients
"""
def __init__(self, num_features, eps=1e-5, momentum=None, affine=True,
track_running_stats=False, non_slimmable=False):
assert not track_running_stats, "You should not track stats which cannot be slimmable."
# if track_running_stats:
# assert non_slimmable
super(_SlimmableBatchNorm, self).__init__(num_features, momentum=momentum, track_running_stats=False, affine=affine, eps=eps)
self.max_num_features = num_features
self._slim_ratio = 1.0
self.slim_bias_idx = 0
self.out_slim_bias_idx = None
self.non_slimmable = non_slimmable
self.mix_forward_num = 1 # 1 means not mix; -1 mix all
@property
def slim_ratio(self):
return self._slim_ratio
@slim_ratio.setter
def slim_ratio(self, r):
self.num_features = self._compute_channels(r)
self._slim_ratio = r
if r < 0 and self.track_running_stats:
raise RuntimeError(f"Try to track state when slim_ratio < 1 is {r}")
def _compute_channels(self, ratio):
return self.max_num_features if self.non_slimmable \
else int(np.ceil(self.max_num_features * ratio))
def forward(self, x):
if self.mix_forward_num == 1:
return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
else:
return self.mix_forward(x, mix_num=self.mix_forward_num)
def _forward_with_partial_weight(self, input, slim_bias_idx, out_slim_bias_idx=None):
out_idx0, out_idx1 = self._compute_slice_bound(self.num_features, slim_bias_idx)
weight = self.weight[out_idx0:out_idx1]
bias = self.bias[out_idx0:out_idx1]
# ----- copy from parent implementation ----
self._check_input_dim(input)
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
return F.batch_norm(
input,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean if not self.training or self.track_running_stats else None,
self.running_var if not self.training or self.track_running_stats else None,
weight, bias, bn_training, exponential_average_factor, self.eps)
def _compute_slice_bound(self, channels, slim_bias_idx):
idx_bias = channels * slim_bias_idx if not self.non_slimmable else 0
return idx_bias, (idx_bias+channels)
def _save_to_state_dict(self, destination, prefix, keep_vars):
for name, param in self._parameters.items():
if param is not None:
# ------------------------------
idx_bias = self.num_features * self.slim_bias_idx if not self.non_slimmable else 0
if name == 'weight':
param = param[idx_bias:(idx_bias + self.num_features)]
elif name == 'bias' and param is not None:
param = param[idx_bias:(idx_bias + self.num_features)]
# ------------------------------
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
destination[prefix + name] = buf if keep_vars else buf.detach()
class SlimmableBatchNorm2d(_SlimmableBatchNorm):
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class SlimmableBatchNorm1d(_SlimmableBatchNorm):
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'
.format(input.dim()))
class SlimmableConv2d(nn.Conv2d, SlimmableOpMixin):
"""
Args:
non_slimmable_in: Fix the in size
non_slimmable_out: Fix the out size
"""
def __init__(self, in_channels: int, out_channels: int,
kernel_size, stride=1, padding=0, dilation=1,
groups=1, bias=True,
non_slimmable_out=False, non_slimmable_in=False,):
super(SlimmableConv2d, self).__init__(
in_channels, out_channels,
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
assert groups == 1, "for now, we can only support single group when slimming."
assert in_channels > 0
assert out_channels > 0
self.max_in_channels = in_channels
self.max_out_channels = out_channels
self._slim_ratio = 1.0
self.slim_bias_idx = 0 # input slim bias idx
self.out_slim_bias_idx = None # -1: use the same value as slim_bias_idx
self.non_slimmable_out = non_slimmable_out
self.non_slimmable_in = non_slimmable_in
self.mix_forward_num = -1
@property
def slim_ratio(self):
return self._slim_ratio
@slim_ratio.setter
def slim_ratio(self, r):
self.in_channels, self.out_channels = self._compute_channels(r)
self._slim_ratio = r
def _compute_channels(self, ratio):
in_channels = self.max_in_channels if self.non_slimmable_in \
else int(np.ceil(self.max_in_channels * ratio))
out_channels = self.max_out_channels if self.non_slimmable_out \
else int(np.ceil(self.max_out_channels * ratio))
return in_channels, out_channels
def forward(self, x):
if self.mix_forward_num == 1:
return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
else:
return self.mix_forward(x, mix_num=self.mix_forward_num)
def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
out_idx0, out_idx1, in_idx0, in_idx1 = self._compute_slice_bound(
self.in_channels, self.out_channels, slim_bias_idx, out_slim_bias_idx)
weight = self.weight[out_idx0:out_idx1, in_idx0:in_idx1]
bias = self.bias[out_idx0:out_idx1] if self.bias is not None else None
y = F.conv2d(
x, weight, bias, self.stride, self.padding,
self.dilation, self.groups)
return y / self.slim_ratio if self.training and not self.non_slimmable_out else y
def _save_to_state_dict(self, destination, prefix, keep_vars):
for name, param in self._parameters.items():
if param is not None:
# ------------------------------
out_idx_bias = self.out_channels * self.slim_bias_idx if not self.non_slimmable_out else 0
if name == 'weight':
in_idx_bias = self.in_channels * self.slim_bias_idx \
if not self.non_slimmable_in else 0
param = param[out_idx_bias:(out_idx_bias+self.out_channels),
in_idx_bias:(in_idx_bias+self.in_channels)]
elif name == 'bias' and param is not None:
param = param[out_idx_bias:(out_idx_bias + self.out_channels)]
# ------------------------------
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
destination[prefix + name] = buf if keep_vars else buf.detach()
class SlimmableLinear(nn.Linear, SlimmableOpMixin):
"""
Args:
non_slimmable_in: Fix the in size
non_slimmable_out: Fix the out size
"""
def __init__(self, in_features: int, out_features: int, bias=True,
non_slimmable_out=False, non_slimmable_in=False,):
super(SlimmableLinear, self).__init__(in_features, out_features, bias=bias)
self.max_in_features = in_features
self.max_out_features = out_features
self._slim_ratio = 1.0
self.slim_bias_idx = 0 # input slim bias idx
self.out_slim_bias_idx = None # -1: use the same value as slim_bias_idx
self.non_slimmable_out = non_slimmable_out
self.non_slimmable_in = non_slimmable_in
self.mix_forward_num = -1
@property
def slim_ratio(self):
return self._slim_ratio
@slim_ratio.setter
def slim_ratio(self, r):
self.in_features, self.out_features = self._compute_channels(r)
self._slim_ratio = r
def _compute_channels(self, ratio):
in_features = self.max_in_features if self.non_slimmable_in \
else int(np.ceil(self.max_in_features * ratio))
out_features = self.max_out_features if self.non_slimmable_out \
else int(np.ceil(self.max_out_features * ratio))
return in_features, out_features
def forward(self, x):
if self.mix_forward_num == 1:
return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
else:
return self.mix_forward(x, mix_num=self.mix_forward_num)
def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
out_idx0, out_idx1, in_idx0, in_idx1 = self._compute_slice_bound(
self.in_features, self.out_features, slim_bias_idx, out_slim_bias_idx)
weight = self.weight[out_idx0:out_idx1, in_idx0:in_idx1]
bias = self.bias[out_idx0:out_idx1] if self.bias is not None else None
out = F.linear(x, weight, bias)
return out / self.slim_ratio if self.training and not self.non_slimmable_out else out
def _save_to_state_dict(self, destination, prefix, keep_vars):
for name, param in self._parameters.items():
if param is not None:
# ------------------------------
param = self.get_slim_param(name, param)
# ------------------------------
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in self._buffers.items():
if buf is not None and name not in self._non_persistent_buffers_set:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_slim_param(self, name, param):
out_idx_bias = self.out_features * self.slim_bias_idx if not self.non_slimmable_out else 0
if name == 'weight':
in_idx_bias = self.in_features * self.slim_bias_idx if not self.non_slimmable_in else 0
param = param[out_idx_bias:(out_idx_bias + self.out_features),
in_idx_bias:(in_idx_bias + self.in_features)]
elif name == 'bias' and param is not None:
param = param[out_idx_bias:(out_idx_bias + self.out_features)]
return param
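# Hedged usage sketch (widths chosen arbitrarily): setting `slim_ratio` on a slimmable layer
# shrinks its in/out features to ceil(max * ratio); the forward pass then works on slices of
# the full weight (averaging shifted slices when mix_forward_num is left at -1).
#   layer = SlimmableLinear(128, 64)
#   layer.slim_ratio = 0.5   # now acts as a 64 -> 32 linear layer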
|
[
"torch.nn.functional.linear",
"torch.nn.functional.conv2d",
"numpy.ceil",
"torch.nn.ModuleList",
"torch.nn.functional.batch_norm"
] |
[((961, 983), 'torch.nn.ModuleList', 'nn.ModuleList', (['modules'], {}), '(modules)\n', (974, 983), True, 'import torch.nn as nn\n'), ((6404, 6656), 'torch.nn.functional.batch_norm', 'F.batch_norm', (['input', '(self.running_mean if not self.training or self.track_running_stats else None)', '(self.running_var if not self.training or self.track_running_stats else None)', 'weight', 'bias', 'bn_training', 'exponential_average_factor', 'self.eps'], {}), '(input, self.running_mean if not self.training or self.\n track_running_stats else None, self.running_var if not self.training or\n self.track_running_stats else None, weight, bias, bn_training,\n exponential_average_factor, self.eps)\n', (6416, 6656), True, 'from torch.nn import functional as F\n'), ((10709, 10794), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'weight', 'bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(x, weight, bias, self.stride, self.padding, self.dilation, self.groups\n )\n', (10717, 10794), True, 'from torch.nn import functional as F\n'), ((14032, 14057), 'torch.nn.functional.linear', 'F.linear', (['x', 'weight', 'bias'], {}), '(x, weight, bias)\n', (14040, 14057), True, 'from torch.nn import functional as F\n'), ((845, 874), 'numpy.ceil', 'np.ceil', (['(r * max_num_features)'], {}), '(r * max_num_features)\n', (852, 874), True, 'import numpy as np\n'), ((4137, 4175), 'numpy.ceil', 'np.ceil', (['(self.max_num_features * ratio)'], {}), '(self.max_num_features * ratio)\n', (4144, 4175), True, 'import numpy as np\n'), ((9843, 9880), 'numpy.ceil', 'np.ceil', (['(self.max_in_channels * ratio)'], {}), '(self.max_in_channels * ratio)\n', (9850, 9880), True, 'import numpy as np\n'), ((9980, 10018), 'numpy.ceil', 'np.ceil', (['(self.max_out_channels * ratio)'], {}), '(self.max_out_channels * ratio)\n', (9987, 10018), True, 'import numpy as np\n'), ((13164, 13201), 'numpy.ceil', 'np.ceil', (['(self.max_in_features * ratio)'], {}), '(self.max_in_features * ratio)\n', (13171, 13201), True, 'import numpy as np\n'), ((13301, 13339), 'numpy.ceil', 'np.ceil', (['(self.max_out_features * ratio)'], {}), '(self.max_out_features * ratio)\n', (13308, 13339), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# import matplotlib.pyplot as plt
# from scipy import interpolate
import numpy as np
# step = np.array([12, 6, 4, 3, 2])
# MAP5 = np.array([0.6480, 0.6797, 0.6898, 0.6921, 0.6982])
# step_new = np.arange(step.min(), step.max(), 0.1)
# # step_new = np.arange(2, 11, 0.1)
# func = interpolate.interp1d(step, MAP5, kind='cubic', fill_value="extrapolate")
# MAP5_new = func(step_new)
# plt.figure(figsize=(10,10))
# ax1 = plt.subplot(2,1,2)
# plt.sca(ax1)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.xlabel("KEY-FRAME STEP", fontsize=16)
# plt.ylabel("MAP@5", fontsize=16)
# plt.title("MVOF STEP-MAP@5 CURVE", fontsize=16)
# plt.plot(step_new, MAP5_new, label="$MVOF\quad MAP@5$", linestyle='--')
# plt.scatter(step, MAP5, color="g")
# plt.hlines(0.7026, 13, 2, colors = "r", linestyles = "--", label="$DFF\qquad MAP@5$")
# plt.legend(loc="lower left", fontsize=16)
# ax2 = plt.subplot(2,1,1)
# plt.sca(ax2)
# the_table = plt.table(cellText=[list(np.flip(step, 0)), list(np.flip(MAP5, 0))],
# rowLabels=["STEP", "MAP@5"],
# # colLabels=list(np.flip(step, 0)),
# loc='lower center')
# the_table.set_fontsize(18)
# the_table.scale(1, 2)
# plt.axis('off')
# plt.show()
# In[4]:
import pickle
diffs = []
mvs = []
flows = []
for i in range(602):
try:
flow = pickle.load(open("/home/jingtun/feat_flow_compare/flow_%06d.pkl" % i, 'rb'))
mv = pickle.load(open("/home/jingtun/feat_flow_compare/mv_%06d.pkl" % i, 'rb'))
diff = flow - mv
diffs.append(np.mean(abs(diff)))
mvs.append(np.mean(abs(mv)))
flows.append(np.mean(abs(flow)))
    except Exception:
        print("could not load flow/mv pair for frame %06d" % i)
print("diff abs mean : ", np.mean(diffs))
print("mv abs mean : ", np.mean(mvs))
print("flow abs mean : ", np.mean(flows))
|
[
"numpy.mean"
] |
[((1794, 1808), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (1801, 1808), True, 'import numpy as np\n'), ((1834, 1846), 'numpy.mean', 'np.mean', (['mvs'], {}), '(mvs)\n', (1841, 1846), True, 'import numpy as np\n'), ((1874, 1888), 'numpy.mean', 'np.mean', (['flows'], {}), '(flows)\n', (1881, 1888), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
thres = 0.45
nms_threshold = 0.2
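# thres: minimum detection confidence, used as confThreshold below;
# nms_threshold: IoU threshold passed to cv2.dnn.NMSBoxes to suppress overlapping boxes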
#Default Camera Capture
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
cap.set(10, 150)
##Importing the COCO dataset in a list
classNames= []
classFile = 'coco.names'
with open(classFile,'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
##Configuring both SSD model and weights (assigning)
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
##dnn-Inbuilt method of OpenCV
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
## using Detect method
while True:
success, img = cap.read()
classIds, confs, bbox = net.detect(img, confThreshold=thres)
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
for i in indices:
i = i[0]
box = bbox[i]
x, y, w, h = box[0], box[1], box[2], box[3]
cv2.rectangle(img, (x, y),(x+w, h+y), color=(0, 255, 0), thickness=2)
cv2.putText(img,classNames[classIds[i][0]-1].upper(), (box[0]+10, box[1]+30),
cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
cv2.imshow("Output", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.dnn_DetectionModel",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.dnn.NMSBoxes",
"cv2.waitKey"
] |
[((95, 114), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (111, 114), False, 'import cv2\n'), ((522, 569), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['weightsPath', 'configPath'], {}), '(weightsPath, configPath)\n', (544, 569), False, 'import cv2\n'), ((1444, 1467), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1465, 1467), False, 'import cv2\n'), ((948, 999), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bbox', 'confs', 'thres', 'nms_threshold'], {}), '(bbox, confs, thres, nms_threshold)\n', (964, 999), False, 'import cv2\n'), ((1348, 1373), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img'], {}), "('Output', img)\n", (1358, 1373), False, 'import cv2\n'), ((1122, 1196), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, h + y)'], {'color': '(0, 255, 0)', 'thickness': '(2)'}), '(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)\n', (1135, 1196), False, 'import cv2\n'), ((1381, 1395), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1392, 1395), False, 'import cv2\n'), ((862, 877), 'numpy.array', 'np.array', (['confs'], {}), '(confs)\n', (870, 877), True, 'import numpy as np\n')]
|
import os
from skimage.transform import resize
import imageio
import numpy as np
import glob
import scipy
def main():
rootdir = "/home/nbayat5/Desktop/celebA/identities"
#os.mkdir("/home/nbayat5/Desktop/celebA/face_recognition_srgan")
for subdir, dirs, files in os.walk(rootdir):
for dir in dirs:
            path = os.path.join(rootdir, dir)  # path of this identity directory
parts = path.split("/")
if len(parts) == 6:
continue
os.mkdir("/home/nbayat5/Desktop/celebA/face_recognition_srgan_test/%s" % (parts[6].rstrip()))
imgs_hr, imgs_lr = load_dataforIdentities(path)
counter = 1
for img in imgs_hr:
# fake_hr = gan.generator.predict(img_lr) #fix for loop to lr
img = 0.5 * img + 0.5
img = np.asarray(img)
path_hr = "/home/nbayat5/Desktop/celebA/face_recognition_srgan_test/%s/%s_%d.png" % (
parts[6].rstrip(), parts[6].rstrip(), counter)
imageio.imwrite(path_hr, img)
print("img %s_%d.png saved." % (parts[6].rstrip(), counter))
counter += 1
break
def load_dataforIdentities(path):
imgs_hr = []
imgs_lr = []
os.chdir(path)
# train_images = glob.glob("./train/*.jpg")
# val_images = glob.glob("./validation/*.jpg")
test_images = glob.glob("./test/*.jpg")
# batch_images = train_images + val_images
# batch_images = np.random.choice(path2, size=1)
for img_path in test_images:
img = scipy.misc.imread(img_path, mode='RGB').astype(np.float)
img_hr = scipy.misc.imresize(img, (64, 64))
img_lr = scipy.misc.imresize(img, (16, 16))
imgs_hr.append(img_hr)
imgs_lr.append(img_lr)
imgs_hr = np.array(imgs_hr) / 127.5 - 1.
imgs_lr = np.array(imgs_lr) / 127.5 - 1.
return imgs_hr, imgs_lr
if __name__ == "__main__":
main()
|
[
"imageio.imwrite",
"os.walk",
"os.path.join",
"numpy.asarray",
"os.chdir",
"numpy.array",
"scipy.misc.imread",
"scipy.misc.imresize",
"glob.glob"
] |
[((285, 301), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (292, 301), False, 'import os\n'), ((1291, 1305), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1299, 1305), False, 'import os\n'), ((1438, 1463), 'glob.glob', 'glob.glob', (['"""./test/*.jpg"""'], {}), "('./test/*.jpg')\n", (1447, 1463), False, 'import glob\n'), ((1712, 1746), 'scipy.misc.imresize', 'scipy.misc.imresize', (['img', '(64, 64)'], {}), '(img, (64, 64))\n', (1731, 1746), False, 'import scipy\n'), ((1769, 1803), 'scipy.misc.imresize', 'scipy.misc.imresize', (['img', '(16, 16)'], {}), '(img, (16, 16))\n', (1788, 1803), False, 'import scipy\n'), ((349, 378), 'os.path.join', 'os.path.join', (['rootdir', 'subdir'], {}), '(rootdir, subdir)\n', (361, 378), False, 'import os\n'), ((1901, 1918), 'numpy.array', 'np.array', (['imgs_hr'], {}), '(imgs_hr)\n', (1909, 1918), True, 'import numpy as np\n'), ((1951, 1968), 'numpy.array', 'np.array', (['imgs_lr'], {}), '(imgs_lr)\n', (1959, 1968), True, 'import numpy as np\n'), ((842, 857), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (852, 857), True, 'import numpy as np\n'), ((1042, 1071), 'imageio.imwrite', 'imageio.imwrite', (['path_hr', 'img'], {}), '(path_hr, img)\n', (1057, 1071), False, 'import imageio\n'), ((1631, 1670), 'scipy.misc.imread', 'scipy.misc.imread', (['img_path'], {'mode': '"""RGB"""'}), "(img_path, mode='RGB')\n", (1648, 1670), False, 'import scipy\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Gender Recognition by Voice Kaggle [ Test Accuracy : 99.08 % ]
# In[ ]:
# ## CONTENTS::
# [ **1 ) Importing Various Modules and Loading the Dataset**](#content1)
# [ **2 ) Exploratory Data Analysis (EDA)**](#content2)
# [ **3 ) OutlierTreatment**](#content3)
# [ **4 ) Feature Engineering**](#content4)
# [ **5 ) Preparing the Data**](#content5)
# [ **6 ) Modelling**](#content6)
# [ **7 ) Parameter Tuning with GridSearchCV**](#content7)
# In[ ]:
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corresponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Loading the Dataset
# In[ ]:
train=pd.read_csv(r"../../../input/primaryobjects_voicegender/voice.csv")
# In[ ]:
train.head(10)
# <a id="content2"></a>
# ## 2 ) Exploratory Data Analysis (EDA)
# ## 2.1 ) The Features and the 'Target' variable
# In[ ]:
df=train.copy()
# In[ ]:
df.head(10)
# In[ ]:
df.shape
# In[ ]:
df.index
# In[ ]:
df.columns # give a short description of each feature.
# **A short description, as given on the 'Data' tab on Kaggle, is:**
# ####
#
# **meanfreq**: mean frequency (in kHz)
#
# **sd**: standard deviation of frequency
#
# **median**: median frequency (in kHz)
#
# **Q25**: first quantile (in kHz)
#
# **Q75**: third quantile (in kHz)
#
# **IQR**: interquantile range (in kHz)
#
# **skew**: skewness (see note in specprop description)
#
# **kurt**: kurtosis (see note in specprop description)
#
# **sp.ent**: spectral entropy
#
# **sfm**: spectral flatness
#
# **mode**: mode frequency
#
# **centroid**: frequency centroid (see specprop)
#
# **peakf**: peak frequency (frequency with highest energy)
#
# **meanfun**: average of fundamental frequency measured across acoustic signal
#
# **minfun**: minimum fundamental frequency measured across acoustic signal
#
# **maxfun**: maximum fundamental frequency measured across acoustic signal
#
# **meandom**: average of dominant frequency measured across acoustic signal
#
# **mindom**: minimum of dominant frequency measured across acoustic signal
#
# **maxdom**: maximum of dominant frequency measured across acoustic signal
#
# **dfrange**: range of dominant frequency measured across acoustic signal
#
# **modindx**: modulation index. Calculated as the accumulated absolute difference between adjacent measurements of fundamental frequencies divided by the frequency range
#
# **label**: male or female
# #### Note that we have 3168 voice samples and for each sample 20 different acoustic properties are recorded. Finally, the 'label' column is the target variable we have to predict, which is the gender of the person.
# ## 2.2 ) Missing Values Treatment
# In[ ]:
# check for null values.
df.isnull().any()
# In[ ]:
msno.matrix(df) # just to visualize. no missing value.
# ## 2.3 ) Univariate Analysis
# In this section I have performed the univariate analysis. Note that since all of the features are 'numeric' the most reasonable way to plot them would either be a 'histogram' or a 'boxplot'.
#
# Also note that univariate analysis is useful for outlier detection. Hence besides plotting a boxplot and a histogram for each column or feature, I have written a small utility function which tells the remaining no of observations for each feature if we remove its outliers.
# #### To detect the outliers I have used the standard 1.5 InterQuartileRange (IQR) rule, which states that any observation less than 'first quartile - 1.5 IQR' or greater than 'third quartile + 1.5 IQR' is an outlier.
# In[ ]:
df.describe()
# In[ ]:
def calc_limits(feature):
q1,q3=df[feature].quantile([0.25,0.75])
iqr=q3-q1
rang=1.5*iqr
return(q1-rang,q3+rang)
# In[ ]:
def plot(feature):
fig,axes=plt.subplots(1,2)
sns.boxplot(data=df,x=feature,ax=axes[0])
sns.distplot(a=df[feature],ax=axes[1],color='#ff4125')
fig.set_size_inches(15,5)
lower,upper = calc_limits(feature)
    l=[i for i in df[feature] if i>lower and i<upper]  # keep only the in-range values so len(l) counts data points
print("Number of data points remaining if outliers removed : ",len(l))
# In[ ]:
plot('meanfreq')
# #### INFERENCES FROM THE PLOT--
#
# 1) First of all note that the values are consistent with those observed from the describe() output above.
#
# 2) Note that we have a couple of outliers w.r.t. the 1.5 IQR rule (represented by a 'dot' in the box plot). Removing these data points or outliers leaves us with around 3104 values.
#
# 3) Also note from the distplot that the distribution seems to be a bit -ve skewed, hence we can normalize it to make the distribution a bit more symmetric.
#
# 4) LASTLY NOTE THAT A LEFT-TAILED DISTRIBUTION HAS MORE OUTLIERS BELOW Q1 AS EXPECTED, AND A RIGHT-TAILED ONE HAS MORE ABOVE Q3.
# #### Similar other plots can be inferenced.
# In[ ]:
plot('sd')
# In[ ]:
plot('median')
# In[ ]:
plot('Q25')
# In[ ]:
plot('IQR')
# In[ ]:
plot('skew')
# In[ ]:
plot('kurt')
# In[ ]:
plot('sp.ent')
# In[ ]:
plot('sfm')
# In[ ]:
plot('meanfun')
# In[ ]:
sns.countplot(data=df,x='label')
# In[ ]:
df['label'].value_counts()
# #### Note that we have equal no of observations for the 'males' and the 'females'. Hence it is a balanced class problem.
# ## 2.4 ) Bivariate Analysis
# ## 2.4.1 ) Correlation b/w Features
# In this section I have analyzed the correlation between different features. To do this I have plotted a 'heat map' which clearly visualizes the correlation between different features.
# In[ ]:
temp = []
for i in df.label:
if i == 'male':
temp.append(1)
else:
temp.append(0)
df['label'] = temp
# In[ ]:
#correlation matrix.
cor_mat= df[:].corr()
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig=plt.gcf()
fig.set_size_inches(30,12)
# draw the heatmap of the correlation matrix (the lower triangle stays visible through the mask above)
sns.heatmap(data=cor_mat, mask=mask, square=True, annot=True, cbar=True)
# #### SOME INFERENCES FROM THE ABOVE HEATMAP--
#
# 1) Mean frequency is moderately related to label.
#
# 2) IQR and label tend to have a strong positive correlation.
#
# 3) Spectral entropy is also quite highly correlated with the label while sfm is moderately related with label.
#
# 4) skewness and kurtosis aren't much related with label.
#
# 5) meanfun is highly negatively correlated with the label.
#
# 6) Centroid and median have a high positive correlation as expected from their formulae.
#
# 7) ALSO NOTE THAT MEANFREQ AND CENTROID ARE EXACTLY THE SAME FEATURE AS PER BOTH FORMULAE AND VALUES. HENCE THEIR CORRELATION IS A PERFECT 1. IN THAT CASE WE CAN DROP EITHER COLUMN. Note that centroid in general has a high degree of correlation with most of the other features.
#
# SO I WILL DROP THE 'CENTROID' COLUMN.
#
# 8) sd is highly positively related to sfm and so is sp.ent to sd.
#
# 9) kurt and skew are also highly correlated.
#
# 10) meanfreq is highly related to median as well as Q25.
#
# 11) IQR is highly correlated to sd.
#
# 12) Finally, the self-relation, i.e. of a feature to itself, is equal to 1 as expected.
# #### Note that we can drop some highly correlated features as they add redundancy to the model, but let us keep all the features for now. In case of highly correlated features we can use dimensionality reduction techniques like Principal Component Analysis (PCA) to reduce our feature space; a small illustration follows below.
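# #### (The cell below is an illustrative, hedged sketch only and is not part of the original analysis; the variable names and the choice of 2 components are my own.)

# In[ ]:

from sklearn.decomposition import PCA

# standardise the numeric features and project them onto the first two principal components
pca_input = StandardScaler().fit_transform(df.drop('label', axis=1))
pca = PCA(n_components=2)
pca_components = pca.fit_transform(pca_input)
print('explained variance ratio:', pca.explained_variance_ratio_)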
# In[ ]:
df.drop('centroid',axis=1,inplace=True)
# ## 2.4.2 ) Plotting the Features against the 'Target' variable
# Here I have just written a small utility function that plots the 'label' column vs the provided feature on a boxplot. In this way I have plotted some of the features against our target variable. This makes it easier to see the effect of the corresponding feature on the 'label'.
# In[ ]:
# drawing features against the target variable.
def plot_against_target(feature):
sns.factorplot(data=df,y=feature,x='label',kind='box')
fig=plt.gcf()
fig.set_size_inches(7,7)
# In[ ]:
plot_against_target('meanfreq') # 0 for females and 1 for males.
# #### INFERENCES--
#
# 1) Firstly note that 0->'female' and 1->'male'.
#
# 2) Note that the boxplot shows that the females in general have higher mean frequencies than their male counterparts, which is a generally accepted fact.
# #### Again similar inferences can be drawn.
# In[ ]:
plot_against_target('sd')
# In[ ]:
plot_against_target('median')
# In[ ]:
plot_against_target('Q25')
# In[ ]:
plot_against_target('IQR')
# #### Note here that there is a remarkable difference b/w the interquartile ranges of males and females. This is evident from the strong relation between 'label' and 'IQR' in the heatmap plotted above.
# In[ ]:
plot_against_target('sp.ent')
# In[ ]:
plot_against_target('sfm')
# In[ ]:
plot_against_target('meanfun')
# #### Again, a large difference between the females' and males' mean fundamental frequency. This is evident from the heat map, which clearly shows the high correlation between meanfun and the 'label'.
# In[ ]:
# #### Now we move on to analyzing the features pairwise. Since all the features are continuous, the most reasonable way to do this is to plot a scatter plot for each feature pair. I have also distinguished males and females on the same plot, which makes it a bit easier to compare the variation of the features within the two classes.
# In[ ]:
g = sns.PairGrid(df[['meanfreq','sd','median','Q25','IQR','sp.ent','sfm','meanfun','label']], hue = "label")
g = g.map(plt.scatter).add_legend()
# In[ ]:
# <a id="content3"></a>
# ## 3 ) Outlier Treatment
# In this section I have dealt with the outliers. Note that we discovered the potential outliers in the **'univariate analysis'** section. To remove those outliers we can either drop the corresponding data points or impute them with some other statistical quantity like the median (which is robust to outliers).
# #### For now I shall be removing every observation that is an outlier with respect to 'any' feature. Note that this substantially reduces the dataset size.
# In[ ]:
# removal of any data point which is an outlier for any feature.
for col in df.columns:
    lower, upper = calc_limits(col)
    df = df[(df[col] > lower) & (df[col] < upper)]
# In[ ]:
df.shape
# In[ ]:
df.head(10)
# <a id="content4"></a>
# ## 4 ) Feature Engineering.
# ## 4.1 ) Dropping the features
# I have dropped some columns which according to my analysis proved to be less useful or redundant.
# In[ ]:
temp_df=df.copy()
temp_df.drop(['skew','kurt','mindom','maxdom'],axis=1,inplace=True) # only one of maxdom and dfrange.
temp_df.head(10)
#df.head(10)
# ## 4.2 ) Creating new features
# I have done two new things. Firstly, I have made 'meanfreq', 'median' and 'mode' comply with the standard relation ->
# #### 3*Median = 2*Mean + Mode
# #### For this I have adjusted the values in the 'median' column as shown below. You could instead alter the values in one of the other columns, say the 'meanfreq' column.
# In[ ]:
temp_df['meanfreq']=temp_df['meanfreq'].apply(lambda x:x*2)
temp_df['median']=temp_df['meanfreq']+temp_df['mode']
temp_df['median']=temp_df['median'].apply(lambda x:x/3)
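# Note: after the three lines above, 'meanfreq' is left holding 2*mean and 'median' has been rebuilt as (2*Mean + Mode)/3, i.e. the relation above solved for the median.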
# In[ ]:
temp_df.head(10)
# In[ ]:
sns.boxplot(data=temp_df,y='median',x='label') # seeing the new 'median' against the 'label'.
# The second new feature that I have added measures the 'skewness'.
# #### For this I have used the 'Karl Pearson coefficient', which is calculated as shown below ->
# **Coefficient = (Mean - Mode) / StandardDeviation**
# **You can also try some other coefficient and see how it compares with the target, i.e. the 'label' column.**
# In[ ]:
temp_df['pear_skew']=temp_df['meanfreq']-temp_df['mode']
temp_df['pear_skew']=temp_df['pear_skew']/temp_df['sd']
temp_df.head(10)
# In[ ]:
sns.boxplot(data=temp_df,y='pear_skew',x='label') # plotting new 'skewness' against the 'label'.
# <a id="content5"></a>
# ## 5 ) Preparing the Data
# ## 5.1 ) Normalizing the Features.
# In[ ]:
scaler=StandardScaler()
scaled_df=scaler.fit_transform(temp_df.drop('label',axis=1))
X=scaled_df
Y=df['label'].values  # .as_matrix() was removed in newer pandas; .values is the equivalent
# ## 5.2 ) Splitting into Training and Validation sets.
# In[ ]:
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.20,random_state=42)
# In[ ]:
# <a id="content6"></a>
# ## 6 ) Modelling
# #### LOGISTIC REGRESSSION
# In[ ]:
clf_lr=LogisticRegression()
clf_lr.fit(x_train,y_train)
pred=clf_lr.predict(x_test)
print(accuracy_score(pred,y_test))
# #### kNN
# In[ ]:
clf_knn=KNeighborsClassifier()
clf_knn.fit(x_train,y_train)
pred=clf_knn.predict(x_test)
print(accuracy_score(pred,y_test))
# #### Support Vector Machine (SVM)
# In[ ]:
clf_svm=SVC()
clf_svm.fit(x_train,y_train)
pred=clf_svm.predict(x_test)
print(accuracy_score(pred,y_test))
# #### DECISION TREE
# In[ ]:
clf_dt=DecisionTreeClassifier()
clf_dt.fit(x_train,y_train)
pred=clf_dt.predict(x_test)
print(accuracy_score(pred,y_test))
# #### RANDOM FOREST
# In[ ]:
clf_rf=RandomForestClassifier()
clf_rf.fit(x_train,y_train)
pred=clf_rf.predict(x_test)
print(accuracy_score(pred,y_test))
# #### GRADIENT BOOSTING
# In[ ]:
clf_gb=GradientBoostingClassifier()
clf_gb.fit(x_train,y_train)
pred=clf_gb.predict(x_test)
print(accuracy_score(pred,y_test))
# #### We can now move on to comparing the results of the various modelling algorithms. For this I shall combine the results of all the models in a data frame and then plot them using a barplot.
# In[ ]:
models=[LogisticRegression(),LinearSVC(),SVC(kernel='rbf'),KNeighborsClassifier(),RandomForestClassifier(),DecisionTreeClassifier(),GradientBoostingClassifier(),GaussianNB()]
model_names=['LogisticRegression','LinearSVM','rbfSVM','KNearestNeighbors','RandomForestClassifier','DecisionTree','GradientBoostingClassifier','GaussianNB']
acc=[]
d={}
for model in range(len(models)):
clf=models[model]
clf.fit(x_train,y_train)
pred=clf.predict(x_test)
acc.append(accuracy_score(pred,y_test))
d={'Modelling Algo':model_names,'Accuracy':acc}
# In[ ]:
acc_frame=pd.DataFrame(d)
acc_frame
# In[ ]:
sns.barplot(y='Modelling Algo',x='Accuracy',data=acc_frame)
# In[ ]:
# <a id="content7"></a>
# ## 7 ) Parameter Tuning with GridSearchCV
# 1. I have tuned only the SVM; the other algorithms can be tuned similarly.
# In[ ]:
params_dict={'C':[0.001,0.01,0.1,1,10,100],'gamma':[0.001,0.01,0.1,1,10,100],'kernel':['linear','rbf']}
clf=GridSearchCV(estimator=SVC(),param_grid=params_dict,scoring='accuracy',cv=10)
clf.fit(x_train,y_train)
# In[ ]:
clf.best_score_
# In[ ]:
clf.best_params_
# In[ ]:
print(accuracy_score(clf.predict(x_test),y_test))
# In[ ]:
print(precision_score(clf.predict(x_test),y_test))
# ### The precision is almost 99.5%, which is quite high.
# ### After tuning, the SVM gives an impressive accuracy of around 99.1%. Similarly, tuning the other algorithms' parameters might give even greater accuracy; a hedged sketch for the random forest follows.
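# A minimal sketch of tuning another model in the same way (illustrative only; the random-forest parameter
# grid below is an assumption, not something taken from the original kernel).
rf_params = {'n_estimators': [100, 300, 500], 'max_depth': [None, 5, 10], 'min_samples_split': [2, 5, 10]}
clf_rf_tuned = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=rf_params, scoring='accuracy', cv=10)
clf_rf_tuned.fit(x_train, y_train)
print(clf_rf_tuned.best_params_)
print(accuracy_score(clf_rf_tuned.predict(x_test), y_test))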
# In[ ]:
# ## THE END!!!
# In[ ]:
|
[
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"missingno.matrix",
"numpy.array",
"matplotlib.style.use",
"seaborn.set",
"seaborn.distplot",
"sklearn.tree.DecisionTreeClassifier",
"pandas.DataFrame",
"numpy.tril_indices_from",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.gcf",
"sklearn.svm.LinearSVC",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.naive_bayes.GaussianNB",
"warnings.filterwarnings",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC",
"seaborn.factorplot",
"sklearn.linear_model.LogisticRegression",
"seaborn.boxplot",
"sklearn.preprocessing.StandardScaler",
"seaborn.countplot",
"seaborn.barplot",
"seaborn.PairGrid",
"matplotlib.pyplot.subplots"
] |
[((600, 633), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""always"""'], {}), "('always')\n", (623, 633), False, 'import warnings\n'), ((634, 667), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (657, 667), False, 'import warnings\n'), ((945, 973), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (954, 973), False, 'from matplotlib import style\n'), ((974, 1018), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'color_codes': '(True)'}), "(style='whitegrid', color_codes=True)\n", (981, 1018), True, 'import seaborn as sns\n'), ((1888, 1954), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/primaryobjects_voicegender/voice.csv"""'], {}), "('../../../input/primaryobjects_voicegender/voice.csv')\n", (1899, 1954), True, 'import pandas as pd\n'), ((4017, 4032), 'missingno.matrix', 'msno.matrix', (['df'], {}), '(df)\n', (4028, 4032), True, 'import missingno as msno\n'), ((6314, 6347), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': '"""label"""'}), "(data=df, x='label')\n", (6327, 6347), True, 'import seaborn as sns\n'), ((6963, 6980), 'numpy.array', 'np.array', (['cor_mat'], {}), '(cor_mat)\n', (6971, 6980), True, 'import numpy as np\n'), ((7026, 7035), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7033, 7035), True, 'import matplotlib.pyplot as plt\n'), ((10504, 10618), 'seaborn.PairGrid', 'sns.PairGrid', (["df[['meanfreq', 'sd', 'median', 'Q25', 'IQR', 'sp.ent', 'sfm', 'meanfun',\n 'label']]"], {'hue': '"""label"""'}), "(df[['meanfreq', 'sd', 'median', 'Q25', 'IQR', 'sp.ent', 'sfm',\n 'meanfun', 'label']], hue='label')\n", (10516, 10618), True, 'import seaborn as sns\n'), ((12499, 12547), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'temp_df', 'y': '"""median"""', 'x': '"""label"""'}), "(data=temp_df, y='median', x='label')\n", (12510, 12547), True, 'import seaborn as sns\n'), ((13221, 13272), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'temp_df', 'y': '"""pear_skew"""', 'x': '"""label"""'}), "(data=temp_df, y='pear_skew', x='label')\n", (13232, 13272), True, 'import seaborn as sns\n'), ((13429, 13445), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (13443, 13445), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Imputer, LabelEncoder, OneHotEncoder\n'), ((13645, 13699), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, Y, test_size=0.2, random_state=42)\n', (13661, 13699), False, 'from sklearn.model_selection import train_test_split\n'), ((13805, 13825), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (13823, 13825), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13950, 13972), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (13970, 13972), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((14124, 14129), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (14127, 14129), False, 'from sklearn.svm import SVC\n'), ((14267, 14291), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (14289, 14291), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((14425, 14449), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (14447, 14449), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14587, 14615), 
'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (14613, 14615), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((15486, 15501), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (15498, 15501), True, 'import pandas as pd\n'), ((15525, 15586), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""Modelling Algo"""', 'x': '"""Accuracy"""', 'data': 'acc_frame'}), "(y='Modelling Algo', x='Accuracy', data=acc_frame)\n", (15536, 15586), True, 'import seaborn as sns\n'), ((5014, 5032), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (5026, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5036, 5079), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': 'feature', 'ax': 'axes[0]'}), '(data=df, x=feature, ax=axes[0])\n', (5047, 5079), True, 'import seaborn as sns\n'), ((5082, 5138), 'seaborn.distplot', 'sns.distplot', ([], {'a': 'df[feature]', 'ax': 'axes[1]', 'color': '"""#ff4125"""'}), "(a=df[feature], ax=axes[1], color='#ff4125')\n", (5094, 5138), True, 'import seaborn as sns\n'), ((6986, 7012), 'numpy.tril_indices_from', 'np.tril_indices_from', (['mask'], {}), '(mask)\n', (7006, 7012), True, 'import numpy as np\n'), ((8984, 9041), 'seaborn.factorplot', 'sns.factorplot', ([], {'data': 'df', 'y': 'feature', 'x': '"""label"""', 'kind': '"""box"""'}), "(data=df, y=feature, x='label', kind='box')\n", (8998, 9041), True, 'import seaborn as sns\n'), ((9047, 9056), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9054, 9056), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13916), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (13902, 13916), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14037, 14065), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14051, 14065), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14194, 14222), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14208, 14222), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14354, 14382), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14368, 14382), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14512, 14540), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14526, 14540), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14678, 14706), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14692, 14706), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14913, 14933), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (14931, 14933), False, 'from sklearn.linear_model import LogisticRegression\n'), ((14934, 14945), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (14943, 14945), False, 'from sklearn.svm import LinearSVC\n'), ((14946, 14963), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': 
'"""rbf"""'}), "(kernel='rbf')\n", (14949, 14963), False, 'from sklearn.svm import SVC\n'), ((14964, 14986), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (14984, 14986), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((14987, 15011), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (15009, 15011), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15012, 15036), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (15034, 15036), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((15037, 15065), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (15063, 15065), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((15066, 15078), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (15076, 15078), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((15380, 15408), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (15394, 15408), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((15881, 15886), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (15884, 15886), False, 'from sklearn.svm import SVC\n')]
|
import tclab
import time
import numpy as np
import sys
import first_principles_model as fp
def doublet_test(data_file='step_test.csv', show_plot=True):
'''doublet test the system and save data to given file path'''
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1]
    csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp'  # 8 columns, matching the data rows logged below
start_time = time.time()
u = 0
tc1.Q1(u)
tc1.Q2(u)
current_time = 0
while current_time < 1200:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
if humid_in is None:
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
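            # Doublet input: step the heaters from 0% to 100% at t = 60 s, then back down to 50% at t = 800 s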
if current_time > 60:
u = 100
if current_time > 800:
u = 50
tc1.Q1(u)
tc1.Q2(u)
# print current values
print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}'
.format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out))
data = np.vstack([data, [current_time, u, humid_in,
temp_in, humid_out, temp_out, tc1.T1, tc1.T2]])
np.savetxt(data_file, data[1:],
delimiter=',', header=csv_file_header)
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
def run_controller(run_time, PID_parameters, show_plot=True):
'''
Run the main loop
run_time total run time in minutes
show_plot whether to show the dynamic plot of the system
'''
Kc, tau_I, tau_D = PID_parameters
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp,P,I,D,SP,Err'
start_time = time.time()
u = 0
Qss = 0 # 0% heater to start
err = np.zeros(run_time*60)
sp = np.ones(run_time*60)*25
# Set up the set point
sp[10:300] = 303.15 - 273.15 # 30 degrees C
sp[300:550] = 298.15 - 273.15 # 25 degrees C
sp[550:800] = 310.15 - 273.15 # 37 degrees C
sp[800:3000] = 307.15 - 273.15 # 34 degrees C
sp[3000:] = 300.15 - 273.15 # 27 degrees C
integral_err_sum = 0
u_max = 100
u_min = 0
prev_temp = 0
prev_time = start_time
i = 0
tc1.Q1(u)
tc1.Q2(u)
while True:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
dtime = current_time - prev_time
if (humid_in is None) or (humid_out is None):
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
# PID controller to determine u
print("i", i)
err[i] = sp[i] - temp_in
if i > 10:
integral_err_sum = integral_err_sum + err[i] * dtime
print("error", err[i])
ddt = temp_in - prev_temp
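            # PID terms: the derivative acts on the measured temperature (ddt) rather than on the error, avoiding derivative kick on set-point changes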
P = Kc * err[i]
I = Kc/tau_I * integral_err_sum
D = - Kc * tau_D * ddt
prev_temp = temp_in
u = (Qss + P + I + D) * 100
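            # Clamp the output to its limits and back out the just-added integral contribution (simple anti-windup)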
if i > 10:
if u > u_max:
u = u_max
integral_err_sum = integral_err_sum - err[i] * dtime
if u < u_min:
u = u_min
integral_err_sum = integral_err_sum - err[i] * dtime
i += 1
prev_time = current_time
# Set the heater outputs
tc1.Q1(u)
tc1.Q2(u)
# print current values
            print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}, P: {:.2f}, I: {:.2f}, D: {:.2f}, SP: {:.2f}, Err: {:.2f}'
                  .format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out, P, I, D, sp[i-1], err[i-1]))
            # i was already incremented above, so index i-1 refers to the set point and error just used
            data = np.vstack([data, [current_time, u, humid_in,
                              temp_in, humid_out, temp_out, tc1.T1, tc1.T2, P, I, D, sp[i-1], err[i-1]]])
np.savetxt('data.csv', data[1:],
delimiter=',', header=csv_file_header)
if current_time > run_time*60:
print('Run finished. Exiting...')
tc1.LED(0)
return
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
|
[
"tclab.TCLab",
"numpy.ones",
"Adafruit_DHT.read_retry",
"numpy.zeros",
"numpy.vstack",
"numpy.savetxt",
"time.time"
] |
[((291, 304), 'tclab.TCLab', 'tclab.TCLab', ([], {}), '()\n', (302, 304), False, 'import tclab\n'), ((588, 599), 'time.time', 'time.time', ([], {}), '()\n', (597, 599), False, 'import time\n'), ((2418, 2431), 'tclab.TCLab', 'tclab.TCLab', ([], {}), '()\n', (2429, 2431), False, 'import tclab\n'), ((2730, 2741), 'time.time', 'time.time', ([], {}), '()\n', (2739, 2741), False, 'import time\n'), ((2797, 2820), 'numpy.zeros', 'np.zeros', (['(run_time * 60)'], {}), '(run_time * 60)\n', (2805, 2820), True, 'import numpy as np\n'), ((2828, 2850), 'numpy.ones', 'np.ones', (['(run_time * 60)'], {}), '(run_time * 60)\n', (2835, 2850), True, 'import numpy as np\n'), ((779, 837), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(4)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 4, retries=5, delay_seconds=1)\n', (802, 837), False, 'import Adafruit_DHT\n'), ((889, 948), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(17)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 17, retries=5, delay_seconds=1)\n', (912, 948), False, 'import Adafruit_DHT\n'), ((1641, 1737), 'numpy.vstack', 'np.vstack', (['[data, [current_time, u, humid_in, temp_in, humid_out, temp_out, tc1.T1,\n tc1.T2]]'], {}), '([data, [current_time, u, humid_in, temp_in, humid_out, temp_out,\n tc1.T1, tc1.T2]])\n', (1650, 1737), True, 'import numpy as np\n'), ((1786, 1856), 'numpy.savetxt', 'np.savetxt', (['data_file', 'data[1:]'], {'delimiter': '""","""', 'header': 'csv_file_header'}), "(data_file, data[1:], delimiter=',', header=csv_file_header)\n", (1796, 1856), True, 'import numpy as np\n'), ((3372, 3430), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(4)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 4, retries=5, delay_seconds=1)\n', (3395, 3430), False, 'import Adafruit_DHT\n'), ((3482, 3541), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(17)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 17, retries=5, delay_seconds=1)\n', (3505, 3541), False, 'import Adafruit_DHT\n'), ((5082, 5202), 'numpy.vstack', 'np.vstack', (['[data, [current_time, u, humid_in, temp_in, humid_out, temp_out, tc1.T1,\n tc1.T2, P, I, D, sp[i], err[i]]]'], {}), '([data, [current_time, u, humid_in, temp_in, humid_out, temp_out,\n tc1.T1, tc1.T2, P, I, D, sp[i], err[i]]])\n', (5091, 5202), True, 'import numpy as np\n'), ((5251, 5322), 'numpy.savetxt', 'np.savetxt', (['"""data.csv"""', 'data[1:]'], {'delimiter': '""","""', 'header': 'csv_file_header'}), "('data.csv', data[1:], delimiter=',', header=csv_file_header)\n", (5261, 5322), True, 'import numpy as np\n'), ((993, 1004), 'time.time', 'time.time', ([], {}), '()\n', (1002, 1004), False, 'import time\n'), ((3586, 3597), 'time.time', 'time.time', ([], {}), '()\n', (3595, 3597), False, 'import time\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
####################
def ld_to_dl(ld):
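    # flatten a list of dicts [{k: v1}, {k: v2}, ...] into a dict of lists {k: [v1, v2, ...]}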
dl = {}
for i, d in enumerate(ld):
for key in d.keys():
value = d[key]
if i == 0:
dl[key] = [value]
else:
dl[key].append(value)
return dl
####################
results = np.load('results.npy', allow_pickle=True)
results = ld_to_dl(results)
df = pd.DataFrame.from_dict(results)
print (df.columns)
####################
# example:
# y_mean[skip][cards][alloc][profile][rpr_alloc][layer]
'''
block = df[ df['alloc'] == 'block' ][ df['rpr_alloc'] == 'centroids' ]
print (block)
block = df.query('(alloc == "block") & (rpr_alloc == "centroids")')
print (block)
'''
####################
x = df.query('(alloc == "block") & (rpr_alloc == "centroids") & (profile == 1)')
mac_per_cycle = x['nmac'] / x['cycle']
print (mac_per_cycle)
####################
|
[
"numpy.load",
"pandas.DataFrame.from_dict"
] |
[((374, 415), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (381, 415), True, 'import numpy as np\n'), ((449, 480), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {}), '(results)\n', (471, 480), True, 'import pandas as pd\n')]
|