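"""Extract tile features from WebDataset shards with a pretrained pathology
foundation model (H-optimus-0 via timm), cache the features to HDF5, and
evaluate them with a linear probe, k-NN, or nearest-prototype classifier."""
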
import math
import os

import h5py
import numpy as np
import pandas as pd
import timm
import torch
import webdataset as wds
import yaml
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, balanced_accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tqdm import tqdm

work_dir = "." |
|
|
|
|
|
config_path = os.path.join(work_dir, "config.yaml") |
|
with open(config_path, 'r') as file: |
|
configs = yaml.safe_load(file) |
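
# For reference, a minimal config.yaml covering the keys this script reads
# might look like the following (values are illustrative assumptions, not
# prescribed settings):
#
#   model_name: h_optimus
#   split_type: internal   # or "external"
#   device: cuda
#   feature_exist: false   # set true to reuse cached HDF5 features
#   eval_name: logreg      # or "knn" / "proto"
#   max_iter: 1000
#   cost: 0.0001
#   k: 10
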
# Map short model names to Hugging Face Hub identifiers. Downloading the
# weights may require Hugging Face authentication beforehand.
model_dic = {
    "h_optimus": "hf-hub:bioptimus/H-optimus-0",
}
configs["model_path"] = model_dic[configs["model_name"]]

# Fill in defaults for evaluation settings the config may omit.
configs["eval_name"] = configs.get("eval_name", "logreg")
configs["max_iter"] = configs.get("max_iter", 1000)
configs["cost"] = configs.get("cost", 0.0001)
configs["k"] = configs.get("k", 10)

metadata_path = os.path.join(work_dir, "train_val_test_split.csv")
df = pd.read_csv(metadata_path)

# Shard lists per split: 39 training shards, and 9 (internal) or
# 8 (external) validation/test shards.
split = configs["split_type"]
file_range = 9 if split == "internal" else 8
patterns = {
    'train': [os.path.join(work_dir, f"data/dataset_{split}_train_part{str(i).zfill(3)}.tar") for i in range(39)],
    'valid': [os.path.join(work_dir, f"data/dataset_{split}_valid_part{str(i).zfill(3)}.tar") for i in range(file_range)],
    'test': [os.path.join(work_dir, f"data/dataset_{split}_test_part{str(i).zfill(3)}.tar") for i in range(file_range)],
}
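
# Each shard is expected to hold a pair of files per sample: a "jpg" image
# and a "json" record with a "label" field (see make_dataloader below).
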
class HDF5Dataset(Dataset):
    """Torch dataset over features/labels cached in an HDF5 file."""

    def __init__(self, hdf5_file_path):
        self.hdf5_file = h5py.File(hdf5_file_path, 'r')
        self.features = self.hdf5_file['features']
        self.labels = self.hdf5_file['labels']

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        feature = torch.tensor(self.features[idx], dtype=torch.float32)
        label = torch.tensor(self.labels[idx], dtype=torch.long)
        return feature, label

    def __del__(self):
        # Close the HDF5 handle when the dataset is garbage-collected.
        if hasattr(self, "hdf5_file"):
            self.hdf5_file.close()


def main():
    global configs
    print(configs)

    model_name = configs["model_name"]
    model, transform = get_model_transform(model_name)

    # Fit the encoder on all class names so every split shares the same
    # label-to-index mapping.
    label_encoder = LabelEncoder()
    label_encoder.fit(df['case'].unique())

    train_loader = make_dataloader(batch_size=8, split=split, transform=transform, label_encoder=label_encoder, mode="train")
    valid_loader = make_dataloader(batch_size=8, split=split, transform=transform, label_encoder=label_encoder, mode="valid")
    test_loader = make_dataloader(batch_size=8, split=split, transform=transform, label_encoder=label_encoder, mode="test")

    train_hdf5 = f"features/{model_name}_{split}_train.h5"
    valid_hdf5 = f"features/{model_name}_{split}_valid.h5"
    test_hdf5 = f"features/{model_name}_{split}_test.h5"

    features_dir = os.path.join(work_dir, "features")
    os.makedirs(features_dir, exist_ok=True)

    model.to(configs["device"])
    if not configs["feature_exist"]:
        # Extract features once and cache them to HDF5.
        for loader, hdf5, mode in zip(
            [train_loader, valid_loader, test_loader],
            [train_hdf5, valid_hdf5, test_hdf5],
            ["train", "valid", "test"],
        ):
            output_file = os.path.join(work_dir, hdf5)
            save_features_to_hdf5_in_batches(model, loader, output_file, mode=mode)
    else:
        # Reuse cached features; fail early if any file is missing.
        for hdf5 in [train_hdf5, valid_hdf5, test_hdf5]:
            output_file = os.path.join(work_dir, hdf5)
            assert os.path.isfile(output_file), f"{output_file} does not exist"

    # The validation split is loaded for completeness; only the test split
    # is evaluated below.
    train_feats, train_labels = get_feats_labels(train_hdf5)
    valid_feats, valid_labels = get_feats_labels(valid_hdf5, mode="valid")
    test_feats, test_labels = get_feats_labels(test_hdf5, mode="test")

    train_eval(train_feats, train_labels, test_feats, test_labels)


def get_model_transform(model_name):
    global configs

    if model_name == "h_optimus":
        model = timm.create_model(
            configs["model_path"], pretrained=True, init_values=1e-5, dynamic_img_size=False
        )
        # Normalization statistics recommended for H-optimus-0.
        transform = transforms.Compose([
            transforms.Resize(size=(224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=(0.707223, 0.578729, 0.703617),
                std=(0.211883, 0.230117, 0.177517)
            ),
        ])
    else:
        raise ValueError(f"Unsupported model name: {model_name}")
    return model, transform


def encode_labels(labels, label_encoder):
    # Encode a single-element list of labels and return the integer index.
    return label_encoder.transform(labels).item()


def make_dataloader(batch_size,
                    split,
                    transform,
                    label_encoder,
                    mode="train",
                    is_all_data_shuffle=True):
    global df, patterns

    # Use a shuffle buffer as large as the whole split so samples are fully
    # mixed; otherwise fall back to a 1000-sample buffer.
    if split == "internal":
        buffer_size = len(df[df.split_internal == mode]) if is_all_data_shuffle else 1000
    else:
        buffer_size = len(df[df.split_external == mode]) if is_all_data_shuffle else 1000

    dataset = wds.WebDataset(patterns[mode], shardshuffle=False) \
        .shuffle(buffer_size, seed=42) \
        .decode("pil").to_tuple("jpg", "json") \
        .map_tuple(transform, lambda x: encode_labels([x["label"]], label_encoder))

    dataloader = DataLoader(dataset, batch_size=batch_size)

    return dataloader


def save_features_to_hdf5_in_batches(model, dataloader, output_file, chunk_size=100, mode="train"):
    global configs, df

    # The batch count is only used for the tqdm progress bar.
    total_iterations = None
    if "internal" in output_file:
        total_iterations = math.ceil(len(df[df.split_internal == mode]) / dataloader.batch_size)
    elif "external" in output_file:
        total_iterations = math.ceil(len(df[df.split_external == mode]) / dataloader.batch_size)

    model.eval()
    device_type = torch.device(configs["device"]).type

    with h5py.File(output_file, 'w') as hdf5_file:
        # Probe one batch to discover the feature dimension; iterating the
        # dataloader again below restarts the WebDataset pipeline, so this
        # batch is not lost.
        sample_images, _ = next(iter(dataloader))
        with torch.no_grad():
            num_features = model(sample_images.to(configs["device"])).shape[1]

        # Resizable datasets that grow as batches are appended.
        dset_features = hdf5_file.create_dataset('features', shape=(0, num_features), maxshape=(None, num_features), chunks=(chunk_size, num_features))
        dset_labels = hdf5_file.create_dataset('labels', shape=(0,), maxshape=(None,), chunks=(chunk_size,))

        with torch.no_grad():
            for images, labels in tqdm(dataloader, total=total_iterations):
                images = images.to(configs["device"])

                # Mixed-precision inference to reduce memory use and time.
                with torch.autocast(device_type=device_type, dtype=torch.float16):
                    features = model(images).cpu().numpy()

                labels = labels.cpu().numpy()

                # Append this batch by growing the datasets along axis 0.
                dset_features.resize(dset_features.shape[0] + features.shape[0], axis=0)
                dset_features[-features.shape[0]:] = features

                dset_labels.resize(dset_labels.shape[0] + labels.shape[0], axis=0)
                dset_labels[-labels.shape[0]:] = labels

                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

    print(f"Features and labels have been saved to {output_file}.")


def get_feats_labels(hdf5_file_path, mode="train", batch_size=32):
    # Load cached features/labels back into memory as full tensors.
    dataset = HDF5Dataset(hdf5_file_path)
    shuffle = mode == "train"
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

    feats_list = []
    labels_list = []

    for feats, labels in dataloader:
        feats_list.append(feats.numpy())
        labels_list.append(labels.numpy())

    all_feats = np.concatenate(feats_list, axis=0)
    all_labels = np.concatenate(labels_list, axis=0)

    all_feats = torch.tensor(all_feats, dtype=torch.float32)
    all_labels = torch.tensor(all_labels, dtype=torch.long)

    return all_feats, all_labels


def train_eval(train_feats, train_labels, test_feats, test_labels):
    global configs

    if configs["eval_name"] == "logreg":
        model = LogisticRegression(C=configs["cost"], max_iter=configs["max_iter"])
        model.fit(train_feats.numpy(), train_labels.numpy())
        pred = model.predict(test_feats.numpy())

    elif configs["eval_name"] == "knn":
        model = KNeighborsClassifier(n_neighbors=configs["k"])
        model.fit(train_feats.numpy(), train_labels.numpy())
        pred = model.predict(test_feats.numpy())

    elif configs["eval_name"] == "proto":
        # Nearest-prototype classifier: represent each class by the mean of
        # its training features and assign each test sample to the class
        # with the closest prototype in L2 distance.
        unique_labels = np.unique(train_labels.numpy())
        feats_proto = torch.vstack([
            train_feats[train_labels == c].mean(dim=0) for c in unique_labels
        ])
        labels_proto = torch.tensor(unique_labels)
        pw_dist = torch.cdist(test_feats, feats_proto, p=2)
        pred = labels_proto[pw_dist.argmin(dim=1)].numpy()

    else:
        raise ValueError(f"Unknown eval_name: {configs['eval_name']}")

    test_labels = test_labels.numpy()
    acc = accuracy_score(test_labels, pred)
    balanced_acc = balanced_accuracy_score(test_labels, pred)
    print(f"Accuracy = {acc:.3f}, Balanced Accuracy = {balanced_acc:.3f}")


if __name__ == "__main__":
    main()