import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
if __name__ == '__main__':
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import torch
import sys
import os
from torchbenchmark import REPO_PATH
from typing import Tuple
# Import FAMBench model path
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
XLMR_PATH = os.path.join(REPO_PATH, "submodules", "FAMBench", "benchmarks", "xlmr", "ootb")
import fairseq
with add_path(XLMR_PATH):
from xlmr import generate_dataset
from xlmr_parser import init_argparse
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch.nn.functional as F
class WrappedModule(torch.nn.Module):
def __init__(self, inner_module: torch.nn.Module, inner_module_forward_name: str):
super().__init__()
self.model = inner_module
self._inner_module_forward_name = inner_module_forward_name
def forward(self, inputs):
inner_module_forward = getattr(self.model, self._inner_module_forward_name)
return inner_module_forward(inputs)
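# WrappedModule lets a benchmark harness call the model uniformly as module(inputs):
# forward() is redirected to an arbitrary inner method, e.g. XLMR's extract_features
# (see get_module below).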
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
FAMBENCH_MODEL = True
# typical parameters for inference:
# ./run_xlmr_ootb.sh -c "--inference-only --famconfig=fb-1dev-A --num-batches=100 --batch-size=96 " \
# "--sequence-length=64 --vocab-size=250000 --half-model --use-gpu --warmup-batches=20"
# We use the same batch size (96) for train and inference,
# but run only 1 batch.
DEFAULT_FAM_CONFIG = "fb-1dev-A"
DEFAULT_NUM_BATCHES = 1
DEFAULT_TRAIN_BSIZE = 96
DEFAULT_EVAL_BSIZE = 96
DEFAULT_SEQ_LENGTH = 64
DEFAULT_VOCAB_SIZE = 250000
# By default, use fp16 half precision for eval on CUDA.
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
# deepcopy fails with "RecursionError: maximum recursion depth exceeded"
DEEPCOPY = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.xlmr = fairseq.models.roberta.XLMRModel.from_pretrained("xlmr.large")
parser = init_argparse()
args = parser.parse_args([f"--famconfig={self.DEFAULT_FAM_CONFIG}",
f"--num-batches={self.DEFAULT_NUM_BATCHES}", f"--batch-size={self.batch_size} ", \
f"--sequence-length={self.DEFAULT_SEQ_LENGTH}", f"--vocab-size={self.DEFAULT_VOCAB_SIZE}"])
if self.device == "cuda":
args.use_gpu = True
if test == "train":
self.learning_rate = 0.01
self.optimizer = torch.optim.SGD(self.xlmr.parameters(), lr=self.learning_rate)
self.xlmr.train()
args.inference_only = False
elif test == "eval":
self.xlmr.eval()
args.inference_only = True
# Generate data! y is empty if inference_only.
self.x_l, self.y_true_l = generate_dataset(args.num_batches, args.batch_size,
args.vocab_size, args.inference_only, uniform_seqlen=args.sequence_length,
seqlen_dist=args.seqlen_dist, seq_len_dist_max=args.seqlen_dist_max)
# Prefetch the model and data to device
self.xlmr = self.xlmr.to(self.device)
self.x_l = list(map(lambda x: x.to(self.device), self.x_l))
self.y_true_l = list(map(lambda x: x.to(self.device), self.y_true_l))
def get_module(self):
return WrappedModule(self.xlmr, 'extract_features'), self.x_l
def enable_fp16_half(self):
self.xlmr = self.xlmr.half()
def train(self):
for i, (x, y_true) in enumerate(zip(self.x_l, self.y_true_l)):
y_pred = self.xlmr.extract_features(x)
loss = F.cross_entropy(y_pred, y_true)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
result = None
with torch.no_grad():
for i, x in enumerate(self.x_l):
y_pred = self.xlmr.extract_features(x)
result = y_pred
return (result, )
|
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH
def update_fambench_submodule():
"Update FAMBench submodule of the benchmark repo"
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules","FAMBench")]
subprocess.check_call(update_command, cwd=REPO_PATH)
def pip_install_requirements():
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
# pin fairseq version
# ignore deps specified in requirements.txt
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--no-deps', 'git+https://github.com/facebookresearch/fairseq.git@ae59bd6'])
except subprocess.CalledProcessError:
# We ignore the ResolutionImpossible error because fairseq requires omegaconf < 2.1
# but detectron2 requires omegaconf >= 2.1
pass
if __name__ == "__main__":
update_fambench_submodule()
pip_install_requirements()
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.SEGMENTATION
model_file = None
# A hack to work around the FCOS model instantiation error
FCOS_USE_BN = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/fcos_R_50_FPN_1x.py", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_GPT2_large", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import argparse
import random
import torch
import numpy as np
from torchbenchmark.util.env_check import set_random_seed
from .bert_pytorch import parse_args
from .bert_pytorch.trainer import BERTTrainer
from .bert_pytorch.dataset import BERTDataset, WordVocab
from .bert_pytorch.model import BERT
from torch.utils.data import DataLoader
import typing
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import io
class CorpusGenerator(io.TextIOBase):
"""
Generates a random corpus in lieu of fixed file data.
The model is written to consume a large fixed corpus, but for the
purposes of this benchmark it is sufficient to generate a nonsense
corpus with a similar distribution.
The corpus consists of sentence pairs. Vocabulary words are simply
numbers and sentences are each 1-3 words.
Deriving from io.TextIOBase allows the object to participate as a
text file.
"""
def __init__(self, words, lines):
self.lines_read = 0
self.lines = lines
self.words = words
def reset(self):
self.lines_read = 0
def readable(self):
return self.lines_read < self.lines
def readline(self):
self.lines_read = self.lines_read + 1
if (self.lines_read > self.lines):
return ""
newline = ""
for j in range(random.randrange(1,4)):
newline += str(random.randrange(self.words)) + " "
newline += "\\t "
for j in range(random.randrange(1,4)):
newline += str(random.randrange(self.words)) + " "
newline += "\n"
#print(newline)
return newline
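# Illustrative sketch of the generated format: each readline() yields a pair of
# sentences with 1-3 numeric "words" each, separated by the literal "\\t " marker
# that BERTDataset later splits on, e.g. a line roughly like "12 480 \\t 7 33 ".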
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 16
def __init__(self, test, device, batch_size=None, extra_args=[]):
if device == "cpu":
self.DEFAULT_EVAL_BSIZE = max(1, int(self.DEFAULT_EVAL_BSIZE / 8))
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
debug_print = False
root = str(Path(__file__).parent)
args = parse_args(args=[
'--train_dataset', f'{root}/data/corpus.small',
'--test_dataset', f'{root}/data/corpus.small',
'--vocab_path', f'{root}/data/vocab.small',
'--output_path', 'bert.model',
]) # Avoid reading sys.argv here
args.device = self.device
args.script = False
args.on_memory = True
# Example effect of batch size on eval time(ms)
# bs cpu cuda
# 1 330 15.5
# 2 660 15.5
# 4 1200 15.2
# 8 2200 20
# 16 4350 33
# 32 8000 58
#
# Issue is that with small batch sizes the gpu is starved for work.
# Ideally doubling work would double execution time.
# parameters for work size, these were chosen to provide a profile
# that matches processing of an original trained en-de corpus.
args.batch_size = self.batch_size
vocab_size = 20000
args.corpus_lines = 50000
# generate random corpus from parameters
set_random_seed()
vocab = WordVocab(CorpusGenerator(vocab_size, args.corpus_lines))
#with open(args.train_dataset, "r", encoding="utf-8") as f:
# vocab = WordVocab(f)
#vocab = WordVocab.load_vocab(args.vocab_path)
if debug_print:
print("seq_len:")
print(args.seq_len)
print("batch size:")
print(args.batch_size)
print("layers")
print(args.layers)
print("args hidden:")
print(args.hidden)
print("len vocab:")
print(len(vocab))
print(type(vocab))
set_random_seed()
train_dataset = BERTDataset(args.train_dataset, vocab, seq_len=args.seq_len,
corpus_lines=args.corpus_lines, on_memory=args.on_memory, generator = CorpusGenerator(vocab_size, args.corpus_lines))
set_random_seed()
test_dataset = BERTDataset(args.test_dataset, vocab, seq_len=args.seq_len, on_memory=args.on_memory, generator = CorpusGenerator(vocab_size, args.corpus_lines)) \
if args.test_dataset is not None else None
set_random_seed()
train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
test_data_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers) \
if test_dataset is not None else None
bert = BERT(len(vocab), hidden=args.hidden, n_layers=args.layers, attn_heads=args.attn_heads)
trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,
lr=args.lr, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay,
device=args.device, device_ids=args.device_ids, log_freq=args.log_freq, debug=args.debug)
if test == "eval":
bert.eval()
example_batch = next(iter(train_data_loader))
self.example_inputs = example_batch['bert_input'].to(self.device)[:self.batch_size], example_batch['segment_label'].to(self.device)[:self.batch_size]
self.is_next = example_batch['is_next'].to(self.device)[:self.batch_size]
self.bert_label = example_batch['bert_label'].to(self.device)[:self.batch_size]
self.model = trainer
def get_module(self):
return self.model.bert, self.example_inputs
def set_module(self, new_model):
self.model.bert = new_model
def eval(self) -> typing.Tuple[torch.Tensor]:
model = self.model
# 1. forward the next_sentence_prediction and masked_lm model
next_sent_output, mask_lm_output = model.model.forward(*self.example_inputs)
# 2-1. NLL(negative log likelihood) loss of is_next classification result
# 2-2. NLLLoss of predicting masked token word
# 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
next_loss = model.criterion(next_sent_output, self.is_next)
mask_loss = model.criterion(mask_lm_output.transpose(1, 2), self.bert_label)
loss = next_loss + mask_loss
return (next_sent_output, mask_lm_output)
def train(self):
trainer = self.model
# 1. forward the next_sentence_prediction and masked_lm model
next_sent_output, mask_lm_output = trainer.model.forward(*self.example_inputs)
# 2-1. NLL(negative log likelihood) loss of is_next classification result
# 2-2. NLLLoss of predicting masked token word
# 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
next_loss = trainer.criterion(next_sent_output, self.is_next)
mask_loss = trainer.criterion(mask_lm_output.transpose(1, 2), self.bert_label)
loss = next_loss + mask_loss
# 3. backward and optimization only in train
trainer.optim_schedule.zero_grad()
loss.backward()
trainer.optim_schedule.step_and_update_lr()
# self.model is a Trainer whose inner optimizer is wrapped by a scheduled optimizer. Return the inner
# optimizer, since the scheduled one is derived from it.
def get_optimizer(self):
return self.model.get_optimizer()
# self.model is a Trainer that has an inner optimizer wrapped by a scheduled optimizer. Update both with
# a new inner optimizer.
def set_optimizer(self, optimizer: torch.optim.Optimizer) -> None:
self.model.set_optimizer(optimizer)
|
import unittest
from bert_pytorch import BERT
class BERTVocabTestCase(unittest.TestCase):
pass
|
import subprocess
import sys
def setup_install():
subprocess.check_call([sys.executable, 'setup.py', 'develop'])
if __name__ == '__main__':
setup_install()
|
import argparse
from .model import BERT
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("--script", required=False, action="store_true")
parser.add_argument("-d", "--debug", required=False, type=str, default=None)
parser.add_argument("-c", "--train_dataset", required=True, type=str, help="train dataset for train bert")
parser.add_argument("-t", "--test_dataset", type=str, default=None, help="test set for evaluate train set")
parser.add_argument("-v", "--vocab_path", required=True, type=str, help="built vocab model path with bert-vocab")
parser.add_argument("-o", "--output_path", required=True, type=str, help="ex)output/bert.model")
parser.add_argument("-hs", "--hidden", type=int, default=768, help="hidden size of transformer model")
parser.add_argument("-l", "--layers", type=int, default=12, help="number of layers")
parser.add_argument("-a", "--attn_heads", type=int, default=12, help="number of attention heads")
parser.add_argument("-s", "--seq_len", type=int, default=128, help="maximum sequence len")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="number of batch_size")
parser.add_argument("-e", "--epochs", type=int, default=10, help="number of epochs")
parser.add_argument("-w", "--num_workers", type=int, default=0, help="dataloader worker size")
parser.add_argument("--device", default=0, help="Device to use for training, str or int (CUDA only)")
parser.add_argument("--log_freq", type=int, default=10, help="printing loss every n iter: setting n")
parser.add_argument("--corpus_lines", type=int, default=None, help="total number of lines in corpus")
parser.add_argument("--device_ids", nargs='+', default=None, help="Device ids, str or int (CUDA only)")
parser.add_argument("--on_memory", type=bool, default=True, help="Loading on memory: true or false")
parser.add_argument("--lr", type=float, default=1e-3, help="learning rate of adam")
parser.add_argument("--adam_weight_decay", type=float, default=0.01, help="weight_decay of adam")
parser.add_argument("--adam_beta1", type=float, default=0.9, help="adam first beta value")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="adam first beta value")
parsed_args = parser.parse_args(args)
if isinstance(parsed_args.device, str) and parsed_args.device.isdigit():
parsed_args.device = int(parsed_args.device)
if isinstance(parsed_args.device_ids, str) and parsed_args.device_ids.isdigit():
parsed_args.device_ids = int(parsed_args.device_ids)
return parsed_args
|
import argparse
from torch.utils.data import DataLoader
from bert_pytorch import parse_args
from .model import BERT
from .trainer import BERTTrainer
from .dataset import BERTDataset, WordVocab
import random
import torch
import numpy as np
def train():
# Make all randomness deterministic
random.seed(1337)
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
args = parse_args()
print("Loading Vocab", args.vocab_path)
vocab = WordVocab.load_vocab(args.vocab_path)
print("Vocab Size: ", len(vocab))
print("Loading Train Dataset", args.train_dataset)
train_dataset = BERTDataset(args.train_dataset, vocab, seq_len=args.seq_len,
corpus_lines=args.corpus_lines, on_memory=args.on_memory)
print("Loading Test Dataset", args.test_dataset)
test_dataset = BERTDataset(args.test_dataset, vocab, seq_len=args.seq_len, on_memory=args.on_memory) \
if args.test_dataset is not None else None
print("Creating Dataloader")
train_data_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
test_data_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers) \
if test_dataset is not None else None
print("Building BERT model")
bert = BERT(len(vocab), hidden=args.hidden, n_layers=args.layers, attn_heads=args.attn_heads)
if args.script:
print("Scripting BERT model")
bert = torch.jit.script(bert)
print("Creating BERT Trainer")
trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,
lr=args.lr, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay,
device=args.device, device_ids=args.device_ids, log_freq=args.log_freq, debug=args.debug)
print("Training Start")
for epoch in range(args.epochs):
trainer.train(epoch)
trainer.save(epoch, args.output_path)
if test_data_loader is not None:
trainer.test(epoch)
|
import pickle
from collections import Counter
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
stoi: A dict mapping token strings to numerical identifiers.
itos: A list of token strings indexed by their numerical identifiers.
"""
def __init__(self, counter, max_size=None, min_freq=1, specials=['<pad>', '<oov>'],
vectors=None, unk_init=None, vectors_cache=None):
"""Create a Vocab object from a collections.Counter.
Arguments:
counter: collections.Counter object holding the frequencies of
each value found in the data.
max_size: The maximum size of the vocabulary, or None for no
maximum. Default: None.
min_freq: The minimum frequency needed to include a token in the
vocabulary. Values less than 1 will be set to 1. Default: 1.
specials: The list of special tokens (e.g., padding or eos) that
will be prepended to the vocabulary in addition to an <unk>
token. Default: ['<pad>']
vectors: One of either the available pretrained vectors
or custom pretrained vectors (see Vocab.load_vectors);
or a list of aforementioned vectors
unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size. Default: torch.Tensor.zero_
vectors_cache: directory for cached vectors. Default: '.vector_cache'
"""
self.freqs = counter
counter = counter.copy()
min_freq = max(min_freq, 1)
self.itos = list(specials)
# frequencies of special tokens are not counted when building vocabulary
# in frequency order
for tok in specials:
del counter[tok]
max_size = None if max_size is None else max_size + len(self.itos)
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
# stoi is simply a reverse dict for itos
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vectors = None
if vectors is not None:
self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
else:
assert unk_init is None and vectors_cache is None
def __eq__(self, other):
if self.freqs != other.freqs:
return False
if self.stoi != other.stoi:
return False
if self.itos != other.itos:
return False
if self.vectors != other.vectors:
return False
return True
def __len__(self):
return len(self.itos)
def vocab_rerank(self):
self.stoi = {word: i for i, word in enumerate(self.itos)}
def extend(self, v, sort=False):
words = sorted(v.itos) if sort else v.itos
for w in words:
if w not in self.stoi:
self.itos.append(w)
self.stoi[w] = len(self.itos) - 1
class Vocab(TorchVocab):
def __init__(self, counter, max_size=None, min_freq=1):
self.pad_index = 0
self.unk_index = 1
self.eos_index = 2
self.sos_index = 3
self.mask_index = 4
super().__init__(counter, specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
max_size=max_size, min_freq=min_freq)
def to_seq(self, sentence, seq_len, with_eos=False, with_sos=False) -> list:
pass
def from_seq(self, seq, join=False, with_pad=False):
pass
@staticmethod
def load_vocab(vocab_path: str) -> 'Vocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def save_vocab(self, vocab_path):
with open(vocab_path, "wb") as f:
pickle.dump(self, f)
# Building Vocab with text files
class WordVocab(Vocab):
def __init__(self, texts, max_size=None, min_freq=1):
counter = Counter()
for line in texts:
if isinstance(line, list):
words = line
else:
words = line.replace("\n", "").replace("\t", "").split()
for word in words:
counter[word] += 1
super().__init__(counter, max_size=max_size, min_freq=min_freq)
def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
if isinstance(sentence, str):
sentence = sentence.split()
seq = [self.stoi.get(word, self.unk_index) for word in sentence]
if with_eos:
seq += [self.eos_index] # eos_index is 2
if with_sos:
seq = [self.sos_index] + seq
origin_seq_len = len(seq)
if seq_len is None:
pass
elif len(seq) <= seq_len:
seq += [self.pad_index for _ in range(seq_len - len(seq))]
else:
seq = seq[:seq_len]
return (seq, origin_seq_len) if with_len else seq
def from_seq(self, seq, join=False, with_pad=False):
words = [self.itos[idx]
if idx < len(self.itos)
else "<%d>" % idx
for idx in seq
if not with_pad or idx != self.pad_index]
return " ".join(words) if join else words
@staticmethod
def load_vocab(vocab_path: str) -> 'WordVocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def build():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--corpus_path", required=True, type=str)
parser.add_argument("-o", "--output_path", required=True, type=str)
parser.add_argument("-s", "--vocab_size", type=int, default=None)
parser.add_argument("-e", "--encoding", type=str, default="utf-8")
parser.add_argument("-m", "--min_freq", type=int, default=1)
args = parser.parse_args()
with open(args.corpus_path, "r", encoding=args.encoding) as f:
vocab = WordVocab(f, max_size=args.vocab_size, min_freq=args.min_freq)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(args.output_path)
|
from .dataset import BERTDataset
from .vocab import WordVocab
|
from torch.utils.data import Dataset
import torch
import random
QUIET = True
class BERTDataset(Dataset):
def __init__(self, corpus_path, vocab, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True, generator = None):
self.vocab = vocab
self.seq_len = seq_len
self.on_memory = on_memory
self.corpus_lines = corpus_lines
self.corpus_path = corpus_path
self.encoding = encoding
# For use as a benchmark, we only accept data from the generator.
#with open(corpus_path, "r", encoding=encoding) as f:
assert generator is not None
with generator as f:
if self.corpus_lines is None and not on_memory:
self.corpus_lines = 0
for _ in f:
self.corpus_lines += 1
if on_memory:
self.lines = [line[:-1].split("\\t")
for line in f]
self.corpus_lines = len(self.lines)
if not on_memory:
self.file = open(corpus_path, "r", encoding=encoding)
self.random_file = open(corpus_path, "r", encoding=encoding)
for _ in range(random.randrange(self.corpus_lines if self.corpus_lines < 1000 else 1000)):
self.random_file.__next__()
def __len__(self):
return self.corpus_lines
def __getitem__(self, item):
t1, t2, is_next_label = self.random_sent(item)
t1_random, t1_label = self.random_word(t1)
t2_random, t2_label = self.random_word(t2)
# [CLS] tag = SOS tag, [SEP] tag = EOS tag
t1 = [self.vocab.sos_index] + t1_random + [self.vocab.eos_index]
t2 = t2_random + [self.vocab.eos_index]
t1_label = [self.vocab.pad_index] + t1_label + [self.vocab.pad_index]
t2_label = t2_label + [self.vocab.pad_index]
segment_label = ([1 for _ in range(len(t1))] + [2 for _ in range(len(t2))])[:self.seq_len]
bert_input = (t1 + t2)[:self.seq_len]
bert_label = (t1_label + t2_label)[:self.seq_len]
padding = [self.vocab.pad_index for _ in range(self.seq_len - len(bert_input))]
bert_input.extend(padding), bert_label.extend(padding), segment_label.extend(padding)
output = {"bert_input": bert_input,
"bert_label": bert_label,
"segment_label": segment_label,
"is_next": is_next_label}
return {key: torch.tensor(value) for key, value in output.items()}
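# Each item is a dict of LongTensors: bert_input, bert_label and segment_label
# all have length seq_len, while is_next is a 0/1 scalar.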
def random_word(self, sentence):
tokens = sentence.split()
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = self.vocab.mask_index
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.randrange(len(self.vocab))
# 10% randomly change token to current token
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(0)
return tokens, output_label
def random_sent(self, index):
t1, t2 = self.get_corpus_line(index)
# output_text, label(isNotNext:0, isNext:1)
if random.random() > 0.5:
return t1, t2, 1
else:
return t1, self.get_random_line(), 0
def get_corpus_line(self, item):
if self.on_memory:
return self.lines[item][0], self.lines[item][1]
else:
line = self.file.__next__()
if line is None:
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
line = self.file.__next__()
t1, t2 = line[:-1].split("\t")
return t1, t2
def get_random_line(self):
if self.on_memory:
return self.lines[random.randrange(len(self.lines))][1]
line = self.file.__next__()
if line is None:
self.file.close()
self.file = open(self.corpus_path, "r", encoding=self.encoding)
for _ in range(random.randrange(self.corpus_lines if self.corpus_lines < 1000 else 1000)):
self.random_file.__next__()
line = self.random_file.__next__()
return line[:-1].split("\t")[1]
|
import torch.nn as nn
from .bert import BERT
class BERTLM(nn.Module):
"""
BERT Language Model
Next Sentence Prediction Model + Masked Language Model
"""
def __init__(self, bert: BERT, vocab_size):
"""
:param bert: BERT model which should be trained
:param vocab_size: total vocab size for masked_lm
"""
super().__init__()
self.bert = bert
self.next_sentence = NextSentencePrediction(self.bert.hidden)
self.mask_lm = MaskedLanguageModel(self.bert.hidden, vocab_size)
def forward(self, x, segment_label):
x = self.bert(x, segment_label)
return self.next_sentence(x), self.mask_lm(x)
class NextSentencePrediction(nn.Module):
"""
2-class classification model : is_next, is_not_next
"""
def __init__(self, hidden):
"""
:param hidden: BERT model output size
"""
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
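# x[:, 0] below is the hidden state of the first token (the [CLS]/SOS position),
# which is used as the sentence-pair representation for the 2-way classifier.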
def forward(self, x):
return self.softmax(self.linear(x[:, 0]))
class MaskedLanguageModel(nn.Module):
"""
Predicts the original token from the masked input sequence:
an n-class classification problem where n = vocab_size.
"""
def __init__(self, hidden, vocab_size):
"""
:param hidden: output size of BERT model
:param vocab_size: total vocab size
"""
super().__init__()
self.linear = nn.Linear(hidden, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
return self.softmax(self.linear(x))
|
from .bert import BERT
from .language_model import BERTLM
|
import torch
import torch.nn as nn
from .attention import MultiHeadedAttention
from .utils import SublayerConnection, PositionwiseFeedForward
from .utils.tensor2tensor import TensorToTensor
class LambdaModule(torch.nn.Module):
def __init__(self, att):
super().__init__()
self.attention = att
self.mask = torch.zeros((4))
@torch.jit.export
def set_mask(self, mask: torch.Tensor):
self.mask = mask
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.attention.forward(x, x, x, mask=self.mask)
class TransformerBlock(nn.Module):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
"""
:param hidden: hidden size of transformer
:param attn_heads: head sizes of multi-head attention
:param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
:param dropout: dropout rate
"""
super().__init__()
self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
self.lambda_module = LambdaModule(self.attention)
self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, mask):
self.lambda_module.set_mask(mask)
x = self.input_sublayer(x, self.lambda_module)
x = self.output_sublayer(x, self.feed_forward)
return self.dropout(x)
|
import torch
import torch.nn as nn
from .transformer import TransformerBlock
from .embedding import BERTEmbedding
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1):
"""
:param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super().__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
# multi-layers transformer blocks, deep network
self.transformer_blocks = nn.ModuleList(
[TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _ in range(n_layers)])
def forward(self, x, segment_info):
# attention masking for padded token
# torch.ByteTensor of shape [batch_size, 1, seq_len, seq_len]
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x, segment_info)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
x = transformer.forward(x, mask)
return x
|
import torch.nn as nn
import torch.nn.functional as F
import torch
import math
from ..utils.tensor2tensor import TensorToTensor
from typing import Optional
class Attention(nn.Module):
"""
Compute 'Scaled Dot Product Attention'
"""
def forward(self, query, key, value, dropout: TensorToTensor, mask: Optional[torch.Tensor]=None):
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(query.size(-1))
if mask is not None:
if scores.dtype == torch.float16:
"""
-1e9 is overflow in fp16. It needs to be set a min.
Theoretically, the mask for empty token needs to be set as -inf. Check https://arxiv.org/pdf/1706.03762.pdf
"""
min_mask = -65504.0 #torch.finfo(torch.float16).min == -65504.0. jit scripting could handle finfo
else:
min_mask = -1e9
scores = scores.masked_fill(mask == 0, min_mask)
p_attn = F.softmax(scores, dim=-1)
p_attn = dropout.forward(p_attn)
return torch.matmul(p_attn, value), p_attn
|
from .multi_head import MultiHeadedAttention
from .single import Attention
|
import torch
import torch.nn as nn
from .single import Attention
from typing import Optional
class DropoutWrapper(nn.Module):
def __init__(self, p):
super().__init__()
self.dropout = nn.Dropout(p=p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.dropout(x)
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.attention = Attention()
self.dropout = DropoutWrapper(p=dropout)
def forward(self, query, key, value, mask: Optional[torch.Tensor]=None):
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, self.dropout, mask=mask)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x)
|
import torch.nn as nn
class TokenEmbedding(nn.Embedding):
def __init__(self, vocab_size, embed_size=512):
super().__init__(vocab_size, embed_size, padding_idx=0)
|
from .bert import BERTEmbedding
|
import torch.nn as nn
import torch
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
# Changed from upstream, see https://github.com/codertimo/BERT-pytorch/pull/104
pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
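# This implements PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and
# PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)); div_term equals 10000^(-2i/d_model),
# computed in log space for numerical stability.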
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
|
import torch.nn as nn
class SegmentEmbedding(nn.Embedding):
def __init__(self, embed_size=512):
super().__init__(3, embed_size, padding_idx=0)
|
import torch
import torch.nn as nn
from .token import TokenEmbedding
from .position import PositionalEmbedding
from .segment import SegmentEmbedding
class BERTEmbedding(nn.Module):
"""
BERT Embedding, which consists of the following features:
1. TokenEmbedding : normal embedding matrix
2. PositionalEmbedding : adds positional information using sin and cos
3. SegmentEmbedding : adds sentence segment info, (sent_A:1, sent_B:2)
The sum of these features is the output of BERTEmbedding.
"""
def __init__(self, vocab_size, embed_size, dropout=0.1):
"""
:param vocab_size: total vocab size
:param embed_size: embedding size of token embedding
:param dropout: dropout rate
"""
super().__init__()
self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
self.dropout = nn.Dropout(p=dropout)
self.embed_size = embed_size
def forward(self, sequence, segment_label):
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
return self.dropout(x)
|
import torch
import torch.nn as nn
from .layer_norm import LayerNorm
from .tensor2tensor import TensorToTensor
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer: TensorToTensor):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer.forward(self.norm(x)))
|
import torch
@torch.jit.interface
class TensorToTensor(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
|
import torch
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = nn.GELU()
def forward(self, x):
return self.w_2(self.dropout(self.activation(self.w_1(x))))
|
from .feed_forward import PositionwiseFeedForward
from .layer_norm import LayerNorm
from .sublayer import SublayerConnection
|
import torch.nn as nn
import torch
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
|
'''A wrapper class for optimizer '''
import numpy as np
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, d_model, n_warmup_steps):
self._optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = 0
self.init_lr = np.power(d_model, -0.5)
def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self._optimizer.zero_grad()
def _get_lr_scale(self):
return np.min([
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
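# Together, _get_lr_scale and _update_learning_rate implement the "Noam" schedule
# from Attention Is All You Need: lr = d_model^-0.5 * min(step^-0.5, step * warmup^-1.5),
# i.e. linear warmup followed by inverse-square-root decay.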
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_current_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
|
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT
from .optim_schedule import ScheduledOptim
class BERTTrainer:
"""
BERTTrainer pretrains a BERT model with two language-model training objectives:
1. Masked Language Model : 3.3.1 Task #1: Masked LM
2. Next Sentence Prediction : 3.3.2 Task #2: Next Sentence Prediction
Please check README.md for details and a simple example.
"""
def __init__(self, bert: BERT, vocab_size: int,
train_dataloader: DataLoader, test_dataloader: DataLoader = None,
lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,
device: str = "cuda", device_ids=None, log_freq: int = 10, debug: str = None):
"""
:param bert: BERT model which you want to train
:param vocab_size: total word vocab size
:param train_dataloader: train dataset data loader
:param test_dataloader: test dataset data loader [can be None]
:param lr: learning rate of optimizer
:param betas: Adam optimizer betas
:param weight_decay: Adam optimizer weight decay param
:param device: device to use for training
:param log_freq: logging frequency of the batch iteration
"""
# Set up the device for BERT training.
self.device = torch.device(device)
# This BERT model will be saved every epoch
self.bert = bert
# Initialize the BERT Language Model, with BERT model
self.model = BERTLM(bert, vocab_size).to(self.device)
# Distributed GPU training if CUDA can detect more than 1 GPU
if self.device.type == "cuda" and torch.cuda.device_count() > 1:
self.model = nn.DataParallel(self.model, device_ids=device_ids)
# Setting the train and test data loader
self.train_data = train_dataloader
self.test_data = test_dataloader
# Setting the Adam optimizer with hyper-param
self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
self.warmup_steps = warmup_steps
# Using Negative Log Likelihood Loss function for predicting the masked_token
self.criterion = nn.NLLLoss(ignore_index=0)
self.log_freq = log_freq
self.debug = debug
def get_optimizer(self):
return self.optim
def set_optimizer(self, optimizer: torch.optim.Optimizer):
self.optim = optimizer
self.optim_schedule = ScheduledOptim(optimizer, self.bert.hidden, n_warmup_steps=self.warmup_steps)
def train(self, epoch):
self.iteration(epoch, self.train_data)
def test(self, epoch):
self.iteration(epoch, self.test_data, train=False)
def iteration(self, epoch, data_loader, train=True):
"""
Loop over the data_loader for training or testing.
If in train mode, the backward pass is run and the model is auto-saved every epoch.
:param epoch: current epoch index
:param data_loader: torch.utils.data.DataLoader for iteration
:param train: whether this is a train or a test iteration
:return: None
"""
str_code = "train" if train else "test"
data_iter = enumerate(data_loader)
avg_loss = 0.0
total_correct = 0
total_element = 0
for i, data in data_iter:
# 0. batch_data will be sent into the device(GPU or cpu)
data = {key: value.to(self.device) for key, value in data.items()}
# 1. forward the next_sentence_prediction and masked_lm model
next_sent_output, mask_lm_output = self.model.forward(data["bert_input"], data["segment_label"])
# 2-1. NLL(negative log likelihood) loss of is_next classification result
next_loss = self.criterion(next_sent_output, data["is_next"])
# 2-2. NLLLoss of predicting masked token word
mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])
# 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
loss = next_loss + mask_loss
# 3. backward and optimization only in train
if train:
self.optim_schedule.zero_grad()
loss.backward()
self.optim_schedule.step_and_update_lr()
# next sentence prediction accuracy
correct = next_sent_output.argmax(dim=-1).eq(data["is_next"]).sum().item()
avg_loss += loss.item()
total_correct += correct
total_element += data["is_next"].nelement()
post_fix = {
"epoch": epoch,
"iter": i,
"avg_loss": avg_loss / (i + 1),
"avg_acc": total_correct / total_element * 100,
"loss": loss.item()
}
if i % self.log_freq == 0:
print(str(post_fix)) # data_iter is a plain enumerate here, so tqdm's write() is unavailable
if self.debug and epoch == 1 and i == 0:
torch.save(next_sent_output, self.debug)
print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter), "total_acc=",
total_correct * 100.0 / total_element)
def save(self, epoch, file_path="output/bert_trained.model"):
"""
Save the current BERT model to file_path.
:param epoch: current epoch number
:param file_path: model output path; the final path is file_path + ".ep%d" % epoch
:return: final_output_path
"""
output_path = file_path + ".ep%d" % epoch
self.bert.to(self.device)
print("EP:%d Model Saved on:" % epoch, output_path)
return output_path
|
from .pretrain import BERTTrainer
|
"Doctr detection model"
from doctr.models import ocr_predictor
import numpy as np
import torch
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
DEFAULT_EVAL_BSIZE = 1
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
predictor = ocr_predictor(det_arch='db_resnet50', reco_arch='crnn_vgg16_bn', pretrained=True).to(self.device)
# Doctr detection model expects input (batch_size, 3, 1024, 1024)
self.model = predictor.det_predictor.model
self.example_inputs = torch.randn(self.batch_size, 3, 1024, 1024).to(self.device)
if self.test == "eval":
self.model.eval()
def train(self):
raise NotImplementedError("Train is not implemented for this model.")
def get_module(self):
return self.model, (self.example_inputs, )
def eval(self) -> Tuple[torch.Tensor]:
with torch.inference_mode():
out = self.model(self.example_inputs, return_model_output=True)
return (out["out_map"], )
|
import os
import warnings
import subprocess
import sys
def pip_install_requirements():
try:
subprocess.check_call(["conda", "install", "-y", "expecttest", "libglib", "pango", "-c", "conda-forge"])
except Exception:
warnings.warn("The doctr_det_predictor model requires conda binary libraries to be installed. Missing conda packages might break this model.")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
import torch.optim as optim
import torch.nn as nn
import torchvision.models as models
from functorch import make_functional_with_buffers, vmap, grad
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
def compute_norms(sample_grads):
batch_size = sample_grads[0].shape[0]
norms = [sample_grad.view(batch_size, -1).norm(2, dim=-1) for sample_grad in sample_grads]
norms = torch.stack(norms, dim=0).norm(2, dim=0)
return norms, batch_size
def clip_and_accumulate_and_add_noise(model, max_per_sample_grad_norm=1.0, noise_multiplier=1.0):
sample_grads = tuple(param.grad_sample for param in model.parameters())
# step 0: compute the norms
sample_norms, batch_size = compute_norms(sample_grads)
# step 1: compute clipping factors
clip_factor = max_per_sample_grad_norm / (sample_norms + 1e-6)
clip_factor = clip_factor.clamp(max=1.0)
# step 2: clip
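# einsum('i,i...') contracts the batch index: each per-sample gradient is scaled
# by its clip factor and the results are summed over the batch.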
grads = tuple(torch.einsum('i,i...', clip_factor, sample_grad)
for sample_grad in sample_grads)
# step 3: add gaussian noise
stddev = max_per_sample_grad_norm * noise_multiplier
noises = tuple(torch.normal(0, stddev, grad_param.shape, device=grad_param.device)
for grad_param in grads)
grads = tuple(noise + grad_param for noise, grad_param in zip(noises, grads))
# step 4: assign the new grads, delete the sample grads
for param, param_grad in zip(model.parameters(), grads):
param.grad = param_grad / batch_size
del param.grad_sample
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# Generate a resnet18, patch the BatchNorm layers to be GroupNorm
self.model = models.__dict__['resnet18'](
# min(32, c) is a reasonable default value, see the following:
# https://github.com/pytorch/opacus/blob/6a3e9bd99dca314596bc0313bb4241eac7c9a5d0/opacus/validators/batch_norm.py#L84-L86
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(min(c, 32), c))
)
self.model = self.model.to(device)
# Cifar10 images are 32x32 and have 10 classes
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
self.criterion = nn.CrossEntropyLoss()
def get_module(self):
return self.model, self.example_inputs
def train(self):
model = self.model
model.train()
fnet, params, buffers = make_functional_with_buffers(self.model)
(images, ) = self.example_inputs
targets = self.example_target
def compute_loss(params, buffers, image, target):
image = image.unsqueeze(0)
target = target.unsqueeze(0)
pred = fnet(params, buffers, image)
loss = self.criterion(pred, target)
return loss
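# grad differentiates compute_loss w.r.t. its first argument (params); vmap maps
# over the batch dimension of images and targets (in_dims (None, None, 0, 0))
# while broadcasting params and buffers, yielding per-sample gradients.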
sample_grads = vmap(grad(compute_loss), (None, None, 0, 0))(params, buffers, images, targets)
for grad_sample, weight in zip(sample_grads, model.parameters()):
weight.grad_sample = grad_sample.detach()
clip_and_accumulate_and_add_noise(model)
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
(images, ) = self.example_inputs
model.eval()
targets = self.example_target
with torch.no_grad():
out = model(images)
return (out, )
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchvision.utils import save_image
import torch
import torch.nn.functional as F
import numpy as np
import os
import time
import datetime
from .model import Generator
from .model import Discriminator
class Solver:
"""Solver for training and testing StarGAN."""
def __init__(self, celeba_loader, rafd_loader, config, should_script=False):
"""Initialize configurations."""
# Data loader.
self.celeba_loader = celeba_loader
self.rafd_loader = rafd_loader
# Model configurations.
self.c_dim = config.c_dim
self.c2_dim = config.c2_dim
self.image_size = config.image_size
self.g_conv_dim = config.g_conv_dim
self.d_conv_dim = config.d_conv_dim
self.g_repeat_num = config.g_repeat_num
self.d_repeat_num = config.d_repeat_num
self.lambda_cls = config.lambda_cls
self.lambda_rec = config.lambda_rec
self.lambda_gp = config.lambda_gp
# Training configurations.
self.dataset = config.dataset
self.batch_size = config.batch_size
self.num_iters = config.num_iters
self.num_iters_decay = config.num_iters_decay
self.g_lr = config.g_lr
self.d_lr = config.d_lr
self.n_critic = config.n_critic
self.beta1 = config.beta1
self.beta2 = config.beta2
self.resume_iters = config.resume_iters
self.selected_attrs = config.selected_attrs
# Test configurations.
self.test_iters = config.test_iters
# Miscellaneous.
self.use_tensorboard = config.use_tensorboard
self.device = torch.device(config.device)
# Directories.
self.log_dir = config.log_dir
self.sample_dir = config.sample_dir
self.model_save_dir = config.model_save_dir
self.result_dir = config.result_dir
# Step size.
self.log_step = config.log_step
self.sample_step = config.sample_step
self.model_save_step = config.model_save_step
self.lr_update_step = config.lr_update_step
# Build the model and tensorboard.
self.build_model(should_script)
if self.use_tensorboard:
self.build_tensorboard()
def build_model(self, should_script):
"""Create a generator and a discriminator."""
if should_script:
maybe_script = torch.jit.script
else:
maybe_script = lambda x: x
if self.dataset in ['CelebA', 'RaFD']:
self.G = maybe_script(Generator(self.g_conv_dim, self.c_dim, self.g_repeat_num))
self.D = maybe_script(Discriminator(self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num))
elif self.dataset in ['Both']:
self.G = maybe_script(Generator(self.g_conv_dim, self.c_dim+self.c2_dim+2, self.g_repeat_num)) # 2 for mask vector.
self.D = maybe_script(Discriminator(self.image_size, self.d_conv_dim, self.c_dim+self.c2_dim, self.d_repeat_num))
self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])
self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])
self.G.to(self.device)
self.D.to(self.device)
def print_network(self, model, name):
"""Print out the network information."""
num_params = 0
for p in model.parameters():
num_params += p.numel()
print(model)
print(name)
print("The number of parameters: {}".format(num_params))
def restore_model(self, resume_iters):
"""Restore the trained generator and discriminator."""
print('Loading the trained models from step {}...'.format(resume_iters))
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
self.G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
def build_tensorboard(self):
"""Build a tensorboard logger."""
from .logger import Logger
self.logger = Logger(self.log_dir)
def update_lr(self, g_lr, d_lr):
"""Decay learning rates of the generator and discriminator."""
for param_group in self.g_optimizer.param_groups:
param_group['lr'] = g_lr
for param_group in self.d_optimizer.param_groups:
param_group['lr'] = d_lr
def reset_grad(self):
"""Reset the gradient buffers."""
self.g_optimizer.zero_grad()
self.d_optimizer.zero_grad()
def denorm(self, x):
"""Convert the range from [-1, 1] to [0, 1]."""
out = (x + 1) / 2
return out.clamp_(0, 1)
def gradient_penalty(self, y, x):
"""Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
weight = torch.ones(y.size()).to(self.device)
dydx = torch.autograd.grad(outputs=y,
inputs=x,
grad_outputs=weight,
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
dydx = dydx.view(dydx.size(0), -1)
dydx_l2norm = torch.sqrt(torch.sum(dydx**2, dim=1))
return torch.mean((dydx_l2norm-1)**2)
def label2onehot(self, labels, dim):
"""Convert label indices to one-hot vectors."""
batch_size = labels.size(0)
out = torch.zeros(batch_size, dim)
out[np.arange(batch_size), labels.long()] = 1
return out
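# E.g. labels = [2, 0] with dim = 3 yields [[0, 0, 1], [1, 0, 0]].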
def create_labels(self, c_org, c_dim=5, dataset='CelebA', selected_attrs=None):
"""Generate target domain labels for debugging and testing."""
# Get hair color indices.
if dataset == 'CelebA':
hair_color_indices = []
for i, attr_name in enumerate(selected_attrs):
if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']:
hair_color_indices.append(i)
c_trg_list = []
for i in range(c_dim):
if dataset == 'CelebA':
c_trg = c_org.clone()
if i in hair_color_indices: # Set one hair color to 1 and the rest to 0.
c_trg[:, i] = 1
for j in hair_color_indices:
if j != i:
c_trg[:, j] = 0
else:
c_trg[:, i] = (c_trg[:, i] == 0) # Reverse attribute value.
elif dataset == 'RaFD':
c_trg = self.label2onehot(torch.ones(c_org.size(0))*i, c_dim)
c_trg_list.append(c_trg.to(self.device))
return c_trg_list
def classification_loss(self, logit, target, dataset='CelebA'):
"""Compute binary or softmax cross entropy loss."""
if dataset == 'CelebA':
return F.binary_cross_entropy_with_logits(logit, target, reduction='sum') / logit.size(0)
elif dataset == 'RaFD':
return F.cross_entropy(logit, target)
def train(self, debug=''):
"""Train StarGAN within a single dataset."""
# Set data loader.
if self.dataset == 'CelebA':
data_loader = self.celeba_loader
elif self.dataset == 'RaFD':
data_loader = self.rafd_loader
# Fetch fixed inputs for debugging.
data_iter = iter(data_loader)
x_fixed, c_org = next(data_iter)
x_fixed = x_fixed.to(self.device)
c_fixed_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
# Learning rate cache for decaying.
g_lr = self.g_lr
d_lr = self.d_lr
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
start_iters = self.resume_iters
self.restore_model(self.resume_iters)
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
try:
x_real, label_org = next(data_iter)
except StopIteration:
data_iter = iter(data_loader)
x_real, label_org = next(data_iter)
# Generate target domain labels randomly.
rand_idx = torch.randperm(label_org.size(0))
label_trg = label_org[rand_idx]
if self.dataset == 'CelebA':
c_org = label_org.clone()
c_trg = label_trg.clone()
elif self.dataset == 'RaFD':
c_org = self.label2onehot(label_org, self.c_dim)
c_trg = self.label2onehot(label_trg, self.c_dim)
x_real = x_real.to(self.device) # Input images.
c_org = c_org.to(self.device) # Original domain labels.
c_trg = c_trg.to(self.device) # Target domain labels.
label_org = label_org.to(self.device) # Labels for computing classification loss.
label_trg = label_trg.to(self.device) # Labels for computing classification loss.
# =================================================================================== #
# 2. Train the discriminator #
# =================================================================================== #
# Compute loss with real images.
out_src, out_cls = self.D(x_real)
d_loss_real = - torch.mean(out_src)
d_loss_cls = self.classification_loss(out_cls, label_org, self.dataset)
# Compute loss with fake images.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Save the last value to reference.out
if i == self.num_iters - 1 and debug:
to_be_saved = d_loss_cls - d_loss_fake
torch.save(to_be_saved, debug)
# Compute loss for gradient penalty.
alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = self.D(x_hat)
d_loss_gp = self.gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
# =================================================================================== #
# 3. Train the generator #
# =================================================================================== #
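            # Update G only once every n_critic D updates (WGAN-style critic schedule).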
if (i+1) % self.n_critic == 0:
# Original-to-target domain.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake)
g_loss_fake = - torch.mean(out_src)
g_loss_cls = self.classification_loss(out_cls, label_trg, self.dataset)
# Target-to-original domain.
x_reconst = self.G(x_fake, c_org)
g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training information.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, i+1)
# Translate fixed images for debugging.
if (i+1) % self.sample_step == 0 and debug:
with torch.no_grad():
x_fake_list = [x_fixed]
for c_fixed in c_fixed_list:
x_fake_list.append(self.G(x_fixed, c_fixed))
x_concat = torch.cat(x_fake_list, dim=3)
sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
torch.save(self.G.state_dict(), G_path)
torch.save(self.D.state_dict(), D_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Decay learning rates.
if (i+1) % self.lr_update_step == 0 and (i+1) > (self.num_iters - self.num_iters_decay):
g_lr -= (self.g_lr / float(self.num_iters_decay))
d_lr -= (self.d_lr / float(self.num_iters_decay))
self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
    def train_multi(self, debug=''):
"""Train StarGAN with multiple datasets."""
# Data iterators.
celeba_iter = iter(self.celeba_loader)
rafd_iter = iter(self.rafd_loader)
# Fetch fixed inputs for debugging.
x_fixed, c_org = next(celeba_iter)
x_fixed = x_fixed.to(self.device)
c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA', self.selected_attrs)
c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
zero_celeba = torch.zeros(x_fixed.size(0), self.c_dim).to(self.device) # Zero vector for CelebA.
zero_rafd = torch.zeros(x_fixed.size(0), self.c2_dim).to(self.device) # Zero vector for RaFD.
mask_celeba = self.label2onehot(torch.zeros(x_fixed.size(0)), 2).to(self.device) # Mask vector: [1, 0].
mask_rafd = self.label2onehot(torch.ones(x_fixed.size(0)), 2).to(self.device) # Mask vector: [0, 1].
# Learning rate cache for decaying.
g_lr = self.g_lr
d_lr = self.d_lr
# Start training from scratch or resume training.
start_iters = 0
if self.resume_iters:
start_iters = self.resume_iters
self.restore_model(self.resume_iters)
# Start training.
print('Start training...')
start_time = time.time()
for i in range(start_iters, self.num_iters):
for dataset in ['CelebA', 'RaFD']:
# =================================================================================== #
# 1. Preprocess input data #
# =================================================================================== #
# Fetch real images and labels.
data_iter = celeba_iter if dataset == 'CelebA' else rafd_iter
try:
x_real, label_org = next(data_iter)
                except StopIteration:
if dataset == 'CelebA':
celeba_iter = iter(self.celeba_loader)
x_real, label_org = next(celeba_iter)
elif dataset == 'RaFD':
rafd_iter = iter(self.rafd_loader)
x_real, label_org = next(rafd_iter)
# Generate target domain labels randomly.
rand_idx = torch.randperm(label_org.size(0))
label_trg = label_org[rand_idx]
if dataset == 'CelebA':
c_org = label_org.clone()
c_trg = label_trg.clone()
zero = torch.zeros(x_real.size(0), self.c2_dim)
mask = self.label2onehot(torch.zeros(x_real.size(0)), 2)
c_org = torch.cat([c_org, zero, mask], dim=1)
c_trg = torch.cat([c_trg, zero, mask], dim=1)
elif dataset == 'RaFD':
c_org = self.label2onehot(label_org, self.c2_dim)
c_trg = self.label2onehot(label_trg, self.c2_dim)
zero = torch.zeros(x_real.size(0), self.c_dim)
mask = self.label2onehot(torch.ones(x_real.size(0)), 2)
c_org = torch.cat([zero, c_org, mask], dim=1)
c_trg = torch.cat([zero, c_trg, mask], dim=1)
x_real = x_real.to(self.device) # Input images.
c_org = c_org.to(self.device) # Original domain labels.
c_trg = c_trg.to(self.device) # Target domain labels.
label_org = label_org.to(self.device) # Labels for computing classification loss.
label_trg = label_trg.to(self.device) # Labels for computing classification loss.
# =================================================================================== #
# 2. Train the discriminator #
# =================================================================================== #
# Compute loss with real images.
out_src, out_cls = self.D(x_real)
out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
d_loss_real = - torch.mean(out_src)
d_loss_cls = self.classification_loss(out_cls, label_org, dataset)
# Compute loss with fake images.
x_fake = self.G(x_real, c_trg)
out_src, _ = self.D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Compute loss for gradient penalty.
alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)
x_hat = (alpha * x_real.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = self.D(x_hat)
d_loss_gp = self.gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + self.lambda_cls * d_loss_cls + self.lambda_gp * d_loss_gp
self.reset_grad()
d_loss.backward()
self.d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
# =================================================================================== #
# 3. Train the generator #
# =================================================================================== #
if (i+1) % self.n_critic == 0:
# Original-to-target domain.
x_fake = self.G(x_real, c_trg)
out_src, out_cls = self.D(x_fake)
out_cls = out_cls[:, :self.c_dim] if dataset == 'CelebA' else out_cls[:, self.c_dim:]
g_loss_fake = - torch.mean(out_src)
g_loss_cls = self.classification_loss(out_cls, label_trg, dataset)
# Target-to-original domain.
x_reconst = self.G(x_fake, c_org)
g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + self.lambda_rec * g_loss_rec + self.lambda_cls * g_loss_cls
self.reset_grad()
g_loss.backward()
self.g_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
# =================================================================================== #
# 4. Miscellaneous #
# =================================================================================== #
# Print out training info.
if (i+1) % self.log_step == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}], Dataset [{}]".format(et, i+1, self.num_iters, dataset)
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
if self.use_tensorboard:
for tag, value in loss.items():
self.logger.scalar_summary(tag, value, i+1)
# Translate fixed images for debugging.
if (i+1) % self.sample_step == 0 and debug:
with torch.no_grad():
x_fake_list = [x_fixed]
for c_fixed in c_celeba_list:
c_trg = torch.cat([c_fixed, zero_rafd, mask_celeba], dim=1)
x_fake_list.append(self.G(x_fixed, c_trg))
for c_fixed in c_rafd_list:
c_trg = torch.cat([zero_celeba, c_fixed, mask_rafd], dim=1)
x_fake_list.append(self.G(x_fixed, c_trg))
x_concat = torch.cat(x_fake_list, dim=3)
sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % self.model_save_step == 0:
G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
torch.save(self.G.state_dict(), G_path)
torch.save(self.D.state_dict(), D_path)
print('Saved model checkpoints into {}...'.format(self.model_save_dir))
# Decay learning rates.
if (i+1) % self.lr_update_step == 0 and (i+1) > (self.num_iters - self.num_iters_decay):
g_lr -= (self.g_lr / float(self.num_iters_decay))
d_lr -= (self.d_lr / float(self.num_iters_decay))
self.update_lr(g_lr, d_lr)
                print('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
def get_test_inputs(self):
        data_loader = self.celeba_loader if self.dataset == 'CelebA' else self.rafd_loader
for x_real, c_org in data_loader:
x_real = x_real.to(self.device)
c_trg_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
yield x_real, c_trg_list
def test(self, restore=True, debug=''):
"""Translate images using StarGAN trained on a single dataset."""
# Load the trained generator.
if restore:
self.restore_model(self.test_iters)
# Set data loader.
if self.dataset == 'CelebA':
data_loader = self.celeba_loader
elif self.dataset == 'RaFD':
data_loader = self.rafd_loader
with torch.no_grad():
for i, (x_real, c_org) in enumerate(data_loader):
# Prepare input images and target domain labels.
x_real = x_real.to(self.device)
c_trg_list = self.create_labels(c_org, self.c_dim, self.dataset, self.selected_attrs)
# Translate images.
x_fake_list = [x_real]
for c_trg in c_trg_list:
x_fake_list.append(self.G(x_real, c_trg))
# Save the translated images.
if debug:
x_concat = torch.cat(x_fake_list, dim=3)
result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(result_path))
def test_multi(self, restore=True, debug=''):
"""Translate images using StarGAN trained on multiple datasets."""
# Load the trained generator.
if restore:
self.restore_model(self.test_iters)
with torch.no_grad():
for i, (x_real, c_org) in enumerate(self.celeba_loader):
# Prepare input images and target domain labels.
x_real = x_real.to(self.device)
c_celeba_list = self.create_labels(c_org, self.c_dim, 'CelebA', self.selected_attrs)
c_rafd_list = self.create_labels(c_org, self.c2_dim, 'RaFD')
zero_celeba = torch.zeros(x_real.size(0), self.c_dim).to(self.device) # Zero vector for CelebA.
zero_rafd = torch.zeros(x_real.size(0), self.c2_dim).to(self.device) # Zero vector for RaFD.
mask_celeba = self.label2onehot(torch.zeros(x_real.size(0)), 2).to(self.device) # Mask vector: [1, 0].
mask_rafd = self.label2onehot(torch.ones(x_real.size(0)), 2).to(self.device) # Mask vector: [0, 1].
# Translate images.
x_fake_list = [x_real]
for c_celeba in c_celeba_list:
c_trg = torch.cat([c_celeba, zero_rafd, mask_celeba], dim=1)
x_fake_list.append(self.G(x_real, c_trg))
for c_rafd in c_rafd_list:
c_trg = torch.cat([zero_celeba, c_rafd, mask_rafd], dim=1)
x_fake_list.append(self.G(x_real, c_trg))
# Save the translated images.
if debug:
x_concat = torch.cat(x_fake_list, dim=3)
result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i+1))
save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)
print('Saved real and fake images into {}...'.format(result_path))
|
from torch.utils import data
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
from PIL import Image
import torch
import os
import random
class CelebA(data.Dataset):
"""Dataset class for the CelebA dataset."""
def __init__(self, image_dir, attr_path, selected_attrs, transform, mode):
"""Initialize and preprocess the CelebA dataset."""
self.image_dir = image_dir
self.attr_path = attr_path
self.selected_attrs = selected_attrs
self.transform = transform
self.mode = mode
self.train_dataset = []
self.test_dataset = []
self.attr2idx = {}
self.idx2attr = {}
self.preprocess()
if mode == 'train':
self.num_images = len(self.train_dataset)
else:
self.num_images = len(self.test_dataset)
def preprocess(self):
"""Preprocess the CelebA attribute file."""
        with open(self.attr_path, 'r') as f:
            lines = [line.rstrip() for line in f]
all_attr_names = lines[1].split()
for i, attr_name in enumerate(all_attr_names):
self.attr2idx[attr_name] = i
self.idx2attr[i] = attr_name
lines = lines[2:]
random.seed(1234)
random.shuffle(lines)
for i, line in enumerate(lines):
split = line.split()
filename = split[0]
values = split[1:]
label = []
for attr_name in self.selected_attrs:
idx = self.attr2idx[attr_name]
label.append(values[idx] == '1')
            if (i+1) < 4:  # hold out the first 3 shuffled images as the test split
self.test_dataset.append([filename, label])
else:
self.train_dataset.append([filename, label])
def __getitem__(self, index):
"""Return one image and its corresponding attribute label."""
dataset = self.train_dataset if self.mode == 'train' else self.test_dataset
filename, label = dataset[index]
image = Image.open(os.path.join(self.image_dir, filename))
return self.transform(image), torch.FloatTensor(label)
def __len__(self):
"""Return the number of images."""
return self.num_images
def get_loader(image_dir, attr_path, selected_attrs, crop_size=178, image_size=128,
batch_size=16, dataset='CelebA', mode='train', num_workers=0):
"""Build and return a data loader."""
transform = []
if mode == 'train':
transform.append(T.RandomHorizontalFlip())
transform.append(T.CenterCrop(crop_size))
transform.append(T.Resize(image_size))
transform.append(T.ToTensor())
transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
transform = T.Compose(transform)
if dataset == 'CelebA':
dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)
elif dataset == 'RaFD':
dataset = ImageFolder(image_dir, transform)
data_loader = data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=(mode=='train'),
num_workers=num_workers)
return data_loader
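# Example usage (a minimal sketch; the paths and attribute names are placeholders):
#   loader = get_loader('data/celeba/images', 'data/celeba/list_attr_celeba.txt',
#                       ['Black_Hair', 'Blond_Hair', 'Male'], batch_size=16,
#                       dataset='CelebA', mode='train')
#   images, labels = next(iter(loader))  # images: (16, 3, 128, 128), labels: (16, 3)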
|
#!/usr/bin/env python
import os
import torch
import random
import numpy as np
from .solver import Solver
from .data_loader import get_loader
from .main import parse_config, makedirs
from typing import Tuple
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark import DATA_PATH
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
def _prefetch(loader, size, collate_fn):
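    """Materialize the first `size` batches of `loader`, transformed by
    `collate_fn` (here: moved to the benchmark device), so that data loading
    and host-to-device copies stay out of the measured region."""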
result = []
for _, item in zip(range(size), loader):
result.append(collate_fn(item))
return result
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
# Original train batch size: 16
# Source: https://github.com/yunjey/stargan/blob/94dd002e93a2863d9b987a937b85925b80f7a19f/main.py#L73
# This model doesn't support customizing eval batch size and will use the same bs as train
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 16
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: Customizing the optimizer is nontrivial, perhaps a next step.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# init config
config = parse_config()
config.celeba_image_dir = os.path.join(DATA_PATH, 'pytorch_stargan_inputs/data/celeba/images')
config.attr_path = os.path.join(DATA_PATH, 'pytorch_stargan_inputs/data/celeba/list_attr_celeba.txt')
config.num_iters = 1
config.num_workers = 0
config.batch_size = self.batch_size
config.use_tensorboard = False
config.device = device
config.should_script = False
config.prefetch = True
makedirs(config)
self.data_loader = self.get_data_loader(config)
if config.prefetch:
self.data_loader = _prefetch(self.data_loader, size=config.num_iters, collate_fn=lambda item: tuple([m.to(self.device) for m in item]))
self.solver = Solver(celeba_loader=self.data_loader,
rafd_loader=None,
config=config,
should_script=config.should_script)
self.model = self.solver.G
if self.test == "train":
self.model.train()
elif self.test == "eval":
self.model.eval()
self.example_inputs = self.generate_example_inputs()
def get_data_loader(self, config):
celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
config.celeba_crop_size, config.image_size, config.batch_size,
'CelebA', config.mode, config.num_workers)
return celeba_loader
def generate_example_inputs(self):
for x_real, c_trg_list in self.solver.get_test_inputs():
            return x_real, c_trg_list[0]  # first target labels only; the batch size can exceed the number of test images
def jit_callback(self):
self.solver.G = torch.jit.script(self.solver.G)
self.solver.D = torch.jit.script(self.solver.D)
def get_module(self):
return self.model, self.example_inputs
def train(self):
self.solver.train()
def eval(self) -> Tuple[torch.Tensor]:
model = self.model
example_inputs = self.example_inputs
out = model(*example_inputs)
return (out, )
|
import tensorflow as tf
class Logger:
"""Tensorboard logger."""
def __init__(self, log_dir):
"""Initialize summary writer."""
self.writer = tf.summary.create_file_writer(log_dir)
def scalar_summary(self, tag, value, step):
"""Add scalar summary."""
with self.writer.as_default():
tf.summary.scalar(tag, value, step=step)
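# Example (sketch): write one scalar per training step.
#   logger = Logger('stargan/logs')
#   logger.scalar_summary('D/loss_real', 0.42, step=1)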
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class ResidualBlock(nn.Module):
"""Residual Block with instance normalization."""
def __init__(self, dim_in, dim_out):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))
def forward(self, x):
return x + self.main(x)
class Generator(nn.Module):
"""Generator network."""
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6):
super(Generator, self).__init__()
layers = []
layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
# Down-sampling layers.
curr_dim = conv_dim
for i in range(2):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
# Bottleneck layers.
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
# Up-sampling layers.
for i in range(2):
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True, track_running_stats=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.main = nn.Sequential(*layers)
def forward(self, x, c):
# Replicate spatially and concatenate domain information.
# Note that this type of label conditioning does not work at all if we use reflection padding in Conv2d.
# This is because instance normalization ignores the shifting (or bias) effect.
c = c.view(c.size(0), c.size(1), 1, 1)
c = c.repeat(1, 1, x.size(2), x.size(3))
x = torch.cat([x, c], dim=1)
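        # Example shapes (sketch): x (N, 3, 128, 128), c (N, 5) -> c tiled to
        # (N, 5, 128, 128), so the concatenated input is (N, 8, 128, 128).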
return self.main(x)
class Discriminator(nn.Module):
"""Discriminator network with PatchGAN."""
def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
super(Discriminator, self).__init__()
layers = []
layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01))
curr_dim = conv_dim
for i in range(1, repeat_num):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01))
curr_dim = curr_dim * 2
kernel_size = int(image_size / np.power(2, repeat_num))
self.main = nn.Sequential(*layers)
self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
def forward(self, x):
h = self.main(x)
out_src = self.conv1(h)
out_cls = self.conv2(h)
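        # out_src is a PatchGAN map of real/fake scores (one per receptive-field
        # patch); out_cls holds one logit per domain attribute.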
return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
|
import subprocess
import sys
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "pytorch_stargan_inputs.tar.gz", decompress=True)
pip_install_requirements()
|
import os
import argparse
import torch
from torch.backends import cudnn
from .solver import Solver
from .data_loader import get_loader
def str2bool(v):
    return v.lower() == 'true'
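# Note: argparse's builtin `type=bool` treats any non-empty string as True,
# so boolean flags in this file are parsed through str2bool instead.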
def makedirs(config):
"Create directories if not exist."
if not os.path.exists(config.log_dir):
os.makedirs(config.log_dir)
if not os.path.exists(config.model_save_dir):
os.makedirs(config.model_save_dir)
if not os.path.exists(config.sample_dir):
os.makedirs(config.sample_dir)
if not os.path.exists(config.result_dir):
os.makedirs(config.result_dir)
def main(config):
# For fast training.
cudnn.benchmark = True
makedirs(config)
    # Fix seed for determinism.
    if config.deterministic:
        torch.manual_seed(0)
# Data loader.
celeba_loader = None
rafd_loader = None
if config.dataset in ['CelebA', 'Both']:
celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
config.celeba_crop_size, config.image_size, config.batch_size,
'CelebA', config.mode, config.num_workers)
if config.dataset in ['RaFD', 'Both']:
rafd_loader = get_loader(config.rafd_image_dir, None, None,
config.rafd_crop_size, config.image_size, config.batch_size,
'RaFD', config.mode, config.num_workers)
# Solver for training and testing StarGAN.
solver = Solver(celeba_loader, rafd_loader, config, should_script=config.should_script)
if config.mode == 'train':
if config.dataset in ['CelebA', 'RaFD']:
solver.train(config.debug)
elif config.dataset in ['Both']:
solver.train_multi()
elif config.mode == 'test':
if config.dataset in ['CelebA', 'RaFD']:
solver.test()
elif config.dataset in ['Both']:
solver.test_multi()
def parse_config(args=[]):
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--c_dim', type=int, default=5, help='dimension of domain labels (1st dataset)')
parser.add_argument('--c2_dim', type=int, default=8, help='dimension of domain labels (2nd dataset)')
parser.add_argument('--celeba_crop_size', type=int, default=178, help='crop size for the CelebA dataset')
parser.add_argument('--rafd_crop_size', type=int, default=256, help='crop size for the RaFD dataset')
parser.add_argument('--image_size', type=int, default=128, help='image resolution')
parser.add_argument('--g_conv_dim', type=int, default=64, help='number of conv filters in the first layer of G')
parser.add_argument('--d_conv_dim', type=int, default=64, help='number of conv filters in the first layer of D')
parser.add_argument('--g_repeat_num', type=int, default=6, help='number of residual blocks in G')
parser.add_argument('--d_repeat_num', type=int, default=6, help='number of strided conv layers in D')
parser.add_argument('--lambda_cls', type=float, default=1, help='weight for domain classification loss')
parser.add_argument('--lambda_rec', type=float, default=10, help='weight for reconstruction loss')
parser.add_argument('--lambda_gp', type=float, default=10, help='weight for gradient penalty')
# Training configuration.
parser.add_argument('--dataset', type=str, default='CelebA', choices=['CelebA', 'RaFD', 'Both'])
parser.add_argument('--batch_size', type=int, default=16, help='mini-batch size')
parser.add_argument('--num_iters', type=int, default=200000, help='number of total iterations for training D')
parser.add_argument('--num_iters_decay', type=int, default=100000, help='number of iterations for decaying lr')
parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G')
parser.add_argument('--d_lr', type=float, default=0.0001, help='learning rate for D')
parser.add_argument('--n_critic', type=int, default=5, help='number of D updates per each G update')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
parser.add_argument('--resume_iters', type=int, default=None, help='resume training from this step')
parser.add_argument('--selected_attrs', '--list', nargs='+', help='selected attributes for the CelebA dataset',
default=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young'])
# Test configuration.
parser.add_argument('--test_iters', type=int, default=200000, help='test model from this step')
# Miscellaneous.
parser.add_argument('--num_workers', type=int, default=1)
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--use_tensorboard', type=str2bool, default=True)
parser.add_argument('--device', type=str, default='cpu', choices=['cpu', 'cuda'])
# Directories.
parser.add_argument('--celeba_image_dir', type=str, default='data/celeba/images')
parser.add_argument('--attr_path', type=str, default='data/celeba/list_attr_celeba.txt')
parser.add_argument('--rafd_image_dir', type=str, default='data/RaFD/train')
parser.add_argument('--log_dir', type=str, default='stargan/logs')
parser.add_argument('--model_save_dir', type=str, default='stargan/models')
parser.add_argument('--sample_dir', type=str, default='stargan/samples')
parser.add_argument('--result_dir', type=str, default='stargan/results')
# Step size.
parser.add_argument('--log_step', type=int, default=10)
parser.add_argument('--sample_step', type=int, default=1000)
parser.add_argument('--model_save_step', type=int, default=10000)
parser.add_argument('--lr_update_step', type=int, default=1000)
# Extra arguments, not in the original stargan repo.
# --debug reference.out saves a file called "reference.out" with a result tensor
# from the last iteration of the model.
parser.add_argument('--debug', type=str, default='')
    parser.add_argument('--deterministic', type=str2bool, default=False)
    parser.add_argument('--should_script', type=str2bool, default=False)
config = parser.parse_args(args)
return config
if __name__ == '__main__':
config = parse_config()
print(config)
main(config)
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from .moco.builder import MoCo
from .moco import loader as moco_loader
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# moco specific configs:
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=65536, type=int,
help='queue size; number of negative keys (default: 65536)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.07, type=float,
help='softmax temperature (default: 0.07)')
# options for moco v2
parser.add_argument('--mlp', action='store_true',
help='use mlp head')
parser.add_argument('--aug-plus', action='store_true',
help='use moco v2 data augmentation')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
parser.add_argument('--fake_data', action='store_true')
parser.add_argument('-d', '--debug', type=str, help='File to dump output.')
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
    if args.seed is not None:
        torch.manual_seed(args.seed)
    cudnn.deterministic = True
cudnn.benchmark = False # does not help anyway
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
model = MoCo(
models.__dict__[args.arch],
args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp)
#print(model)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
            # Comment out the following line to debug single-GPU (non-DDP) runs.
raise NotImplementedError("Only DistributedDataParallel is supported.")
else:
# AllGather implementation (batch shuffle, queue update, etc.) in
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
#cudnn.benchmark = True
if (not args.fake_data):
# Data loading code
traindir = os.path.join(args.data, 'train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if args.aug_plus:
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply([moco_loader.GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
else:
# MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978
augmentation = [
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
]
train_dataset = datasets.ImageFolder(
traindir,
            moco_loader.TwoCropsTransform(transforms.Compose(augmentation)))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
else:
# pregenerate a few batches
batches = []
for _ in range(64):
batches.append(torch.randn(args.batch_size, 3, 224, 224))
# create fake dataloader
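        # Each index yields a pre-generated pair of tensors standing in for the
        # two augmented crops (query/key views) TwoCropsTransform would produce.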
def collate_fn(data):
ind = data[0]
return [batches[2*ind], batches[2*ind+1]], 0
train_loader = torch.utils.data.DataLoader(range(32), collate_fn = collate_fn)
for epoch in range(args.start_epoch, args.epochs):
if not args.fake_data:
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
}, is_best=False, filename='checkpoint_{:04d}.pth.tar'.format(epoch))
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, _) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
# compute output
output, target = model(im_q=images[0], im_k=images[1])
loss = criterion(output, target)
# acc1/acc5 are (K+1)-way contrast classifier accuracy
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images[0].size(0))
top1.update(acc1[0], images[0].size(0))
top5.update(acc5[0], images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if (args.debug is not None):
torch.save(output, args.debug)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter:
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
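# Example (sketch): with --cos, --lr 0.03 and --epochs 200, the cosine schedule
# gives lr = 0.03 at epoch 0, 0.015 at epoch 100, and ~0 by epoch 199.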
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
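# In MoCo's (K+1)-way contrast task the positive key is always class 0, so
# acc1 here measures how often a query scores its own key above all queue
# negatives.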
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from argparse import Namespace
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
from typing import Tuple
from .moco.builder import MoCo
from .main_moco import adjust_learning_rate
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
cudnn.deterministic = False
cudnn.benchmark = True
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# Original train batch size: 32
# Paper and code uses batch size of 256 for 8 GPUs.
# Source: https://arxiv.org/pdf/1911.05722.pdf
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.opt = Namespace(**{
'arch': 'resnet50',
'epochs': 2,
'start_epoch': 0,
'lr': 0.03,
'schedule': [120, 160],
'momentum': 0.9,
'weight_decay': 1e-4,
'gpu': None,
'moco_dim': 128,
'moco_k': 32000,
'moco_m': 0.999,
'moco_t': 0.07,
'mlp': False,
'aug_plus': False,
'cos': False,
'fake_data': True,
'distributed': True,
})
try:
dist.init_process_group(backend='nccl', init_method='tcp://localhost:10001',
world_size=1, rank=0)
except RuntimeError:
            pass  # the process group may already be initialized
if device == "cpu":
raise NotImplementedError("DistributedDataParallel/allgather requires cuda")
self.model = MoCo(
models.__dict__[self.opt.arch],
self.opt.moco_dim, self.opt.moco_k, self.opt.moco_m, self.opt.moco_t, self.opt.mlp)
self.model.to(self.device)
self.model = torch.nn.parallel.DistributedDataParallel(
self.model, device_ids=[0])
# Define loss function (criterion) and optimizer
self.criterion = nn.CrossEntropyLoss().to(self.device)
self.optimizer = torch.optim.SGD(self.model.parameters(), self.opt.lr,
momentum=self.opt.momentum,
weight_decay=self.opt.weight_decay)
def collate_train_fn(data):
ind = data[0]
return [batches[2 * ind], batches[2 * ind + 1]], 0
batches = []
for i in range(4):
batches.append(torch.randn(self.batch_size, 3, 224, 224).to(self.device))
self.example_inputs = torch.utils.data.DataLoader(
range(2), collate_fn=collate_train_fn)
for i, (images, _) in enumerate(self.example_inputs):
images[0] = images[0].cuda(device=0, non_blocking=True)
images[1] = images[1].cuda(device=0, non_blocking=True)
def get_module(self):
""" Recommended
Returns model, example_inputs
Both model and example_inputs should be on self.device properly.
`model(*example_inputs)` should execute one step of model forward.
"""
images = []
for (i, _) in self.example_inputs:
images = (i[0], i[1])
return self.model, images
def get_optimizer(self):
""" Returns the current optimizer """
return self.optimizer
    def set_optimizer(self, optimizer) -> None:
""" Sets the optimizer for future training """
self.optimizer = optimizer
def train(self):
""" Recommended
Runs training on model for one epoch.
Avoid unnecessary benchmark noise by keeping any tensor creation, memcopy operations in __init__.
Leave warmup to the caller (e.g. don't do it inside)
"""
self.model.train()
n_epochs = 1
for e in range(n_epochs):
adjust_learning_rate(self.optimizer, e, self.opt)
for i, (images, _) in enumerate(self.example_inputs):
# compute output
output, target = self.model(im_q=images[0], im_k=images[1])
loss = self.criterion(output, target)
# compute gradient and do SGD step
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
""" Recommended
Run evaluation on model for one iteration. One iteration should be sufficient
to warm up the model for the purpose of profiling.
In most cases this can use the `get_module` API but in some cases libraries
do not have a single Module object used for inference. In these case, you can
write a custom eval function.
Avoid unnecessary benchmark noise by keeping any tensor creation, memcopy operations in __init__.
Leave warmup to the caller (e.g. don't do it inside)
"""
for i, (images, _) in enumerate(self.example_inputs):
out = self.model(im_q=images[0], im_k=images[1])
return out
|
# only needs torch and torchvision
if __name__ == '__main__':
pass
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=30., type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
metavar='W', help='weight decay (default: 0.)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str,
help='path to moco pretrained checkpoint')
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# freeze all layers but the last fc
for name, param in model.named_parameters():
if name not in ['fc.weight', 'fc.bias']:
param.requires_grad = False
# init the fc layer
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
# load from pre-trained, before DistributedDataParallel constructor
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
# rename moco pre-trained keys
state_dict = checkpoint['state_dict']
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
# remove prefix
state_dict[k[len("module.encoder_q."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
args.start_epoch = 0
msg = model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optimize only the linear classifier
parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
assert len(parameters) == 2 # fc.weight, fc.bias
optimizer = torch.optim.SGD(parameters, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
if epoch == args.start_epoch:
sanity_check(model.state_dict(), args.pretrained)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
"""
Switch to eval mode:
Under the protocol of linear classification on frozen features/models,
it is not legitimate to change any part of the pre-trained model.
BatchNorm in train mode may revise running mean/std (even if it receives
no gradient), which are part of the model parameters too.
"""
model.eval()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def sanity_check(state_dict, pretrained_weights):
"""
Linear classifier should not change any weights other than the linear layer.
This sanity check asserts nothing wrong happens (e.g., BN stats updated).
"""
print("=> loading '{}' for sanity check".format(pretrained_weights))
checkpoint = torch.load(pretrained_weights, map_location="cpu")
state_dict_pre = checkpoint['state_dict']
for k in list(state_dict.keys()):
# only ignore fc layer
if 'fc.weight' in k or 'fc.bias' in k:
continue
# name in pretrained model
k_pre = 'module.encoder_q.' + k[len('module.'):] \
if k.startswith('module.') else 'module.encoder_q.' + k
assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \
'{} is changed in linear classifier training.'.format(k)
print("=> sanity check passed.")
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
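# Typical usage (illustrative, not part of the original file):
#   losses = AverageMeter('Loss', ':.4e')
#   losses.update(loss.item(), n=images.size(0))  # size-weighted running mean
#   print(losses)                                 # e.g. "Loss 1.2340e-01 (5.6780e-01)"
# str(meter) shows the latest value and the running average kept in .avg.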
class ProgressMeter:
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
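# Worked example (illustrative): with the MoCo linear-evaluation defaults
# args.lr = 30.0 and args.schedule = [60, 80], the loop above yields
# lr = 30.0 for epochs 0-59, 3.0 for epochs 60-79, and 0.3 from epoch 80 on,
# i.e. a 10x step decay at each milestone.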
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # use reshape: correct comes from pred.t() and may be non-contiguous,
            # in which case .view(-1) raises an error on recent PyTorch versions
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(self, base_encoder, dim=128, K=65536, m=0.999, T=0.07, mlp=False):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(num_classes=dim)
self.encoder_k = base_encoder(num_classes=dim)
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.mul_(self.m).add_(param_q.mul(1. - self.m))
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr:ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q, im_k):
"""
Input:
im_q: a batch of query images
im_k: a batch of key images
Output:
logits, targets
"""
# compute query features
q = self.encoder_q(im_q) # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits, labels
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
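# Illustrative usage sketch (added here; not part of the original file).
# forward() calls concat_all_gather(), so torch.distributed must already be
# initialized and the model wrapped in DistributedDataParallel. The InfoNCE
# objective then reduces to cross-entropy over the returned logits:
#   model = MoCo(torchvision.models.resnet50, dim=128, K=65536, m=0.999, T=0.07)
#   logits, labels = model(im_q=q_crops, im_k=k_crops)
#   loss = nn.CrossEntropyLoss()(logits, labels)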
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
class TwoCropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(self, base_transform):
self.base_transform = base_transform
def __call__(self, x):
q = self.base_transform(x)
k = self.base_transform(x)
return [q, k]
class GaussianBlur:
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
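# Illustrative sketch (added here; not part of the original file): in the
# MoCo v2 recipe these helpers are combined with torchvision transforms
# roughly as follows (normalization omitted):
#   augmentation = transforms.Compose([
#       transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
#       transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
#       transforms.RandomGrayscale(p=0.2),
#       transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
#       transforms.RandomHorizontalFlip(),
#       transforms.ToTensor(),
#   ])
#   train_transform = TwoCropsTransform(augmentation)  # __call__ -> [q, k]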
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
obj = obj["state_dict"]
newmodel = {}
for k, v in obj.items():
if not k.startswith("module.encoder_q."):
continue
old_k = k
k = k.replace("module.encoder_q.", "")
if "layer" not in k:
k = "stem." + k
for t in [1, 2, 3, 4]:
k = k.replace("layer{}".format(t), "res{}".format(t + 1))
for t in [1, 2, 3]:
k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
k = k.replace("downsample.0", "shortcut")
k = k.replace("downsample.1", "shortcut.norm")
print(old_k, "->", k)
newmodel[k] = v.numpy()
res = {"model": newmodel, "__author__": "MOCO", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator
from detectron2.layers import get_norm
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, Res5ROIHeads
@ROI_HEADS_REGISTRY.register()
class Res5ROIHeadsExtraNorm(Res5ROIHeads):
"""
As described in the MOCO paper, there is an extra BN layer
following the res5 stage.
"""
def _build_res5_block(self, cfg):
seq, out_channels = super()._build_res5_block(cfg)
norm = cfg.MODEL.RESNETS.NORM
norm = get_norm(norm, out_channels)
seq.add_module("norm", norm)
return seq, out_channels
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
if "coco" in dataset_name:
return COCOEvaluator(dataset_name, cfg, True, output_folder)
else:
assert "voc" in dataset_name
return PascalVOCDetectionEvaluator(dataset_name)
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.SEGMENTATION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import cv2
import torch
import random
import numpy as np
from .baseline.Renderer.model import FCN
from .baseline.DRL.evaluator import Evaluator
from .baseline.utils.util import *
from .baseline.DRL.ddpg import DDPG
from .baseline.DRL.multi import fastenv
from ...util.model import BenchmarkModel
from typing import Tuple
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from argparse import Namespace
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
class Model(BenchmarkModel):
task = REINFORCEMENT_LEARNING.OTHER_RL
DEFAULT_TRAIN_BSIZE = 96
DEFAULT_EVAL_BSIZE = 96
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
# Train: These options are from source code.
# Source: https://arxiv.org/pdf/1903.04411.pdf
# Code: https://github.com/megvii-research/ICCV2019-LearningToPaint/blob/master/baseline/train.py
self.args = Namespace(**{
'validate_episodes': 5,
'validate_interval': 50,
'max_step': 40,
'discount': 0.95**5,
'episode_train_times': 10,
'noise_factor': 0.0,
'tau': 0.001,
'rmsize': 800,
})
        # Train: the original inputs are CelebA face images resized to 128 x 128.
        # Here we create 2000 random tensors as stand-in inputs; the upstream
        # training code instead samples from 200,000 CelebA images.
self.width = 128
self.image_examples = torch.rand(2000, 3, self.width, self.width)
# LearningToPaint includes actor, critic, and discriminator models.
self.Decoder = FCN()
self.step = 0
self.env = fastenv(max_episode_length=self.args.max_step, env_batch=self.batch_size,
images=self.image_examples, device=self.device, Decoder=self.Decoder)
self.agent = DDPG(batch_size=self.batch_size, env_batch=self.batch_size,
max_step=self.args.max_step, tau=self.args.tau, discount=self.args.discount,
rmsize=self.args.rmsize, device=self.device, Decoder=self.Decoder)
self.evaluate = Evaluator(args=self.args, env_batch=self.batch_size, writer=None)
self.observation = self.env.reset()
self.agent.reset(self.observation, self.args.noise_factor)
if test == "train":
self.agent.train()
elif test == "eval":
self.agent.eval()
def get_module(self):
action = self.agent.select_action(self.observation, noise_factor=self.args.noise_factor)
self.observation, reward, done, _ = self.env.step(action)
self.agent.observe(reward, self.observation, done, self.step)
state, action, reward, \
next_state, terminal = self.agent.memory.sample_batch(self.batch_size, self.device)
state = torch.cat((state[:, :6].float() / 255, state[:, 6:7].float() / self.args.max_step,
self.agent.coord.expand(state.shape[0], 2, 128, 128)), 1)
return self.agent.actor, (state, )
def set_module(self, new_model):
self.agent.actor = new_model
def train(self):
episode = episode_steps = 0
episode_steps += 1
if self.observation is None:
self.observation = self.env.reset()
self.agent.reset(self.observation, self.args.noise_factor)
action = self.agent.select_action(self.observation, noise_factor=self.args.noise_factor)
self.observation, reward, done, _ = self.env.step(action)
self.agent.observe(reward, self.observation, done, self.step)
if (episode_steps >= self.args.max_step and self.args.max_step):
# [optional] evaluate
if episode > 0 and self.args.validate_interval > 0 and \
episode % self.args.validate_interval == 0:
reward, dist = self.evaluate(self.env, self.agent.select_action)
tot_Q = 0.
tot_value_loss = 0.
lr = (3e-4, 1e-3)
for i in range(self.args.episode_train_times):
Q, value_loss = self.agent.update_policy(lr)
tot_Q += Q.data.cpu().numpy()
tot_value_loss += value_loss.data.cpu().numpy()
# reset
self.observation = None
episode_steps = 0
episode += 1
self.step += 1
def eval(self) -> Tuple[torch.Tensor]:
reward, dist = self.evaluate(self.env, self.agent.select_action)
return (torch.tensor(reward), torch.tensor(dist))
|
import subprocess
import sys
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "Super_SloMo_inputs.tar.gz", decompress=True)
pip_install_requirements()
|
import sys
import json
import torch
import numpy as np
import argparse
import torchvision.transforms as transforms
import cv2
from DRL.ddpg import decode
from utils.util import *
from PIL import Image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
aug = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
])
width = 128
convas_area = width * width
img_train = []
img_test = []
train_num = 0
test_num = 0
class Paint:
def __init__(self, batch_size, max_step):
self.batch_size = batch_size
self.max_step = max_step
        self.action_space = 13
self.observation_space = (self.batch_size, width, width, 7)
self.test = False
def load_data(self):
# CelebA
global train_num, test_num
for i in range(200000):
img_id = '%06d' % (i + 1)
img = cv2.imread('../data/img_align_celeba/' + img_id + '.jpg', cv2.IMREAD_UNCHANGED)
img = cv2.resize(img, (width, width))
if i > 2000:
train_num += 1
img_train.append(img)
else:
test_num += 1
img_test.append(img)
def pre_data(self, id, test):
if test:
img = img_test[id]
else:
img = img_train[id]
if not test:
img = aug(img)
img = np.asarray(img)
return np.transpose(img, (2, 0, 1))
def reset(self, test=False, begin_num=False):
self.test = test
self.imgid = [0] * self.batch_size
self.gt = torch.zeros([self.batch_size, 3, width, width], dtype=torch.uint8).to(device)
for i in range(self.batch_size):
if test:
id = (i + begin_num) % test_num
else:
id = np.random.randint(train_num)
self.imgid[i] = id
self.gt[i] = torch.tensor(self.pre_data(id, test))
self.tot_reward = ((self.gt.float() / 255) ** 2).mean(1).mean(1).mean(1)
self.stepnum = 0
self.canvas = torch.zeros([self.batch_size, 3, width, width], dtype=torch.uint8).to(device)
self.lastdis = self.ini_dis = self.cal_dis()
return self.observation()
def observation(self):
# canvas B * 3 * width * width
# gt B * 3 * width * width
# T B * 1 * width * width
T = torch.ones([self.batch_size, 1, width, width], dtype=torch.uint8) * self.stepnum
return torch.cat((self.canvas, self.gt, T.to(device)), 1) # canvas, img, T
def cal_trans(self, s, t):
return (s.transpose(0, 3) * t).transpose(0, 3)
def step(self, action):
self.canvas = (decode(action, self.canvas.float() / 255) * 255).byte()
self.stepnum += 1
ob = self.observation()
done = (self.stepnum == self.max_step)
reward = self.cal_reward() # np.array([0.] * self.batch_size)
return ob.detach(), reward, np.array([done] * self.batch_size), None
def cal_dis(self):
return (((self.canvas.float() - self.gt.float()) / 255) ** 2).mean(1).mean(1).mean(1)
def cal_reward(self):
dis = self.cal_dis()
reward = (self.lastdis - dis) / (self.ini_dis + 1e-8)
self.lastdis = dis
return to_numpy(reward)
|
import os
import cv2
import torch
import numpy as np
import argparse
import torch.nn as nn
import torch.nn.functional as F
from DRL.actor import *
from Renderer.stroke_gen import *
from Renderer.model import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
width = 128
parser = argparse.ArgumentParser(description='Learning to Paint')
parser.add_argument('--max_step', default=40, type=int, help='max length for episode')
parser.add_argument('--actor', default='./model/Paint-run1/actor.pkl', type=str, help='Actor model')
parser.add_argument('--renderer', default='./renderer.pkl', type=str, help='renderer model')
parser.add_argument('--img', default='image/test.png', type=str, help='test image')
parser.add_argument('--imgid', default=0, type=int, help='set begin number for generated image')
parser.add_argument('--divide', default=4, type=int, help='divide the target image to get better resolution')
args = parser.parse_args()
canvas_cnt = args.divide * args.divide
T = torch.ones([1, 1, width, width], dtype=torch.float32).to(device)
img = cv2.imread(args.img, cv2.IMREAD_COLOR)
origin_shape = (img.shape[1], img.shape[0])
coord = torch.zeros([1, 2, width, width])
for i in range(width):
for j in range(width):
coord[0, 0, i, j] = i / (width - 1.)
coord[0, 1, i, j] = j / (width - 1.)
coord = coord.to(device) # Coordconv
Decoder = FCN()
Decoder.load_state_dict(torch.load(args.renderer))
def decode(x, canvas): # b * (10 + 3)
x = x.view(-1, 10 + 3)
stroke = 1 - Decoder(x[:, :10])
stroke = stroke.view(-1, width, width, 1)
color_stroke = stroke * x[:, -3:].view(-1, 1, 1, 3)
stroke = stroke.permute(0, 3, 1, 2)
color_stroke = color_stroke.permute(0, 3, 1, 2)
stroke = stroke.view(-1, 5, 1, width, width)
color_stroke = color_stroke.view(-1, 5, 3, width, width)
res = []
for i in range(5):
canvas = canvas * (1 - stroke[:, i]) + color_stroke[:, i]
res.append(canvas)
return canvas, res
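# Note on the action layout (added for clarity): each 13-dim action packs the
# 10 stroke parameters consumed by the renderer (quadratic Bezier control
# points x0..y2, radii z0/z2 and intensities w0/w2, see
# Renderer/stroke_gen.py) plus 3 RGB values; the actor emits a bundle of
# 5 strokes per step, hence its 65-dim output (65 = 5 * 13).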
def small2large(x):
# (d * d, width, width) -> (d * width, d * width)
x = x.reshape(args.divide, args.divide, width, width, -1)
x = np.transpose(x, (0, 2, 1, 3, 4))
x = x.reshape(args.divide * width, args.divide * width, -1)
return x
def large2small(x):
# (d * width, d * width) -> (d * d, width, width)
x = x.reshape(args.divide, width, args.divide, width, 3)
x = np.transpose(x, (0, 2, 1, 3, 4))
x = x.reshape(canvas_cnt, width, width, 3)
return x
def smooth(img):
def smooth_pix(img, tx, ty):
if tx == args.divide * width - 1 or ty == args.divide * width - 1 or tx == 0 or ty == 0:
return img
img[tx, ty] = (img[tx, ty] + img[tx + 1, ty] + img[tx, ty + 1] + img[tx - 1, ty] + img[tx, ty - 1] + img[tx + 1, ty - 1] + img[tx - 1, ty + 1] + img[tx - 1, ty - 1] + img[tx + 1, ty + 1]) / 9
return img
for p in range(args.divide):
for q in range(args.divide):
x = p * width
y = q * width
for k in range(width):
img = smooth_pix(img, x + k, y + width - 1)
if q != args.divide - 1:
img = smooth_pix(img, x + k, y + width)
for k in range(width):
img = smooth_pix(img, x + width - 1, y + k)
if p != args.divide - 1:
img = smooth_pix(img, x + width, y + k)
return img
def save_img(res, imgid, divide=False):
output = res.detach().cpu().numpy() # d * d, 3, width, width
output = np.transpose(output, (0, 2, 3, 1))
if divide:
output = small2large(output)
output = smooth(output)
else:
output = output[0]
output = (output * 255).astype('uint8')
output = cv2.resize(output, origin_shape)
cv2.imwrite('output/generated' + str(imgid) + '.png', output)
actor = ResNet(9, 18, 65) # action_bundle = 5, 65 = 5 * 13
actor.load_state_dict(torch.load(args.actor))
actor = actor.to(device).eval()
Decoder = Decoder.to(device).eval()
canvas = torch.zeros([1, 3, width, width]).to(device)
patch_img = cv2.resize(img, (width * args.divide, width * args.divide))
patch_img = large2small(patch_img)
patch_img = np.transpose(patch_img, (0, 3, 1, 2))
patch_img = torch.tensor(patch_img).to(device).float() / 255.
img = cv2.resize(img, (width, width))
img = img.reshape(1, width, width, 3)
img = np.transpose(img, (0, 3, 1, 2))
img = torch.tensor(img).to(device).float() / 255.
os.makedirs('output', exist_ok=True)
with torch.no_grad():
if args.divide != 1:
args.max_step = args.max_step // 2
for i in range(args.max_step):
stepnum = T * i / args.max_step
actions = actor(torch.cat([canvas, img, stepnum, coord], 1))
canvas, res = decode(actions, canvas)
print('canvas step {}, L2Loss = {}'.format(i, ((canvas - img) ** 2).mean()))
for j in range(5):
save_img(res[j], args.imgid)
args.imgid += 1
if args.divide != 1:
canvas = canvas[0].detach().cpu().numpy()
canvas = np.transpose(canvas, (1, 2, 0))
canvas = cv2.resize(canvas, (width * args.divide, width * args.divide))
canvas = large2small(canvas)
canvas = np.transpose(canvas, (0, 3, 1, 2))
canvas = torch.tensor(canvas).to(device).float()
coord = coord.expand(canvas_cnt, 2, width, width)
T = T.expand(canvas_cnt, 1, width, width)
for i in range(args.max_step):
stepnum = T * i / args.max_step
actions = actor(torch.cat([canvas, patch_img, stepnum, coord], 1))
canvas, res = decode(actions, canvas)
print('divided canvas step {}, L2Loss = {}'.format(i, ((canvas - patch_img) ** 2).mean()))
for j in range(5):
save_img(res[j], args.imgid, True)
args.imgid += 1
|
#!/usr/bin/env python3
import cv2
import random
import numpy as np
import argparse
from DRL.evaluator import Evaluator
from utils.util import *
from utils.tensorboard import TensorBoard
import time
import os     # used below; previously only available via utils.util's star import
import torch  # likewise (manual_seed / cuda seeding at the bottom of this file)
exp = os.path.abspath('.').split('/')[-1]
writer = TensorBoard('../train_log/{}'.format(exp))
os.system('ln -sf ../train_log/{} ./log'.format(exp))
os.makedirs('./model', exist_ok=True)
def train(agent, env, evaluate):
train_times = args.train_times
env_batch = args.env_batch
validate_interval = args.validate_interval
max_step = args.max_step
debug = args.debug
episode_train_times = args.episode_train_times
resume = args.resume
output = args.output
time_stamp = time.time()
step = episode = episode_steps = 0
tot_reward = 0.
observation = None
noise_factor = args.noise_factor
while step <= train_times:
step += 1
episode_steps += 1
# reset if it is the start of episode
if observation is None:
observation = env.reset()
agent.reset(observation, noise_factor)
action = agent.select_action(observation, noise_factor=noise_factor)
observation, reward, done, _ = env.step(action)
agent.observe(reward, observation, done, step)
if (episode_steps >= max_step and max_step):
if step > args.warmup:
# [optional] evaluate
if episode > 0 and validate_interval > 0 and episode % validate_interval == 0:
reward, dist = evaluate(env, agent.select_action, debug=debug)
if debug: prRed('Step_{:07d}: mean_reward:{:.3f} mean_dist:{:.3f} var_dist:{:.3f}'.format(step - 1, np.mean(reward), np.mean(dist), np.var(dist)))
writer.add_scalar('validate/mean_reward', np.mean(reward), step)
writer.add_scalar('validate/mean_dist', np.mean(dist), step)
writer.add_scalar('validate/var_dist', np.var(dist), step)
agent.save_model(output)
train_time_interval = time.time() - time_stamp
time_stamp = time.time()
tot_Q = 0.
tot_value_loss = 0.
if step > args.warmup:
if step < 10000 * max_step:
lr = (3e-4, 1e-3)
elif step < 20000 * max_step:
lr = (1e-4, 3e-4)
else:
lr = (3e-5, 1e-4)
for i in range(episode_train_times):
Q, value_loss = agent.update_policy(lr)
tot_Q += Q.data.cpu().numpy()
tot_value_loss += value_loss.data.cpu().numpy()
writer.add_scalar('train/critic_lr', lr[0], step)
writer.add_scalar('train/actor_lr', lr[1], step)
writer.add_scalar('train/Q', tot_Q / episode_train_times, step)
writer.add_scalar('train/critic_loss', tot_value_loss / episode_train_times, step)
if debug: prBlack('#{}: steps:{} interval_time:{:.2f} train_time:{:.2f}' \
.format(episode, step, train_time_interval, time.time()-time_stamp))
time_stamp = time.time()
# reset
observation = None
episode_steps = 0
episode += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Learning to Paint')
# hyper-parameter
    parser.add_argument('--warmup', default=400, type=int, help='number of steps that only fill the replay memory, before any training')
parser.add_argument('--discount', default=0.95**5, type=float, help='discount factor')
parser.add_argument('--batch_size', default=96, type=int, help='minibatch size')
parser.add_argument('--rmsize', default=800, type=int, help='replay memory size')
parser.add_argument('--env_batch', default=96, type=int, help='concurrent environment number')
parser.add_argument('--tau', default=0.001, type=float, help='moving average for target network')
parser.add_argument('--max_step', default=40, type=int, help='max length for episode')
parser.add_argument('--noise_factor', default=0.05, type=float, help='noise level for parameter space noise')
    parser.add_argument('--validate_interval', default=50, type=int, help='run validation every this many episodes')
    parser.add_argument('--validate_episodes', default=5, type=int, help='how many episodes to run during validation')
    parser.add_argument('--train_times', default=2000000, type=int, help='total number of training steps')
    parser.add_argument('--episode_train_times', default=10, type=int, help='training updates per episode')
    parser.add_argument('--resume', default=None, type=str, help='path of a saved model to resume from')
    parser.add_argument('--output', default='./model', type=str, help='output folder for saved models')
parser.add_argument('--debug', dest='debug', action='store_true', help='print some info')
parser.add_argument('--seed', default=1234, type=int, help='random seed')
args = parser.parse_args()
args.output = get_output_folder(args.output, "Paint")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
from DRL.ddpg import DDPG
from DRL.multi import fastenv
fenv = fastenv(args.max_step, args.env_batch, writer)
agent = DDPG(args.batch_size, args.env_batch, args.max_step, \
args.tau, args.discount, args.rmsize, \
writer, args.resume, args.output)
evaluate = Evaluator(args, writer)
print('observation_space', fenv.observation_space, 'action_space', fenv.action_space)
train(agent, fenv, evaluate)
|
import cv2
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from utils.tensorboard import TensorBoard
from Renderer.model import FCN
from Renderer.stroke_gen import *
writer = TensorBoard("../train_log/")
import torch.optim as optim
criterion = nn.MSELoss()
net = FCN()
optimizer = optim.Adam(net.parameters(), lr=3e-6)
batch_size = 64
use_cuda = torch.cuda.is_available()
step = 0
def save_model():
if use_cuda:
net.cpu()
torch.save(net.state_dict(), "../renderer.pkl")
if use_cuda:
net.cuda()
def load_weights():
pretrained_dict = torch.load("../renderer.pkl")
model_dict = net.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
load_weights()
while step < 500000:
net.train()
train_batch = []
ground_truth = []
for i in range(batch_size):
f = np.random.uniform(0, 1, 10)
train_batch.append(f)
ground_truth.append(draw(f))
train_batch = torch.tensor(train_batch).float()
ground_truth = torch.tensor(ground_truth).float()
if use_cuda:
net = net.cuda()
train_batch = train_batch.cuda()
ground_truth = ground_truth.cuda()
gen = net(train_batch)
optimizer.zero_grad()
loss = criterion(gen, ground_truth)
loss.backward()
optimizer.step()
print(step, loss.item())
if step < 200000:
lr = 1e-4
elif step < 400000:
lr = 1e-5
else:
lr = 1e-6
for param_group in optimizer.param_groups:
param_group["lr"] = lr
writer.add_scalar("train/loss", loss.item(), step)
if step % 100 == 0:
net.eval()
gen = net(train_batch)
loss = criterion(gen, ground_truth)
writer.add_scalar("val/loss", loss.item(), step)
for i in range(32):
G = gen[i].cpu().data.numpy()
GT = ground_truth[i].cpu().data.numpy()
writer.add_image("train/gen{}.png".format(i), G, step)
writer.add_image("train/ground_truth{}.png".format(i), GT, step)
if step % 1000 == 0:
save_model()
step += 1
|
import cv2
import numpy as np
def normal(x, width):
    return int(x * (width - 1) + 0.5)
def draw(f, width=128):
x0, y0, x1, y1, x2, y2, z0, z2, w0, w2 = f
x1 = x0 + (x2 - x0) * x1
y1 = y0 + (y2 - y0) * y1
x0 = normal(x0, width * 2)
x1 = normal(x1, width * 2)
x2 = normal(x2, width * 2)
y0 = normal(y0, width * 2)
y1 = normal(y1, width * 2)
y2 = normal(y2, width * 2)
    z0 = int(1 + z0 * width // 2)
    z2 = int(1 + z2 * width // 2)
canvas = np.zeros([width * 2, width * 2]).astype('float32')
tmp = 1. / 100
for i in range(100):
t = i * tmp
        x = int((1 - t) * (1 - t) * x0 + 2 * t * (1 - t) * x1 + t * t * x2)
        y = int((1 - t) * (1 - t) * y0 + 2 * t * (1 - t) * y1 + t * t * y2)
        z = int((1 - t) * z0 + t * z2)
w = (1-t) * w0 + t * w2
cv2.circle(canvas, (y, x), z, w, -1)
return 1 - cv2.resize(canvas, dsize=(width, width))
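if __name__ == '__main__':
    # Minimal smoke test (added for illustration): render one random stroke.
    f = np.random.uniform(0, 1, 10)
    canvas = draw(f)
    print(canvas.shape, float(canvas.min()), float(canvas.max()))  # (128, 128), values in [0, 1]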
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
class FCN(nn.Module):
def __init__(self):
super(FCN, self).__init__()
self.fc1 = (nn.Linear(10, 512))
self.fc2 = (nn.Linear(512, 1024))
self.fc3 = (nn.Linear(1024, 2048))
self.fc4 = (nn.Linear(2048, 4096))
self.conv1 = (nn.Conv2d(16, 32, 3, 1, 1))
self.conv2 = (nn.Conv2d(32, 32, 3, 1, 1))
self.conv3 = (nn.Conv2d(8, 16, 3, 1, 1))
self.conv4 = (nn.Conv2d(16, 16, 3, 1, 1))
self.conv5 = (nn.Conv2d(4, 8, 3, 1, 1))
self.conv6 = (nn.Conv2d(8, 4, 3, 1, 1))
self.pixel_shuffle = nn.PixelShuffle(2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = x.view(-1, 16, 16, 16)
x = F.relu(self.conv1(x))
x = self.pixel_shuffle(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
x = F.relu(self.conv5(x))
x = self.pixel_shuffle(self.conv6(x))
x = torch.sigmoid(x)
return 1 - x.view(-1, 128, 128)
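if __name__ == '__main__':
    # Minimal shape check (added for illustration): the renderer maps 10-dim
    # stroke parameters to 128 x 128 grayscale stroke images.
    net = FCN()
    out = net(torch.rand(4, 10))
    print(out.shape)  # torch.Size([4, 128, 128])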
|
import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dim = 128
LAMBDA = 10 # Gradient penalty lambda hyperparameter
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0))
self.relu0 = TReLU()
self.relu1 = TReLU()
self.relu2 = TReLU()
self.relu3 = TReLU()
def forward(self, x):
x = self.conv0(x)
x = self.relu0(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = x.view(-1, 64) # Patch Q
return x
netD = Discriminator()
target_netD = Discriminator()
netD = netD.to(device)
target_netD = target_netD.to(device)
hard_update(target_netD, netD)
optimizerD = Adam(netD.parameters(), lr=3e-4, betas=(0.5, 0.999))
def cal_gradient_penalty(netD, real_data, fake_data, batch_size):
alpha = torch.rand(batch_size, 1)
alpha = alpha.expand(batch_size, int(real_data.nelement()/batch_size)).contiguous()
alpha = alpha.view(batch_size, 6, dim, dim)
alpha = alpha.to(device)
fake_data = fake_data.view(batch_size, 6, dim, dim)
interpolates = Variable(alpha * real_data.data + ((1 - alpha) * fake_data.data), requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(disc_interpolates, interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
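# The term above is the WGAN-GP regularizer of Gulrajani et al. 2017,
# LAMBDA * E[(||grad_x D(x_hat)||_2 - 1)^2], evaluated at random
# interpolates x_hat = alpha * real + (1 - alpha) * fake.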
def cal_reward(fake_data, real_data):
return target_netD(torch.cat([real_data, fake_data], 1))
def save_gan(path):
netD.cpu()
torch.save(netD.state_dict(),'{}/wgan.pkl'.format(path))
netD.to(device)
def load_gan(path):
netD.load_state_dict(torch.load('{}/wgan.pkl'.format(path)))
def update(fake_data, real_data):
fake_data = fake_data.detach()
real_data = real_data.detach()
fake = torch.cat([real_data, fake_data], 1)
real = torch.cat([real_data, real_data], 1)
D_real = netD(real)
D_fake = netD(fake)
gradient_penalty = cal_gradient_penalty(netD, real, fake, real.shape[0])
optimizerD.zero_grad()
D_cost = D_fake.mean() - D_real.mean() + gradient_penalty
D_cost.backward()
optimizerD.step()
soft_update(target_netD, netD, 0.001)
return D_fake.mean(), D_real.mean(), gradient_penalty
|
import cv2
import torch
import numpy as np
from env import Paint
from utils.util import *
from DRL.ddpg import decode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class fastenv():
def __init__(self,
max_episode_length=10, env_batch=64, \
writer=None):
self.max_episode_length = max_episode_length
self.env_batch = env_batch
self.env = Paint(self.env_batch, self.max_episode_length)
self.env.load_data()
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.writer = writer
self.test = False
self.log = 0
def save_image(self, log, step):
for i in range(self.env_batch):
if self.env.imgid[i] <= 10:
canvas = cv2.cvtColor((to_numpy(self.env.canvas[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
self.writer.add_image('{}/canvas_{}.png'.format(str(self.env.imgid[i]), str(step)), canvas, log)
if step == self.max_episode_length:
for i in range(self.env_batch):
if self.env.imgid[i] < 50:
gt = cv2.cvtColor((to_numpy(self.env.gt[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
canvas = cv2.cvtColor((to_numpy(self.env.canvas[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
self.writer.add_image(str(self.env.imgid[i]) + '/_target.png', gt, log)
self.writer.add_image(str(self.env.imgid[i]) + '/_canvas.png', canvas, log)
def step(self, action):
with torch.no_grad():
ob, r, d, _ = self.env.step(torch.tensor(action).to(device))
if d[0]:
if not self.test:
self.dist = self.get_dist()
for i in range(self.env_batch):
self.writer.add_scalar('train/dist', self.dist[i], self.log)
self.log += 1
return ob, r, d, _
def get_dist(self):
return to_numpy((((self.env.gt.float() - self.env.canvas.float()) / 255) ** 2).mean(1).mean(1).mean(1))
def reset(self, test=False, episode=0):
self.test = test
ob = self.env.reset(self.test, episode * self.env_batch)
return ob
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
from Renderer.model import *
from DRL.rpm import rpm
from DRL.actor import *
from DRL.critic import *
from DRL.wgan import *
from utils.util import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
coord = torch.zeros([1, 2, 128, 128])
for i in range(128):
for j in range(128):
coord[0, 0, i, j] = i / 127.
coord[0, 1, i, j] = j / 127.
coord = coord.to(device)
criterion = nn.MSELoss()
Decoder = FCN()
Decoder.load_state_dict(torch.load('../renderer.pkl'))
def decode(x, canvas): # b * (10 + 3)
x = x.view(-1, 10 + 3)
stroke = 1 - Decoder(x[:, :10])
stroke = stroke.view(-1, 128, 128, 1)
color_stroke = stroke * x[:, -3:].view(-1, 1, 1, 3)
stroke = stroke.permute(0, 3, 1, 2)
color_stroke = color_stroke.permute(0, 3, 1, 2)
stroke = stroke.view(-1, 5, 1, 128, 128)
color_stroke = color_stroke.view(-1, 5, 3, 128, 128)
for i in range(5):
canvas = canvas * (1 - stroke[:, i]) + color_stroke[:, i]
return canvas
def cal_trans(s, t):
return (s.transpose(0, 3) * t).transpose(0, 3)
class DDPG(object):
def __init__(self, batch_size=64, env_batch=1, max_step=40, \
tau=0.001, discount=0.9, rmsize=800, \
writer=None, resume=None, output_path=None):
self.max_step = max_step
self.env_batch = env_batch
self.batch_size = batch_size
self.actor = ResNet(9, 18, 65) # target, canvas, stepnum, coordconv 3 + 3 + 1 + 2
self.actor_target = ResNet(9, 18, 65)
self.critic = ResNet_wobn(9, 18, 1)
self.critic_target = ResNet_wobn(9, 18, 1)
self.actor_optim = Adam(self.actor.parameters(), lr=1e-2)
self.critic_optim = Adam(self.critic.parameters(), lr=1e-2)
if (resume != None):
self.load_weights(resume)
hard_update(self.actor_target, self.actor)
hard_update(self.critic_target, self.critic)
# Create replay buffer
self.memory = rpm(rmsize * max_step)
# Hyper-parameters
self.tau = tau
self.discount = discount
# Tensorboard
self.writer = writer
self.log = 0
self.state = [None] * self.env_batch # Most recent state
self.action = [None] * self.env_batch # Most recent action
self.choose_device()
def play(self, state, target=False):
state = torch.cat((state[:, :6].float() / 255, state[:, 6:7].float() / self.max_step, coord.expand(state.shape[0], 2, 128, 128)), 1)
if target:
return self.actor_target(state)
else:
return self.actor(state)
def update_gan(self, state):
canvas = state[:, :3]
gt = state[:, 3 : 6]
fake, real, penal = update(canvas.float() / 255, gt.float() / 255)
if self.log % 20 == 0:
self.writer.add_scalar('train/gan_fake', fake, self.log)
self.writer.add_scalar('train/gan_real', real, self.log)
self.writer.add_scalar('train/gan_penal', penal, self.log)
def evaluate(self, state, action, target=False):
T = state[:, 6 : 7]
gt = state[:, 3 : 6].float() / 255
canvas0 = state[:, :3].float() / 255
with torch.no_grad(): # model free
canvas1 = decode(action, canvas0)
gan_reward = cal_reward(canvas1, gt) - cal_reward(canvas0, gt) # (batchsize, 64)
# L2_reward = ((canvas0 - gt) ** 2).mean(1).mean(1).mean(1) - ((canvas1 - gt) ** 2).mean(1).mean(1).mean(1)
coord_ = coord.expand(state.shape[0], 2, 128, 128)
merged_state = torch.cat([canvas0, gt, (T + 1).float() / self.max_step, coord_], 1)
if target:
Q = self.critic_target([merged_state, action])
return Q, gan_reward
else:
Q = self.critic([merged_state, action])
if self.log % 20 == 0:
self.writer.add_scalar('train/expect_reward', Q.mean(), self.log)
self.writer.add_scalar('train/gan_reward', gan_reward.mean(), self.log)
return Q, gan_reward
def update_policy(self, lr):
self.log += 1
for param_group in self.critic_optim.param_groups:
param_group['lr'] = lr[0]
for param_group in self.actor_optim.param_groups:
param_group['lr'] = lr[1]
# Sample batch
state, action, reward, \
next_state, terminal = self.memory.sample_batch(self.batch_size, device)
self.update_gan(next_state)
with torch.no_grad():
next_action = self.play(next_state, True)
target_q, _ = self.evaluate(next_state, next_action, True)
target_q = self.discount * ((1 - terminal.float()).view(-1, 1)) * target_q
cur_q, step_reward = self.evaluate(state, action)
target_q += step_reward.detach()
value_loss = criterion(cur_q, target_q)
self.critic.zero_grad()
value_loss.backward(retain_graph=True)
self.critic_optim.step()
action = self.play(state)
pre_q, _ = self.evaluate(state.detach(), action)
policy_loss = -pre_q.mean()
self.actor.zero_grad()
policy_loss.backward(retain_graph=True)
self.actor_optim.step()
# Target update
soft_update(self.actor_target, self.actor, self.tau)
soft_update(self.critic_target, self.critic, self.tau)
return -policy_loss, value_loss
def observe(self, reward, state, done, step):
s0 = torch.tensor(self.state, device='cpu')
a = to_tensor(self.action, "cpu")
r = to_tensor(reward, "cpu")
s1 = torch.tensor(state, device='cpu')
d = to_tensor(done.astype('float32'), "cpu")
for i in range(self.env_batch):
self.memory.append([s0[i], a[i], r[i], s1[i], d[i]])
self.state = state
def noise_action(self, noise_factor, state, action):
noise = np.zeros(action.shape)
for i in range(self.env_batch):
action[i] = action[i] + np.random.normal(0, self.noise_level[i], action.shape[1:]).astype('float32')
return np.clip(action.astype('float32'), 0, 1)
def select_action(self, state, return_fix=False, noise_factor=0):
self.eval()
with torch.no_grad():
action = self.play(state)
action = to_numpy(action)
if noise_factor > 0:
action = self.noise_action(noise_factor, state, action)
self.train()
self.action = action
if return_fix:
return action
return self.action
def reset(self, obs, factor):
self.state = obs
self.noise_level = np.random.uniform(0, factor, self.env_batch)
def load_weights(self, path):
if path is None: return
self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(path)))
self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(path)))
load_gan(path)
def save_model(self, path):
self.actor.cpu()
self.critic.cpu()
torch.save(self.actor.state_dict(),'{}/actor.pkl'.format(path))
torch.save(self.critic.state_dict(),'{}/critic.pkl'.format(path))
save_gan(path)
self.choose_device()
def eval(self):
self.actor.eval()
self.actor_target.eval()
self.critic.eval()
self.critic_target.eval()
def train(self):
self.actor.train()
self.actor_target.train()
self.critic.train()
self.critic_target.train()
def choose_device(self):
Decoder.to(device)
self.actor.to(device)
self.actor_target.to(device)
self.critic.to(device)
self.critic_target.to(device)
|
# from collections import deque
import numpy as np
import random
import torch
import pickle
class rpm:
# replay memory
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.buffer = []
self.index = 0
def append(self, obj):
if self.size() > self.buffer_size:
print('buffer size larger than set value, trimming...')
self.buffer = self.buffer[(self.size() - self.buffer_size):]
elif self.size() == self.buffer_size:
self.buffer[self.index] = obj
self.index += 1
self.index %= self.buffer_size
else:
self.buffer.append(obj)
def size(self):
return len(self.buffer)
def sample_batch(self, batch_size, device, only_state=False):
if self.size() < batch_size:
batch = random.sample(self.buffer, self.size())
else:
batch = random.sample(self.buffer, batch_size)
if only_state:
res = torch.stack(tuple(item[3] for item in batch), dim=0)
return res.to(device)
else:
            item_count = 5
            res = []
            for i in range(item_count):
k = torch.stack(tuple(item[i] for item in batch), dim=0)
res.append(k.to(device))
return res[0], res[1], res[2], res[3], res[4]
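if __name__ == '__main__':
    # Minimal smoke test (added for illustration): store dummy
    # (s0, a, r, s1, done) tuples and sample a batch.
    mem = rpm(buffer_size=100)
    for _ in range(10):
        mem.append([torch.zeros(3), torch.zeros(2), torch.zeros(1),
                    torch.zeros(3), torch.zeros(1)])
    s0, a, r, s1, d = mem.sample_batch(4, 'cpu')
    print(s0.shape, a.shape)  # torch.Size([4, 3]) torch.Size([4, 2])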
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
return (nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False))
def cfg(depth):
depth_lst = [18, 34, 50, 101, 152]
assert (depth in depth_lst), "Error : Resnet depth should be either 18, 34, 50, 101, 152"
cf_dict = {
'18': (BasicBlock, [2,2,2,2]),
'34': (BasicBlock, [3,4,6,3]),
'50': (Bottleneck, [3,4,6,3]),
'101':(Bottleneck, [3,4,23,3]),
'152':(Bottleneck, [3,8,36,3]),
}
return cf_dict[str(depth)]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = (nn.Conv2d(in_planes, planes, kernel_size=1, bias=False))
self.conv2 = (nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False))
self.conv3 = (nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False))
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, num_inputs, depth, num_outputs):
super(ResNet, self).__init__()
self.in_planes = 64
block, num_blocks = cfg(depth)
self.conv1 = conv3x3(num_inputs, 64, 2)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=2)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc = nn.Linear(512, num_outputs)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.avg_pool2d(x, 4)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = torch.sigmoid(x)
return x
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
return weightNorm(nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
coord = torch.zeros([1, 2, 64, 64])
for i in range(64):
for j in range(64):
coord[0, 0, i, j] = i / 63.
coord[0, 1, i, j] = j / 63.
coord = coord.to(device)
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
def cfg(depth):
depth_lst = [18, 34, 50, 101, 152]
assert (depth in depth_lst), "Error : Resnet depth should be either 18, 34, 50, 101, 152"
cf_dict = {
'18': (BasicBlock, [2,2,2,2]),
'34': (BasicBlock, [3,4,6,3]),
'50': (Bottleneck, [3,4,6,3]),
'101':(Bottleneck, [3,4,23,3]),
'152':(Bottleneck, [3,8,36,3]),
}
return cf_dict[str(depth)]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
)
self.relu_1 = TReLU()
self.relu_2 = TReLU()
def forward(self, x):
out = self.relu_1(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = self.relu_2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = weightNorm(nn.Conv2d(in_planes, planes, kernel_size=1, bias=True))
self.conv2 = weightNorm(nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True))
self.conv3 = weightNorm(nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=True))
self.relu_1 = TReLU()
self.relu_2 = TReLU()
self.relu_3 = TReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
)
def forward(self, x):
out = self.relu_1(self.conv1(x))
out = self.relu_2(self.conv2(out))
out = self.conv3(out)
out += self.shortcut(x)
out = self.relu_3(out)
return out
class ResNet_wobn(nn.Module):
def __init__(self, num_inputs, depth, num_outputs):
super(ResNet_wobn, self).__init__()
self.in_planes = 64
block, num_blocks = cfg(depth)
self.conv0 = conv3x3(num_inputs, 32, 2) # 64
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=2) # 32
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) # 16
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=1)
self.conv4 = weightNorm(nn.Conv2d(512, 1, 1, 1, 0))
self.relu_1 = TReLU()
self.conv1 = weightNorm(nn.Conv2d(65 + 2, 64, 1, 1, 0))
self.conv2 = weightNorm(nn.Conv2d(64, 64, 1, 1, 0))
self.conv3 = weightNorm(nn.Conv2d(64, 32, 1, 1, 0))
self.relu_2 = TReLU()
self.relu_3 = TReLU()
self.relu_4 = TReLU()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def a2img(self, x):
tmp = coord.expand(x.shape[0], 2, 64, 64)
x = x.repeat(64, 64, 1, 1).permute(2, 3, 0, 1)
x = self.relu_2(self.conv1(torch.cat([x, tmp], 1)))
x = self.relu_3(self.conv2(x))
x = self.relu_4(self.conv3(x))
return x
def forward(self, input):
x, a = input
a = self.a2img(a)
x = self.relu_1(self.conv0(x))
x = torch.cat([x, a], 1)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv4(x)
return x.view(x.size(0), 64)
|
import numpy as np
from utils.util import *
class Evaluator:
def __init__(self, args, writer):
self.validate_episodes = args.validate_episodes
self.max_step = args.max_step
self.env_batch = args.env_batch
self.writer = writer
self.log = 0
def __call__(self, env, policy, debug=False):
observation = None
for episode in range(self.validate_episodes):
# reset at the start of episode
observation = env.reset(test=True, episode=episode)
            episode_steps = 0
            assert observation is not None
            # start episode
            episode_reward = np.zeros(self.env_batch)
while (episode_steps < self.max_step or not self.max_step):
action = policy(observation)
observation, reward, done, (step_num) = env.step(action)
episode_reward += reward
episode_steps += 1
env.save_image(self.log, episode_steps)
dist = env.get_dist()
self.log += 1
return episode_reward, dist
|
import os
import torch
from torch.autograd import Variable
USE_CUDA = torch.cuda.is_available()
def prRed(prt): print("\033[91m {}\033[00m" .format(prt))
def prGreen(prt): print("\033[92m {}\033[00m" .format(prt))
def prYellow(prt): print("\033[93m {}\033[00m" .format(prt))
def prLightPurple(prt): print("\033[94m {}\033[00m" .format(prt))
def prPurple(prt): print("\033[95m {}\033[00m" .format(prt))
def prCyan(prt): print("\033[96m {}\033[00m" .format(prt))
def prLightGray(prt): print("\033[97m {}\033[00m" .format(prt))
def prBlack(prt): print("\033[98m {}\033[00m" .format(prt))
def to_numpy(var):
return var.cpu().data.numpy() if USE_CUDA else var.data.numpy()
def to_tensor(ndarray, device):
return torch.tensor(ndarray, dtype=torch.float, device=device)
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def hard_update(target, source):
for m1, m2 in zip(target.modules(), source.modules()):
m1._buffers = m2._buffers.copy()
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def get_output_folder(parent_dir, env_name):
"""Return save folder.
Assumes folders in the parent_dir have suffix -run{run
number}. Finds the highest run number and sets the output folder
to that number + 1. This is just convenient so that if you run the
same script multiple times tensorboard can plot all of the results
on the same plots with different names.
Parameters
----------
parent_dir: str
Path of the directory containing all experiment runs.
Returns
-------
parent_dir/run_dir
Path to this run's save directory.
"""
os.makedirs(parent_dir, exist_ok=True)
experiment_id = 0
for folder_name in os.listdir(parent_dir):
if not os.path.isdir(os.path.join(parent_dir, folder_name)):
continue
try:
folder_name = int(folder_name.split('-run')[-1])
if folder_name > experiment_id:
experiment_id = folder_name
        except ValueError:
            pass
experiment_id += 1
parent_dir = os.path.join(parent_dir, env_name)
parent_dir = parent_dir + '-run{}'.format(experiment_id)
os.makedirs(parent_dir, exist_ok=True)
return parent_dir
|
import PIL
from io import BytesIO
import tensorboardX as tb
from tensorboardX.summary import Summary
class TensorBoard:
def __init__(self, model_dir):
self.summary_writer = tb.FileWriter(model_dir)
def add_image(self, tag, img, step):
summary = Summary()
bio = BytesIO()
if type(img) == str:
img = PIL.Image.open(img)
elif type(img) == PIL.Image.Image:
pass
else:
img = PIL.Image.fromarray(img)
img.save(bio, format="png")
image_summary = Summary.Image(encoded_image_string=bio.getvalue())
summary.value.add(tag=tag, image=image_summary)
self.summary_writer.add_summary(summary, global_step=step)
def add_scalar(self, tag, value, step):
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.summary_writer.add_summary(summary, global_step=step)
|
import sys
import json
import torch
import numpy as np
import argparse
import torchvision.transforms as transforms
import cv2
from .DRL.ddpg import decode
from .utils.util import *
from PIL import Image
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
aug = transforms.Compose([transforms.ToPILImage(),
transforms.RandomHorizontalFlip(), ])
width = 128
convas_area = width * width
img_train = []
img_test = []
train_num = 0
test_num = 0
class Paint:
def __init__(self, batch_size, max_step, device='cpu', Decoder=None):
self.batch_size = batch_size
self.max_step = max_step
        self.action_space = 13  # 10 stroke parameters + 3 RGB values per stroke
self.observation_space = (self.batch_size, width, width, 7)
self.test = False
self.device = device
self.Decoder = Decoder
def load_data(self, images=None):
# CelebA
global train_num, test_num
for i in range(200000):
img_id = '%06d' % (i + 1)
            # TorchBench supplies 2000 pre-generated random tensors here instead of
            # reading CelebA images from disk.
if images is not None:
img = images[i % 2000]
else:
img = cv2.imread('./data/img_align_celeba/' + img_id + '.jpg', cv2.IMREAD_UNCHANGED)
img = cv2.resize(img, (width, width))
if i > 2000:
train_num += 1
img_train.append(img)
else:
test_num += 1
img_test.append(img)
def pre_data(self, id, test):
if test:
img = img_test[id]
else:
img = img_train[id]
return img
    # TorchBench note: aug and np.transpose below assume HWC images from cv2, but
    # the generated random tensors are already CHW, so transpose(2, 0, 1) turns
    # [3, 128, 128] into [128, 3, 128] and fails downstream.
# if not test:
# img = aug(img)
# img = np.asarray(img)
# return np.transpose(img, (2, 0, 1))
def reset(self, test=False, begin_num=False):
self.test = test
self.imgid = [0] * self.batch_size
self.gt = torch.zeros([self.batch_size, 3, width, width], dtype=torch.uint8).to(self.device)
for i in range(self.batch_size):
if test:
id = (i + begin_num) % test_num
else:
id = np.random.randint(train_num)
self.imgid[i] = id
# self.gt[i] = torch.tensor(self.pre_data(id, test))
self.gt[i] = self.pre_data(id, test).clone().detach().to(device=self.device)
self.tot_reward = ((self.gt.float() / 255) ** 2).mean(1).mean(1).mean(1)
self.stepnum = 0
self.canvas = torch.zeros([self.batch_size, 3, width, width], dtype=torch.uint8).to(self.device)
self.lastdis = self.ini_dis = self.cal_dis()
return self.observation()
def observation(self):
# canvas B * 3 * width * width
# gt B * 3 * width * width
# T B * 1 * width * width
ob = []
T = torch.ones([self.batch_size, 1, width, width], dtype=torch.uint8) * self.stepnum
return torch.cat((self.canvas, self.gt, T.to(self.device)), 1) # canvas, img, T
def cal_trans(self, s, t):
return (s.transpose(0, 3) * t).transpose(0, 3)
def step(self, action):
self.canvas = (decode(action, self.canvas.float() / 255, self.Decoder) * 255).byte()
self.stepnum += 1
ob = self.observation()
done = (self.stepnum == self.max_step)
reward = self.cal_reward() # np.array([0.] * self.batch_size)
return ob.detach(), reward, np.array([done] * self.batch_size), None
def cal_dis(self):
return (((self.canvas.float() - self.gt.float()) / 255) ** 2).mean(1).mean(1).mean(1)
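    # The reward is the per-step decrease in mean squared distance to the target
    # image, normalized by the initial distance; summed over an episode it
    # telescopes to (initial_dist - final_dist) / initial_dist.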
def cal_reward(self):
dis = self.cal_dis()
reward = (self.lastdis - dis) / (self.ini_dis + 1e-8)
self.lastdis = dis
return to_numpy(reward)
|
import os
import cv2
import torch
import numpy as np
import argparse
import torch.nn as nn
import torch.nn.functional as F
from DRL.actor import *
from Renderer.stroke_gen import *
from Renderer.model import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
width = 128
parser = argparse.ArgumentParser(description='Learning to Paint')
parser.add_argument('--max_step', default=40, type=int, help='max length for episode')
parser.add_argument('--actor', default='./model/Paint-run1/actor.pkl', type=str, help='Actor model')
parser.add_argument('--renderer', default='./renderer.pkl', type=str, help='renderer model')
parser.add_argument('--img', default='image/test.png', type=str, help='test image')
parser.add_argument('--imgid', default=0, type=int, help='set begin number for generated image')
parser.add_argument('--divide', default=4, type=int, help='divide the target image to get better resolution')
args = parser.parse_args()
canvas_cnt = args.divide * args.divide
T = torch.ones([1, 1, width, width], dtype=torch.float32).to(device)
img = cv2.imread(args.img, cv2.IMREAD_COLOR)
origin_shape = (img.shape[1], img.shape[0])
coord = torch.zeros([1, 2, width, width])
for i in range(width):
for j in range(width):
coord[0, 0, i, j] = i / (width - 1.)
coord[0, 1, i, j] = j / (width - 1.)
coord = coord.to(device) # Coordconv
Decoder = FCN()
Decoder.load_state_dict(torch.load(args.renderer))
def decode(x, canvas): # b * (10 + 3)
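    # Each action bundles 5 strokes of 13 numbers each: 10 Bezier stroke
    # parameters fed to the neural renderer plus 3 RGB color values. The strokes
    # are alpha-composited onto the canvas one at a time below.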
x = x.view(-1, 10 + 3)
stroke = 1 - Decoder(x[:, :10])
stroke = stroke.view(-1, width, width, 1)
color_stroke = stroke * x[:, -3:].view(-1, 1, 1, 3)
stroke = stroke.permute(0, 3, 1, 2)
color_stroke = color_stroke.permute(0, 3, 1, 2)
stroke = stroke.view(-1, 5, 1, width, width)
color_stroke = color_stroke.view(-1, 5, 3, width, width)
res = []
for i in range(5):
canvas = canvas * (1 - stroke[:, i]) + color_stroke[:, i]
res.append(canvas)
return canvas, res
def small2large(x):
# (d * d, width, width) -> (d * width, d * width)
x = x.reshape(args.divide, args.divide, width, width, -1)
x = np.transpose(x, (0, 2, 1, 3, 4))
x = x.reshape(args.divide * width, args.divide * width, -1)
return x
def large2small(x):
# (d * width, d * width) -> (d * d, width, width)
x = x.reshape(args.divide, width, args.divide, width, 3)
x = np.transpose(x, (0, 2, 1, 3, 4))
x = x.reshape(canvas_cnt, width, width, 3)
return x
def smooth(img):
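    # Averages each seam pixel with its 8 neighbours to hide the boundaries left
    # by rendering the image as a divide x divide grid of patches.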
def smooth_pix(img, tx, ty):
if tx == args.divide * width - 1 or ty == args.divide * width - 1 or tx == 0 or ty == 0:
return img
img[tx, ty] = (img[tx, ty] + img[tx + 1, ty] + img[tx, ty + 1] + img[tx - 1, ty] + img[tx, ty - 1] + img[tx + 1, ty - 1] + img[tx - 1, ty + 1] + img[tx - 1, ty - 1] + img[tx + 1, ty + 1]) / 9
return img
for p in range(args.divide):
for q in range(args.divide):
x = p * width
y = q * width
for k in range(width):
img = smooth_pix(img, x + k, y + width - 1)
if q != args.divide - 1:
img = smooth_pix(img, x + k, y + width)
for k in range(width):
img = smooth_pix(img, x + width - 1, y + k)
if p != args.divide - 1:
img = smooth_pix(img, x + width, y + k)
return img
def save_img(res, imgid, divide=False):
output = res.detach().cpu().numpy() # d * d, 3, width, width
output = np.transpose(output, (0, 2, 3, 1))
if divide:
output = small2large(output)
output = smooth(output)
else:
output = output[0]
output = (output * 255).astype('uint8')
output = cv2.resize(output, origin_shape)
cv2.imwrite('output/generated' + str(imgid) + '.png', output)
actor = ResNet(9, 18, 65) # action_bundle = 5, 65 = 5 * 13
actor.load_state_dict(torch.load(args.actor))
actor = actor.to(device).eval()
Decoder = Decoder.to(device).eval()
canvas = torch.zeros([1, 3, width, width]).to(device)
patch_img = cv2.resize(img, (width * args.divide, width * args.divide))
patch_img = large2small(patch_img)
patch_img = np.transpose(patch_img, (0, 3, 1, 2))
patch_img = torch.tensor(patch_img).to(device).float() / 255.
img = cv2.resize(img, (width, width))
img = img.reshape(1, width, width, 3)
img = np.transpose(img, (0, 3, 1, 2))
img = torch.tensor(img).to(device).float() / 255.
os.makedirs('output', exist_ok=True)
with torch.no_grad():
if args.divide != 1:
args.max_step = args.max_step // 2
for i in range(args.max_step):
stepnum = T * i / args.max_step
actions = actor(torch.cat([canvas, img, stepnum, coord], 1))
canvas, res = decode(actions, canvas)
print('canvas step {}, L2Loss = {}'.format(i, ((canvas - img) ** 2).mean()))
for j in range(5):
save_img(res[j], args.imgid)
args.imgid += 1
if args.divide != 1:
canvas = canvas[0].detach().cpu().numpy()
canvas = np.transpose(canvas, (1, 2, 0))
canvas = cv2.resize(canvas, (width * args.divide, width * args.divide))
canvas = large2small(canvas)
canvas = np.transpose(canvas, (0, 3, 1, 2))
canvas = torch.tensor(canvas).to(device).float()
coord = coord.expand(canvas_cnt, 2, width, width)
T = T.expand(canvas_cnt, 1, width, width)
for i in range(args.max_step):
stepnum = T * i / args.max_step
actions = actor(torch.cat([canvas, patch_img, stepnum, coord], 1))
canvas, res = decode(actions, canvas)
print('divided canvas step {}, L2Loss = {}'.format(i, ((canvas - patch_img) ** 2).mean()))
for j in range(5):
save_img(res[j], args.imgid, True)
args.imgid += 1
|
#!/usr/bin/env python3
import cv2
import random
import numpy as np
import argparse
from DRL.evaluator import Evaluator
from utils.util import *
from utils.tensorboard import TensorBoard
import time
exp = os.path.abspath('.').split('/')[-1]
writer = TensorBoard('../train_log/{}'.format(exp))
os.system('ln -sf ../train_log/{} ./log'.format(exp))
os.makedirs('./model', exist_ok=True)
def train(agent, env, evaluate):
train_times = args.train_times
env_batch = args.env_batch
validate_interval = args.validate_interval
max_step = args.max_step
debug = args.debug
episode_train_times = args.episode_train_times
resume = args.resume
output = args.output
time_stamp = time.time()
step = episode = episode_steps = 0
tot_reward = 0.
observation = None
noise_factor = args.noise_factor
while step <= train_times:
step += 1
episode_steps += 1
# reset if it is the start of episode
if observation is None:
observation = env.reset()
agent.reset(observation, noise_factor)
action = agent.select_action(observation, noise_factor=noise_factor)
observation, reward, done, _ = env.step(action)
agent.observe(reward, observation, done, step)
if (episode_steps >= max_step and max_step):
if step > args.warmup:
# [optional] evaluate
if episode > 0 and validate_interval > 0 and episode % validate_interval == 0:
reward, dist = evaluate(env, agent.select_action, debug=debug)
if debug: prRed('Step_{:07d}: mean_reward:{:.3f} mean_dist:{:.3f} var_dist:{:.3f}'.format(step - 1, np.mean(reward), np.mean(dist), np.var(dist)))
writer.add_scalar('validate/mean_reward', np.mean(reward), step)
writer.add_scalar('validate/mean_dist', np.mean(dist), step)
writer.add_scalar('validate/var_dist', np.var(dist), step)
agent.save_model(output)
train_time_interval = time.time() - time_stamp
time_stamp = time.time()
tot_Q = 0.
tot_value_loss = 0.
if step > args.warmup:
if step < 10000 * max_step:
lr = (3e-4, 1e-3)
elif step < 20000 * max_step:
lr = (1e-4, 3e-4)
else:
lr = (3e-5, 1e-4)
for i in range(episode_train_times):
Q, value_loss = agent.update_policy(lr)
tot_Q += Q.data.cpu().numpy()
tot_value_loss += value_loss.data.cpu().numpy()
writer.add_scalar('train/critic_lr', lr[0], step)
writer.add_scalar('train/actor_lr', lr[1], step)
writer.add_scalar('train/Q', tot_Q / episode_train_times, step)
writer.add_scalar('train/critic_loss', tot_value_loss / episode_train_times, step)
if debug: prBlack('#{}: steps:{} interval_time:{:.2f} train_time:{:.2f}' \
.format(episode, step, train_time_interval, time.time()-time_stamp))
time_stamp = time.time()
# reset
observation = None
episode_steps = 0
episode += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Learning to Paint')
# hyper-parameter
parser.add_argument('--warmup', default=400, type=int, help='timestep without training but only filling the replay memory')
parser.add_argument('--discount', default=0.95**5, type=float, help='discount factor')
parser.add_argument('--batch_size', default=96, type=int, help='minibatch size')
parser.add_argument('--rmsize', default=800, type=int, help='replay memory size')
parser.add_argument('--env_batch', default=96, type=int, help='concurrent environment number')
parser.add_argument('--tau', default=0.001, type=float, help='moving average for target network')
parser.add_argument('--max_step', default=40, type=int, help='max length for episode')
parser.add_argument('--noise_factor', default=0, type=float, help='noise level for parameter space noise')
parser.add_argument('--validate_interval', default=50, type=int, help='how many episodes to perform a validation')
parser.add_argument('--validate_episodes', default=5, type=int, help='how many episode to perform during validation')
parser.add_argument('--train_times', default=2000000, type=int, help='total traintimes')
parser.add_argument('--episode_train_times', default=10, type=int, help='train times for each episode')
parser.add_argument('--resume', default=None, type=str, help='Resuming model path for testing')
parser.add_argument('--output', default='./model', type=str, help='Resuming model path for testing')
parser.add_argument('--debug', dest='debug', action='store_true', help='print some info')
parser.add_argument('--seed', default=1234, type=int, help='random seed')
args = parser.parse_args()
args.output = get_output_folder(args.output, "Paint")
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
from DRL.ddpg import DDPG
from DRL.multi import fastenv
fenv = fastenv(args.max_step, args.env_batch, writer)
agent = DDPG(args.batch_size, args.env_batch, args.max_step, \
args.tau, args.discount, args.rmsize, \
writer, args.resume, args.output)
evaluate = Evaluator(args, writer)
print('observation_space', fenv.observation_space, 'action_space', fenv.action_space)
train(agent, fenv, evaluate)
|
import cv2
import torch
import numpy as np
import sys
import torch.nn as nn
import torch.nn.functional as F
from utils.tensorboard import TensorBoard
from Renderer.model import FCN
from Renderer.stroke_gen import *
#writer = TensorBoard("../train_log/")
import torch.optim as optim
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--debug', metavar='fn', default="", help="Dump outputs into file")
parser.add_argument('--script', default=False, help="Script the model")
args = parser.parse_args()
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
criterion = nn.MSELoss()
net = FCN()
if args.script:
net = torch.jit.script(net)
optimizer = optim.Adam(net.parameters(), lr=3e-6)
batch_size = 64
use_cuda = torch.cuda.is_available()
step = 0
def save_model():
if use_cuda:
net.cpu()
torch.save(net.state_dict(), "../renderer.pkl")
if use_cuda:
net.cuda()
def load_weights():
pretrained_dict = torch.load("../renderer.pkl")
model_dict = net.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
#load_weights()
while step < 100:
net.train()
train_batch = []
ground_truth = []
for i in range(batch_size):
f = np.random.uniform(0, 1, 10)
train_batch.append(f)
ground_truth.append(draw(f))
train_batch = torch.tensor(train_batch).float()
ground_truth = torch.tensor(ground_truth).float()
if use_cuda:
net = net.cuda()
train_batch = train_batch.cuda()
ground_truth = ground_truth.cuda()
gen = net(train_batch)
optimizer.zero_grad()
loss = criterion(gen, ground_truth)
loss.backward()
optimizer.step()
print(step, loss.item())
if step < 200000:
lr = 1e-4
elif step < 400000:
lr = 1e-5
else:
lr = 1e-6
for param_group in optimizer.param_groups:
param_group["lr"] = lr
#writer.add_scalar("train/loss", loss.item(), step)
if step % 100 == 0:
net.eval()
gen = net(train_batch)
loss = criterion(gen, ground_truth)
#writer.add_scalar("val/loss", loss.item(), step)
for i in range(32):
G = gen[i].cpu().data.numpy()
GT = ground_truth[i].cpu().data.numpy()
#writer.add_image("train/gen{}.png".format(i), G, step)
#writer.add_image("train/ground_truth{}.png".format(i), GT, step)
if step % 1000 == 0:
save_model()
step += 1
if args.debug:
torch.save(gen, args.debug)
|
import cv2
import numpy as np
def normal(x, width):
return (int)(x * (width - 1) + 0.5)
def draw(f, width=128):
x0, y0, x1, y1, x2, y2, z0, z2, w0, w2 = f
x1 = x0 + (x2 - x0) * x1
y1 = y0 + (y2 - y0) * y1
x0 = normal(x0, width * 2)
x1 = normal(x1, width * 2)
x2 = normal(x2, width * 2)
y0 = normal(y0, width * 2)
y1 = normal(y1, width * 2)
y2 = normal(y2, width * 2)
z0 = (int)(1 + z0 * width // 2)
z2 = (int)(1 + z2 * width // 2)
canvas = np.zeros([width * 2, width * 2]).astype('float32')
tmp = 1. / 100
for i in range(100):
t = i * tmp
x = (int)((1-t) * (1-t) * x0 + 2 * t * (1-t) * x1 + t * t * x2)
y = (int)((1-t) * (1-t) * y0 + 2 * t * (1-t) * y1 + t * t * y2)
z = (int)((1-t) * z0 + t * z2)
w = (1-t) * w0 + t * w2
cv2.circle(canvas, (y, x), z, w, -1)
return 1 - cv2.resize(canvas, dsize=(width, width))
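# Hedged usage sketch (not part of the original file): draw() maps a 10-dim
# parameter vector (endpoints, control point, radii z0/z2 and opacities w0/w2
# of a quadratic Bezier curve) to a width x width grayscale stroke image.
if __name__ == '__main__':
    f = np.random.uniform(0, 1, 10)
    stroke = draw(f, width=128)
    print(stroke.shape, stroke.dtype)  # (128, 128) float32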
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
class FCN(nn.Module):
def __init__(self):
super(FCN, self).__init__()
self.fc1 = (nn.Linear(10, 512))
self.fc2 = (nn.Linear(512, 1024))
self.fc3 = (nn.Linear(1024, 2048))
self.fc4 = (nn.Linear(2048, 4096))
self.conv1 = (nn.Conv2d(16, 32, 3, 1, 1))
self.conv2 = (nn.Conv2d(32, 32, 3, 1, 1))
self.conv3 = (nn.Conv2d(8, 16, 3, 1, 1))
self.conv4 = (nn.Conv2d(16, 16, 3, 1, 1))
self.conv5 = (nn.Conv2d(4, 8, 3, 1, 1))
self.conv6 = (nn.Conv2d(8, 4, 3, 1, 1))
self.pixel_shuffle = nn.PixelShuffle(2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = x.view(-1, 16, 16, 16)
x = F.relu(self.conv1(x))
x = self.pixel_shuffle(self.conv2(x))
x = F.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
x = F.relu(self.conv5(x))
x = self.pixel_shuffle(self.conv6(x))
x = torch.sigmoid(x)
return 1 - x.view(-1, 128, 128)
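# Hedged usage sketch (not part of the original file): the FCN renderer expands
# a batch of 10-dim stroke parameters through fully connected layers to a
# 16x16x16 volume, then upsamples spatially with three PixelShuffle stages
# (16 -> 32 -> 64 -> 128) into 128x128 stroke alpha maps.
if __name__ == '__main__':
    net = FCN()
    out = net(torch.rand(4, 10))
    print(out.shape)  # torch.Size([4, 128, 128])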
|
import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from ..utils.util import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dim = 128
LAMBDA = 10 # Gradient penalty lambda hyperparameter
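# TReLU is a ReLU with a learnable threshold: F.relu(x - alpha) + alpha equals
# max(x, alpha), and alpha starts at 0, i.e. a plain ReLU.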
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
self.conv4 = weightNorm(nn.Conv2d(128, 1, 5, 2, 2))
self.relu0 = TReLU()
self.relu1 = TReLU()
self.relu2 = TReLU()
self.relu3 = TReLU()
def forward(self, x):
x = self.conv0(x)
x = self.relu0(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.relu3(x)
x = self.conv4(x)
x = F.avg_pool2d(x, 4)
x = x.view(-1, 1)
return x
netD = Discriminator()
target_netD = Discriminator()
netD = netD.to(device)
target_netD = target_netD.to(device)
hard_update(target_netD, netD)
optimizerD = Adam(netD.parameters(), lr=3e-4, betas=(0.5, 0.999))
def cal_gradient_penalty(netD, real_data, fake_data, batch_size):
alpha = torch.rand(batch_size, 1)
alpha = alpha.expand(batch_size, int(real_data.nelement()/batch_size)).contiguous()
alpha = alpha.view(batch_size, 6, dim, dim)
alpha = alpha.to(device)
fake_data = fake_data.view(batch_size, 6, dim, dim)
    interpolates = (alpha * real_data.data + (1 - alpha) * fake_data.data).requires_grad_(True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(disc_interpolates, interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
def cal_reward(fake_data, real_data):
return target_netD(torch.cat([real_data, fake_data], 1))
def save_gan(path):
netD.cpu()
torch.save(netD.state_dict(),'{}/wgan.pkl'.format(path))
netD.to(device)
def load_gan(path):
netD.load_state_dict(torch.load('{}/wgan.pkl'.format(path)))
def update(fake_data, real_data):
fake_data = fake_data.detach()
real_data = real_data.detach()
fake = torch.cat([real_data, fake_data], 1)
real = torch.cat([real_data, real_data], 1)
D_real = netD(real)
D_fake = netD(fake)
gradient_penalty = cal_gradient_penalty(netD, real, fake, real.shape[0])
optimizerD.zero_grad()
D_cost = D_fake.mean() - D_real.mean() + gradient_penalty
D_cost.backward()
optimizerD.step()
soft_update(target_netD, netD, 0.001)
return D_fake.mean(), D_real.mean(), gradient_penalty
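# Hedged usage sketch (not part of the original file; wrapped in a function
# because this module uses relative imports and cannot run as __main__): one
# WGAN-GP discriminator step on random canvases. update() concatenates
# (target, canvas) pairs channel-wise, so netD sees 6-channel inputs at the
# module-level resolution dim = 128.
def _demo_update():
    fake_canvas = torch.rand(4, 3, dim, dim, device=device)
    real_canvas = torch.rand(4, 3, dim, dim, device=device)
    return update(fake_canvas, real_canvas)  # (mean D_fake, mean D_real, gradient penalty)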
|
import cv2
import torch
import numpy as np
from ..env import Paint
from ..utils.util import *
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class fastenv():
def __init__(self,
max_episode_length=10, env_batch=64,
writer=None, images=None, device="cpu", Decoder=None):
self.max_episode_length = max_episode_length
self.env_batch = env_batch
self.device = device
self.Decoder = Decoder
self.env = Paint(self.env_batch, self.max_episode_length, device=self.device, Decoder=self.Decoder)
self.env.load_data(images)
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.writer = writer
self.test = False
self.log = 0
def save_image(self, log, step):
for i in range(self.env_batch):
if self.env.imgid[i] <= 10:
canvas = cv2.cvtColor((to_numpy(self.env.canvas[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
self.writer.add_image('{}/canvas_{}.png'.format(str(self.env.imgid[i]), str(step)), canvas, log)
if step == self.max_episode_length:
for i in range(self.env_batch):
if self.env.imgid[i] < 50:
gt = cv2.cvtColor((to_numpy(self.env.gt[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
canvas = cv2.cvtColor((to_numpy(self.env.canvas[i].permute(1, 2, 0))), cv2.COLOR_BGR2RGB)
self.writer.add_image(str(self.env.imgid[i]) + '/_target.png', gt, log)
self.writer.add_image(str(self.env.imgid[i]) + '/_canvas.png', canvas, log)
def step(self, action):
with torch.no_grad():
ob, r, d, _ = self.env.step(torch.tensor(action).to(self.device))
if d[0]:
if not self.test:
self.dist = self.get_dist()
for i in range(self.env_batch):
if self.writer:
self.writer.add_scalar('train/dist', self.dist[i], self.log)
self.log += 1
return ob, r, d, _
def get_dist(self):
return to_numpy((((self.env.gt.float() - self.env.canvas.float()) / 255) ** 2).mean(1).mean(1).mean(1))
def reset(self, test=False, episode=0):
self.test = test
ob = self.env.reset(self.test, episode * self.env_batch)
return ob
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
from ..Renderer.model import *
from .rpm import rpm
from .actor import *
from .critic import *
from .wgan import *
from ..utils.util import *
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Instead of having these as globals, create Decoder inside TB model and criterion in this DDPG model.
# criterion = nn.MSELoss()
# Use default Renderer instead of importing one.
# Decoder = FCN()
# Decoder.load_state_dict(torch.load('../renderer.pkl'))
def decode(x, canvas, Decoder): # b * (10 + 3)
x = x.view(-1, 10 + 3)
stroke = 1 - Decoder(x[:, :10])
stroke = stroke.view(-1, 128, 128, 1)
color_stroke = stroke * x[:, -3:].view(-1, 1, 1, 3)
stroke = stroke.permute(0, 3, 1, 2)
color_stroke = color_stroke.permute(0, 3, 1, 2)
stroke = stroke.view(-1, 5, 1, 128, 128)
color_stroke = color_stroke.view(-1, 5, 3, 128, 128)
for i in range(5):
canvas = canvas * (1 - stroke[:, i]) + color_stroke[:, i]
return canvas
def cal_trans(s, t):
return (s.transpose(0, 3) * t).transpose(0, 3)
class DDPG(object):
def __init__(self, batch_size=64, env_batch=1, max_step=40,
tau=0.001, discount=0.9, rmsize=800,
writer=None, resume=None, output_path=None, device='cpu', Decoder=None):
self.max_step = max_step
self.env_batch = env_batch
self.batch_size = batch_size
self.device = device
self.actor = ResNet(9, 18, 65) # target, canvas, stepnum, coordconv 3 + 3 + 1 + 2
self.actor_target = ResNet(9, 18, 65)
self.critic = ResNet_wobn(3 + 9, 18, 1) # add the last canvas for better prediction
self.critic_target = ResNet_wobn(3 + 9, 18, 1)
self.criterion = nn.MSELoss()
self.Decoder = Decoder
self.actor_optim = Adam(self.actor.parameters(), lr=1e-2)
self.critic_optim = Adam(self.critic.parameters(), lr=1e-2)
if resume is not None:
self.load_weights(resume)
hard_update(self.actor_target, self.actor)
hard_update(self.critic_target, self.critic)
# Create replay buffer
self.memory = rpm(rmsize * max_step)
# Hyper-parameters
self.tau = tau
self.discount = discount
# Tensorboard
self.writer = writer
self.log = 0
self.coord = torch.zeros([1, 2, 128, 128])
for i in range(128):
for j in range(128):
self.coord[0, 0, i, j] = i / 127.
self.coord[0, 1, i, j] = j / 127.
self.coord = self.coord.to(self.device)
self.state = [None] * self.env_batch # Most recent state
self.action = [None] * self.env_batch # Most recent action
self.choose_device()
def play(self, state, target=False):
state = torch.cat((state[:, :6].float() / 255, state[:, 6:7].float() / self.max_step,
self.coord.expand(state.shape[0], 2, 128, 128)), 1)
if target:
return self.actor_target(state)
else:
return self.actor(state)
def update_gan(self, state):
canvas = state[:, :3]
gt = state[:, 3 : 6]
fake, real, penal = update(canvas.float() / 255, gt.float() / 255)
if self.log % 20 == 0 and self.writer:
self.writer.add_scalar('train/gan_fake', fake, self.log)
self.writer.add_scalar('train/gan_real', real, self.log)
self.writer.add_scalar('train/gan_penal', penal, self.log)
def evaluate(self, state, action, target=False):
T = state[:, 6 : 7]
gt = state[:, 3 : 6].float() / 255
canvas0 = state[:, :3].float() / 255
canvas1 = decode(action, canvas0, self.Decoder)
gan_reward = cal_reward(canvas1, gt) - cal_reward(canvas0, gt)
# L2_reward = ((canvas0 - gt) ** 2).mean(1).mean(1).mean(1) - ((canvas1 - gt) ** 2).mean(1).mean(1).mean(1)
coord_ = self.coord.expand(state.shape[0], 2, 128, 128)
merged_state = torch.cat([canvas0, canvas1, gt, (T + 1).float() / self.max_step, coord_], 1)
# canvas0 is not necessarily added
if target:
Q = self.critic_target(merged_state)
return (Q + gan_reward), gan_reward
else:
Q = self.critic(merged_state)
if self.log % 20 == 0 and self.writer:
self.writer.add_scalar('train/expect_reward', Q.mean(), self.log)
self.writer.add_scalar('train/gan_reward', gan_reward.mean(), self.log)
return (Q + gan_reward), gan_reward
def update_policy(self, lr):
self.log += 1
for param_group in self.critic_optim.param_groups:
param_group['lr'] = lr[0]
for param_group in self.actor_optim.param_groups:
param_group['lr'] = lr[1]
# Sample batch
state, action, reward, \
next_state, terminal = self.memory.sample_batch(self.batch_size, self.device)
self.update_gan(next_state)
with torch.no_grad():
next_action = self.play(next_state, True)
target_q, _ = self.evaluate(next_state, next_action, True)
target_q = self.discount * ((1 - terminal.float()).view(-1, 1)) * target_q
cur_q, step_reward = self.evaluate(state, action)
target_q += step_reward.detach()
value_loss = self.criterion(cur_q, target_q)
self.critic.zero_grad()
value_loss.backward(retain_graph=True)
self.critic_optim.step()
action = self.play(state)
pre_q, _ = self.evaluate(state.detach(), action)
policy_loss = -pre_q.mean()
self.actor.zero_grad()
policy_loss.backward(retain_graph=True)
self.actor_optim.step()
# Target update
soft_update(self.actor_target, self.actor, self.tau)
soft_update(self.critic_target, self.critic, self.tau)
return -policy_loss, value_loss
def observe(self, reward, state, done, step):
s0 = torch.tensor(self.state, device='cpu')
a = to_tensor(self.action, "cpu")
r = to_tensor(reward, "cpu")
s1 = torch.tensor(state, device='cpu')
d = to_tensor(done.astype('float32'), "cpu")
for i in range(self.env_batch):
self.memory.append([s0[i], a[i], r[i], s1[i], d[i]])
self.state = state
def noise_action(self, noise_factor, state, action):
for i in range(self.env_batch):
action[i] = action[i] + np.random.normal(0, self.noise_level[i], action.shape[1:]).astype('float32')
return np.clip(action.astype('float32'), 0, 1)
def select_action(self, state, return_fix=False, noise_factor=0):
self.eval()
with torch.no_grad():
action = self.play(state)
action = to_numpy(action)
if noise_factor > 0:
action = self.noise_action(noise_factor, state, action)
self.train()
self.action = action
if return_fix:
return action
return self.action
def reset(self, obs, factor):
self.state = obs
self.noise_level = np.random.uniform(0, factor, self.env_batch)
def load_weights(self, path):
if path is None:
return
self.actor.load_state_dict(torch.load('{}/actor.pkl'.format(path)))
self.critic.load_state_dict(torch.load('{}/critic.pkl'.format(path)))
load_gan(path)
def save_model(self, path):
self.actor.cpu()
self.critic.cpu()
torch.save(self.actor.state_dict(), '{}/actor.pkl'.format(path))
torch.save(self.critic.state_dict(), '{}/critic.pkl'.format(path))
save_gan(path)
self.choose_device()
def eval(self):
self.actor.eval()
self.actor_target.eval()
self.critic.eval()
self.critic_target.eval()
def train(self):
self.actor.train()
self.actor_target.train()
self.critic.train()
self.critic_target.train()
def choose_device(self):
self.Decoder.to(self.device)
self.actor.to(self.device)
self.actor_target.to(self.device)
self.critic.to(self.device)
self.critic_target.to(self.device)
|
# from collections import deque
import numpy as np
import random
import torch
import pickle
class rpm(object):
# replay memory
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.buffer = []
self.index = 0
def append(self, obj):
if self.size() > self.buffer_size:
print('buffer size larger than set value, trimming...')
self.buffer = self.buffer[(self.size() - self.buffer_size):]
elif self.size() == self.buffer_size:
self.buffer[self.index] = obj
self.index += 1
self.index %= self.buffer_size
else:
self.buffer.append(obj)
def size(self):
return len(self.buffer)
def sample_batch(self, batch_size, device, only_state=False):
if self.size() < batch_size:
batch = random.sample(self.buffer, self.size())
else:
batch = random.sample(self.buffer, batch_size)
if only_state:
res = torch.stack(tuple(item[3] for item in batch), dim=0)
return res.to(device)
else:
            item_count = 5
            res = []
            for i in range(item_count):
k = torch.stack(tuple(item[i] for item in batch), dim=0)
res.append(k.to(device))
return res[0], res[1], res[2], res[3], res[4]
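# Hedged usage sketch (not part of the original file): the replay memory stores
# 5-tuples of CPU tensors (state, action, reward, next_state, terminal) and
# samples uniformly, stacking each field into a batch on the requested device.
if __name__ == '__main__':
    memory = rpm(buffer_size=8)
    for _ in range(8):
        memory.append([torch.zeros(3), torch.zeros(2), torch.zeros(1),
                       torch.zeros(3), torch.zeros(1)])
    s, a, r, s1, d = memory.sample_batch(4, 'cpu')
    print(s.shape, a.shape)  # torch.Size([4, 3]) torch.Size([4, 2])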
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
return (nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False))
def cfg(depth):
depth_lst = [18, 34, 50, 101, 152]
    assert (depth in depth_lst), "Error: ResNet depth should be one of 18, 34, 50, 101, 152"
cf_dict = {
'18': (BasicBlock, [2,2,2,2]),
'34': (BasicBlock, [3,4,6,3]),
'50': (Bottleneck, [3,4,6,3]),
'101':(Bottleneck, [3,4,23,3]),
'152':(Bottleneck, [3,8,36,3]),
}
return cf_dict[str(depth)]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = (nn.Conv2d(in_planes, planes, kernel_size=1, bias=False))
self.conv2 = (nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False))
self.conv3 = (nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False))
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, num_inputs, depth, num_outputs):
super(ResNet, self).__init__()
self.in_planes = 64
block, num_blocks = cfg(depth)
self.conv1 = conv3x3(num_inputs, 64, 2)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=2)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc = nn.Linear(512, num_outputs)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.avg_pool2d(x, 4)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = torch.sigmoid(x)
return x
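# Hedged usage sketch (not part of the original file): in this repo the actor is
# ResNet(9, 18, 65) -- a 9-channel 128x128 state (canvas 3 + target 3 + step 1 +
# coordconv 2) mapped to 65 sigmoid outputs, i.e. 5 strokes x 13 parameters.
if __name__ == '__main__':
    actor = ResNet(9, 18, 65)
    state = torch.rand(2, 9, 128, 128)
    print(actor(state).shape)  # torch.Size([2, 65])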
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.weight_norm as weightNorm
from torch.autograd import Variable
import sys
def conv3x3(in_planes, out_planes, stride=1):
return weightNorm(nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True))
class TReLU(nn.Module):
def __init__(self):
super(TReLU, self).__init__()
self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.alpha.data.fill_(0)
def forward(self, x):
x = F.relu(x - self.alpha) + self.alpha
return x
def cfg(depth):
depth_lst = [18, 34, 50, 101, 152]
    assert (depth in depth_lst), "Error: ResNet depth should be one of 18, 34, 50, 101, 152"
cf_dict = {
'18': (BasicBlock, [2,2,2,2]),
'34': (BasicBlock, [3,4,6,3]),
'50': (Bottleneck, [3,4,6,3]),
'101':(Bottleneck, [3,4,23,3]),
'152':(Bottleneck, [3,8,36,3]),
}
return cf_dict[str(depth)]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
)
self.relu_1 = TReLU()
self.relu_2 = TReLU()
def forward(self, x):
out = self.relu_1(self.conv1(x))
out = self.conv2(out)
out += self.shortcut(x)
out = self.relu_2(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = weightNorm(nn.Conv2d(in_planes, planes, kernel_size=1, bias=True))
self.conv2 = weightNorm(nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True))
self.conv3 = weightNorm(nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=True))
self.relu_1 = TReLU()
self.relu_2 = TReLU()
self.relu_3 = TReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
weightNorm(nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=True)),
)
def forward(self, x):
out = self.relu_1(self.conv1(x))
out = self.relu_2(self.conv2(out))
out = self.conv3(out)
out += self.shortcut(x)
out = self.relu_3(out)
return out
class ResNet_wobn(nn.Module):
def __init__(self, num_inputs, depth, num_outputs):
super(ResNet_wobn, self).__init__()
self.in_planes = 64
block, num_blocks = cfg(depth)
self.conv1 = conv3x3(num_inputs, 64, 2)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=2)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc = nn.Linear(512, num_outputs)
self.relu_1 = TReLU()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = self.relu_1(self.conv1(x))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = F.avg_pool2d(x, 4)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
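# Hedged usage sketch (not part of the original file): the DDPG critic is
# ResNet_wobn(3 + 9, 18, 1) -- the 9 actor state channels plus the 3-channel
# rendered next canvas, scored with a single unbounded Q value (batch-norm-free
# variant using weight-normalized convolutions and TReLU activations).
if __name__ == '__main__':
    critic = ResNet_wobn(3 + 9, 18, 1)
    print(critic(torch.rand(2, 12, 128, 128)).shape)  # torch.Size([2, 1])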
|
import numpy as np
from ..utils.util import *
class Evaluator(object):
def __init__(self, args, env_batch, writer):
self.validate_episodes = args.validate_episodes
self.max_step = args.max_step
self.env_batch = env_batch
self.writer = writer
self.log = 0
def __call__(self, env, policy, debug=False):
observation = None
for episode in range(self.validate_episodes):
# reset at the start of episode
observation = env.reset(test=True, episode=episode)
episode_steps = 0
assert observation is not None
# start episode
episode_reward = np.zeros(self.env_batch)
while (episode_steps < self.max_step or not self.max_step):
action = policy(observation)
                observation, reward, done, step_num = env.step(action)
episode_reward += reward
episode_steps += 1
if self.writer:
env.save_image(self.log, episode_steps)
dist = env.get_dist()
self.log += 1
return episode_reward, dist
|
import os
import torch
from torch.autograd import Variable
USE_CUDA = torch.cuda.is_available()
def prRed(prt): print("\033[91m {}\033[00m" .format(prt))
def prGreen(prt): print("\033[92m {}\033[00m" .format(prt))
def prYellow(prt): print("\033[93m {}\033[00m" .format(prt))
def prLightPurple(prt): print("\033[94m {}\033[00m" .format(prt))
def prPurple(prt): print("\033[95m {}\033[00m" .format(prt))
def prCyan(prt): print("\033[96m {}\033[00m" .format(prt))
def prLightGray(prt): print("\033[97m {}\033[00m" .format(prt))
def prBlack(prt): print("\033[98m {}\033[00m" .format(prt))
def to_numpy(var):
return var.cpu().data.numpy() if USE_CUDA else var.data.numpy()
def to_tensor(ndarray, device):
return torch.tensor(ndarray, dtype=torch.float, device=device)
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau
)
def hard_update(target, source):
for m1, m2 in zip(target.modules(), source.modules()):
m1._buffers = m2._buffers.copy()
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def get_output_folder(parent_dir, env_name):
"""Return save folder.
Assumes folders in the parent_dir have suffix -run{run
number}. Finds the highest run number and sets the output folder
to that number + 1. This is just convenient so that if you run the
same script multiple times tensorboard can plot all of the results
on the same plots with different names.
Parameters
----------
parent_dir: str
Path of the directory containing all experiment runs.
Returns
-------
parent_dir/run_dir
Path to this run's save directory.
"""
os.makedirs(parent_dir, exist_ok=True)
experiment_id = 0
for folder_name in os.listdir(parent_dir):
if not os.path.isdir(os.path.join(parent_dir, folder_name)):
continue
try:
folder_name = int(folder_name.split('-run')[-1])
if folder_name > experiment_id:
experiment_id = folder_name
        except ValueError:
            pass
experiment_id += 1
parent_dir = os.path.join(parent_dir, env_name)
parent_dir = parent_dir + '-run{}'.format(experiment_id)
os.makedirs(parent_dir, exist_ok=True)
return parent_dir
|
import PIL
from io import BytesIO
import tensorboardX as tb
from tensorboardX.summary import Summary
class TensorBoard(object):
def __init__(self, model_dir):
self.summary_writer = tb.FileWriter(model_dir)
def add_image(self, tag, img, step):
summary = Summary()
bio = BytesIO()
if type(img) == str:
img = PIL.Image.open(img)
elif type(img) == PIL.Image.Image:
pass
else:
            img = PIL.Image.fromarray(img)  # scipy.misc.toimage was removed in SciPy 1.2
img.save(bio, format="png")
image_summary = Summary.Image(encoded_image_string=bio.getvalue())
summary.value.add(tag=tag, image=image_summary)
self.summary_writer.add_summary(summary, global_step=step)
def add_scalar(self, tag, value, step):
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.summary_writer.add_summary(summary, global_step=step)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
# Original train batch size per device: 8
# Source: https://github.com/huggingface/transformers/blob/master/examples/flax/language-modeling/run_t5_mlm_flax.py#L83
DEFAULT_TRAIN_BSIZE = 2
# Original eval batch size per device: 8
# Downscale to 1 to fit in Nvidia T4 of the infra
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_T5_large", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
# Train batch size: use the smallest example batch of 128 (assuming only 1 worker)
# Source: https://arxiv.org/pdf/1404.5997.pdf
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(model_name="alexnet", test=test, device=device,
batch_size=batch_size, weights=models.AlexNet_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
import torch.nn as nn
import torch
from types import SimpleNamespace
import torch.utils.data as data
class DenseLayer(nn.Module):
def __init__(self, c_in, bn_size, growth_rate, act_fn):
"""
Inputs:
c_in - Number of input channels
bn_size - Bottleneck size (factor of growth rate) for the output of the 1x1 convolution. Typically between 2 and 4.
growth_rate - Number of output channels of the 3x3 convolution
act_fn - Activation class constructor (e.g. nn.ReLU)
"""
super().__init__()
self.net = nn.Sequential(
nn.BatchNorm2d(c_in),
act_fn(),
nn.Conv2d(c_in, bn_size * growth_rate, kernel_size=1, bias=False),
nn.BatchNorm2d(bn_size * growth_rate),
act_fn(),
nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, padding=1, bias=False)
)
def forward(self, x):
out = self.net(x)
out = torch.cat([out, x], dim=1)
return out
class DenseBlock(nn.Module):
def __init__(self, c_in, num_layers, bn_size, growth_rate, act_fn):
"""
Inputs:
c_in - Number of input channels
num_layers - Number of dense layers to apply in the block
bn_size - Bottleneck size to use in the dense layers
growth_rate - Growth rate to use in the dense layers
act_fn - Activation function to use in the dense layers
"""
super().__init__()
layers = []
for layer_idx in range(num_layers):
layers.append(
DenseLayer(c_in=c_in + layer_idx * growth_rate, # Input channels are original plus the feature maps from previous layers
bn_size=bn_size,
growth_rate=growth_rate,
act_fn=act_fn)
)
self.block = nn.Sequential(*layers)
def forward(self, x):
out = self.block(x)
return out
class TransitionLayer(nn.Module):
def __init__(self, c_in, c_out, act_fn):
super().__init__()
self.transition = nn.Sequential(
nn.BatchNorm2d(c_in),
act_fn(),
nn.Conv2d(c_in, c_out, kernel_size=1, bias=False),
# Average the output for each 2x2 pixel group
nn.AvgPool2d(kernel_size=2, stride=2)
)
def forward(self, x):
return self.transition(x)
class DenseNet(nn.Module):
def __init__(self, num_classes=10, num_layers=[6, 6, 6, 6], bn_size=2, growth_rate=16, act_fn_name="relu", **kwargs):
super().__init__()
act_fn_by_name = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leakyrelu": nn.LeakyReLU,
"gelu": nn.GELU
}
self.hparams = SimpleNamespace(num_classes=num_classes,
num_layers=num_layers,
bn_size=bn_size,
growth_rate=growth_rate,
act_fn_name=act_fn_name,
act_fn=act_fn_by_name[act_fn_name])
self._create_network()
self._init_params()
def _create_network(self):
# The start number of hidden channels
c_hidden = self.hparams.growth_rate * self.hparams.bn_size
# A first convolution on the original image to scale up the channel size
self.input_net = nn.Sequential(
# No batch norm or activation function as done inside the Dense layers
nn.Conv2d(3, c_hidden, kernel_size=3, padding=1)
)
# Creating the dense blocks, eventually including transition layers
blocks = []
for block_idx, num_layers in enumerate(self.hparams.num_layers):
blocks.append(
DenseBlock(c_in=c_hidden,
num_layers=num_layers,
bn_size=self.hparams.bn_size,
growth_rate=self.hparams.growth_rate,
act_fn=self.hparams.act_fn)
)
# Overall output of the dense block
c_hidden = c_hidden + num_layers * self.hparams.growth_rate
# Don't apply transition layer on last block
if block_idx < len(self.hparams.num_layers) - 1:
blocks.append(
TransitionLayer(c_in=c_hidden,
c_out=c_hidden // 2,
act_fn=self.hparams.act_fn))
c_hidden = c_hidden // 2
self.blocks = nn.Sequential(*blocks)
# Mapping to classification output
self.output_net = nn.Sequential(
# The features have not passed a non-linearity until here.
nn.BatchNorm2d(c_hidden),
self.hparams.act_fn(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(c_hidden, self.hparams.num_classes)
)
def _init_params(self):
# Based on our discussion in Tutorial 4, we should initialize the convolutions according to the activation function
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, nonlinearity=self.hparams.act_fn_name)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.input_net(x)
x = self.blocks(x)
x = self.output_net(x)
return x
class Model(BenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
# Source: https://github.com/phlippe/uvadlc_notebooks_benchmarking/blob/main/PyTorch/Tutorial5_Inception_ResNet_DenseNet.py
DEFAULT_TRAIN_BSIZE = 128
DEFAULT_EVAL_BSIZE = 128
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
self.model = DenseNet()
self.model.to(device)
self.example_inputs = (
torch.randn((self.batch_size, 3, 32, 32), device=self.device),
)
self.example_target = torch.randint(0, 10, (self.batch_size,), device=self.device)
dataset = data.TensorDataset(self.example_inputs[0], self.example_target)
dummy_loader = data.DataLoader(dataset, batch_size=self.batch_size)
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=1e-3, weight_decay=1e-4)
self.criterion = nn.CrossEntropyLoss()
def get_module(self):
return self.model, self.example_inputs
def train(self):
model = self.model
(images, ) = self.example_inputs
model.train()
targets = self.example_target
output = model(images)
loss = self.criterion(output, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def eval(self):
model = self.model
(images, ) = self.example_inputs
model.eval()
with torch.no_grad():
out = model(images)
return (out,)
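# Hedged usage sketch (not part of the original file): the DenseNet above is
# CIFAR-sized, mapping 3x32x32 images to 10 class logits through four dense
# blocks of 6 layers (growth rate 16) separated by halving transition layers.
if __name__ == '__main__':
    net = DenseNet()
    print(net(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])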
|
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.DETECTION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='vovnet39a', device=device,
                         batch_size=batch_size, extra_args=extra_args)
|