st178368 | just do:
torch.distributed.barrier()
without the if,
since:
This collective blocks processes until the whole group enters this function |
st178369 | Thank you very much @iffiX. I removed the if statement, and here is the code I was running:
import torch
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
import os
import random
import numpy as np
def set_random_seeds(random_seed=0):
torch.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
def evaluate(model, device, test_loader):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data[0].to(device), data[1].to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
return accuracy
def main():
num_epochs_default = 100
batch_size_default = 256 # 1024
learning_rate_default = 0.1
random_seed_default = 0
model_dir_default = "saved_models"
model_filename_default = "resnet_distributed.pth"
# Each process runs on 1 GPU device specified by the local_rank argument.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--local_rank", type=int, help="Local rank. Necessary for using the torch.distributed.launch utility.")
parser.add_argument("--num_epochs", type=int, help="Number of training epochs.", default=num_epochs_default)
parser.add_argument("--batch_size", type=int, help="Training batch size for one process.", default=batch_size_default)
parser.add_argument("--learning_rate", type=float, help="Learning rate.", default=learning_rate_default)
parser.add_argument("--random_seed", type=int, help="Random seed.", default=random_seed_default)
parser.add_argument("--model_dir", type=str, help="Directory for saving models.", default=model_dir_default)
parser.add_argument("--model_filename", type=str, help="Model filename.", default=model_filename_default)
parser.add_argument("--resume", action="store_true", help="Resume training from saved checkpoint.")
argv = parser.parse_args()
local_rank = argv.local_rank
num_epochs = argv.num_epochs
batch_size = argv.batch_size
learning_rate = argv.learning_rate
random_seed = argv.random_seed
model_dir = argv.model_dir
model_filename = argv.model_filename
resume = argv.resume
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
# torch.distributed.init_process_group(backend="gloo")
if local_rank != 0:
torch.distributed.barrier()
# Create directories outside the PyTorch program
# Only create directory in one process because it is not multiprocess safe
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Prepare dataset and dataloader
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_set = torchvision.datasets.CIFAR10(root="data", train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root="data", train=False, download=True, transform=transform)
model_filepath = os.path.join(model_dir, model_filename)
# We need to use seeds to make sure that the models initialized in different processes are the same
set_random_seeds(random_seed=random_seed)
# Encapsulate the model on the GPU assigned to the current process
model = torchvision.models.resnet18(pretrained=False)
'''
if local_rank != 0:
torch.distributed.barrier()
'''
torch.distributed.barrier()
device = torch.device("cuda:{}".format(local_rank))
model = model.to(device)
ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
# We only save the model who uses device "cuda:0"
# To resume, the device for the saved model would also be "cuda:0"
if resume == True:
map_location = {"cuda:0": "cuda:{}".format(local_rank)}
ddp_model.load_state_dict(torch.load(model_filepath, map_location=map_location))
# Restricts data loading to a subset of the dataset exclusive to the current process
train_sampler = DistributedSampler(dataset=train_set)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, sampler=train_sampler, num_workers=8)
# Test loader does not have to follow distributed sampling strategy
test_loader = DataLoader(dataset=test_set, batch_size=128, shuffle=False, num_workers=8)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-5)
# Loop over the dataset multiple times
for epoch in range(num_epochs):
print("Local Rank: {}, Epoch: {}, Training ...".format(local_rank, epoch))
# Save and evaluate model routinely
if epoch % 10 == 0:
if local_rank == 0:
accuracy = evaluate(model=ddp_model, device=device, test_loader=test_loader)
torch.save(ddp_model.state_dict(), model_filepath)
print("-" * 75)
print("Epoch: {}, Accuracy: {}".format(epoch, accuracy))
print("-" * 75)
ddp_model.train()
for data in train_loader:
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = ddp_model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if __name__ == "__main__":
main()
However, it still hung. |
st178370 | My former implementation was actually inspired by the HuggingFace Transformers implementation. Here is how they use torch.distributed.barrier.
github.com
huggingface/transformers/blob/c76c3cebed3c707178d9f721349c5abd5206a57f/examples/run_glue.py#L360
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
github.com
huggingface/transformers/blob/c76c3cebed3c707178d9f721349c5abd5206a57f/examples/run_glue.py#L401
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
I did not run their code myself, by the way. |
st178371 | leimao:
However, it got halted on a multi-node multi-GPU machine. Can anyone suggest if it is a PyTorch bug or it is my problem?
Hey @leimao, which line caused the hang on rank0 and other ranks?
BTW, how did you launch the program? Were the following parameters used in this experiment? And does it work if you remove the barrier + save/load code?
$ python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="192.168.0.1" --master_port=1234 resnet_ddp.py
$ python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="192.168.0.1" --master_port=1234 resnet_ddp.py |
st178372 | One more question: during the execution, did you set resume to True or False? I am not confident that ddp_model.load_state_dict can restore all DDP states properly. We don’t have tests covering that yet. It might be safer to save ddp_model.module and then reconstruct DDP instances from the loaded ddp_model.module. |
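For reference, a minimal sketch of the "save ddp_model.module, then reconstruct DDP" pattern suggested here (illustrative, not code from this thread; it assumes the process group is already initialized, model has been moved to cuda:local_rank, and the checkpoint path is a placeholder):
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def save_checkpoint(ddp_model, path="checkpoint.pth"):
    # Checkpoint only the wrapped module's parameters/buffers, not the DDP wrapper.
    torch.save(ddp_model.module.state_dict(), path)

def load_into_new_ddp(model, local_rank, path="checkpoint.pth"):
    # Load onto this rank's device, then wrap the module with DDP again.
    map_location = {"cuda:0": "cuda:{}".format(local_rank)}
    model.load_state_dict(torch.load(path, map_location=map_location))
    return DDP(model, device_ids=[local_rank], output_device=local_rank)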
st178373 | Thank you very much @mrshenli. I believe it got halted at the second torch.distributed.barrier, because I could see that the data download/preprocessing was successful on both nodes:
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
I used the following command to start this time, only changing the number of GPUs per node from 8 to 4. It runs fine with or without barriers if I only train on a single node.
$ python -m torch.distributed.launch --nproc_per_node=4 --nnodes=2 --node_rank=0 --master_addr="192.168.0.1" --master_port=1234 resnet_ddp.py
$ python -m torch.distributed.launch --nproc_per_node=4 --nnodes=2 --node_rank=1 --master_addr="192.168.0.1" --master_port=1234 resnet_ddp.py
I have not tried removing the model-saving code but will give it a shot. |
st178374 | For now, resume is always False during my test, i.e., it is always training from scratch. So we can safely ignore that code for now. |
st178375 | To put it simply, if you just want one process to execute mkdir, download, etc., then you should do this:
import torch
import argparse

def main():
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend="nccl")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int)
    args = parser.parse_args()
    local_rank = args.local_rank
    torch.distributed.barrier()
    if local_rank == 0:
        print(local_rank)
    torch.distributed.barrier()
    print("{} exit".format(local_rank))

if __name__ == "__main__":
    main()
this will print:
0
0 exit
2 exit
1 exit3 exit
And you should not do this:
import torch
import argparse

def main():
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend="nccl")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int)
    args = parser.parse_args()
    local_rank = args.local_rank
    if local_rank != 0:
        torch.distributed.barrier()
    print(local_rank)
    if local_rank == 0:
        torch.distributed.barrier()
    print("{} exit".format(local_rank))

if __name__ == "__main__":
    main()
which will print
0
0 exit
2
2 exit
13
3 exit
1 exit
A barrier is just a barrier: it requires all processes in the group to reach a barrier call, no matter where it is placed. So the second snippet basically delays all other processes (except 0). Unless the code between the two barriers is a no-op (equivalent to return / pass) once any process (e.g., process 0) has executed it, you are not going to get the result you expect.
And please make sure that your CUDA runtime has the same major & minor version as the CUDA version your torch build was compiled with; 9 is not compatible with 10, so you are likely to experience issues when using "nccl" or CUDA tensor computations. |
st178376 | Thank you very much for repeating all the experiments @iffiX. I wanted to download the CIFAR-10 dataset using local rank 0, and once local rank 0 has downloaded the dataset, local ranks 1, 2, and 3 can proceed and use the downloaded cache for data preprocessing.
train_set = torchvision.datasets.CIFAR10(root="data", train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root="data", train=False, download=True, transform=transform)
However, I don’t see how your solution,
import torch
import argparse

def main():
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend="nccl")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int)
    args = parser.parse_args()
    local_rank = args.local_rank
    torch.distributed.barrier()
    if local_rank == 0:
        print(local_rank)
    torch.distributed.barrier()
    print("{} exit".format(local_rank))

if __name__ == "__main__":
    main()
in particular, is able to do this.
The printout of your second code snippet, in particular,
def main():
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend="nccl")
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int)
    args = parser.parse_args()
    local_rank = args.local_rank
    if local_rank != 0:
        torch.distributed.barrier()
    print(local_rank)
    if local_rank == 0:
        torch.distributed.barrier()
    print("{} exit".format(local_rank))

if __name__ == "__main__":
    main()
is expected, and it is also what I was trying to implement. I want local rank 0 to do all the stuff once, then local ranks 1, 2, and 3 do the same stuff in their own processes.
I think my CUDA version is compatible with PyTorch. I am using CUDA 10.2 + PyTorch 1.5.1. |
st178377 | The "asynchronous barrier" was also used in the HuggingFace example that I mentioned above. Since many people are using HuggingFace, I think their code at least runs fine on a single node. |
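For reference, a minimal sketch of the download-guard pattern under discussion (illustrative, assuming the process group is already initialized; this mirrors what is tried in the next post): rank 0 fetches the data, every rank then meets at one barrier, and only afterwards does each rank build its dataset from the cached files.
import torch
import torchvision

def build_datasets(local_rank, transform):
    if local_rank == 0:
        # Only rank 0 hits the network / writes to disk.
        torchvision.datasets.CIFAR10(root="data", train=True, download=True)
        torchvision.datasets.CIFAR10(root="data", train=False, download=True)
    torch.distributed.barrier()  # every rank waits here until rank 0 is done
    train_set = torchvision.datasets.CIFAR10(root="data", train=True,
                                             download=False, transform=transform)
    test_set = torchvision.datasets.CIFAR10(root="data", train=False,
                                            download=False, transform=transform)
    return train_set, test_set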
st178378 | I thought of an inelegant way to get around it:
import torch
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
import os
import random
import numpy as np
def set_random_seeds(random_seed=0):
torch.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
def evaluate(model, device, test_loader):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data[0].to(device), data[1].to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
return accuracy
def main():
num_epochs_default = 100
batch_size_default = 256 # 1024
learning_rate_default = 0.1
random_seed_default = 0
model_dir_default = "saved_models"
model_filename_default = "resnet_distributed.pth"
# Each process runs on 1 GPU device specified by the local_rank argument.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--local_rank", type=int, help="Local rank. Necessary for using the torch.distributed.launch utility.")
parser.add_argument("--num_epochs", type=int, help="Number of training epochs.", default=num_epochs_default)
parser.add_argument("--batch_size", type=int, help="Training batch size for one process.", default=batch_size_default)
parser.add_argument("--learning_rate", type=float, help="Learning rate.", default=learning_rate_default)
parser.add_argument("--random_seed", type=int, help="Random seed.", default=random_seed_default)
parser.add_argument("--model_dir", type=str, help="Directory for saving models.", default=model_dir_default)
parser.add_argument("--model_filename", type=str, help="Model filename.", default=model_filename_default)
parser.add_argument("--resume", action="store_true", help="Resume training from saved checkpoint.")
argv = parser.parse_args()
local_rank = argv.local_rank
num_epochs = argv.num_epochs
batch_size = argv.batch_size
learning_rate = argv.learning_rate
random_seed = argv.random_seed
model_dir = argv.model_dir
model_filename = argv.model_filename
resume = argv.resume
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
# torch.distributed.init_process_group(backend="gloo")
# torch.distributed.barrier()
# Create directories outside the PyTorch program
# Only create directory in one process because it is not multiprocess safe
if local_rank == 0:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Prepare dataset and dataloader
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if local_rank == 0:
train_set = torchvision.datasets.CIFAR10(root="data", train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root="data", train=False, download=True, transform=transform)
torch.distributed.barrier()
train_set = torchvision.datasets.CIFAR10(root="data", train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root="data", train=False, download=True, transform=transform)
model_filepath = os.path.join(model_dir, model_filename)
# We need to use seeds to make sure that the models initialized in different processes are the same
set_random_seeds(random_seed=random_seed)
# Encapsulate the model on the GPU assigned to the current process
model = torchvision.models.resnet18(pretrained=False)
device = torch.device("cuda:{}".format(local_rank))
model = model.to(device)
ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
# We only save the model who uses device "cuda:0"
# To resume, the device for the saved model would also be "cuda:0"
if resume == True:
map_location = {"cuda:0": "cuda:{}".format(local_rank)}
ddp_model.load_state_dict(torch.load(model_filepath, map_location=map_location))
# Restricts data loading to a subset of the dataset exclusive to the current process
train_sampler = DistributedSampler(dataset=train_set)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, sampler=train_sampler, num_workers=8)
# Test loader does not have to follow distributed sampling strategy
test_loader = DataLoader(dataset=test_set, batch_size=128, shuffle=False, num_workers=8)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-5)
# Loop over the dataset multiple times
for epoch in range(num_epochs):
print("Local Rank: {}, Epoch: {}, Training ...".format(local_rank, epoch))
# Save and evaluate model routinely
if epoch % 10 == 0:
if local_rank == 0:
accuracy = evaluate(model=ddp_model, device=device, test_loader=test_loader)
torch.save(ddp_model.state_dict(), model_filepath)
print("-" * 75)
print("Epoch: {}, Accuracy: {}".format(epoch, accuracy))
print("-" * 75)
ddp_model.train()
for data in train_loader:
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = ddp_model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if __name__ == "__main__":
main()
But it still got stuck.
On node 0:
100.0%Extracting data/cifar-10-python.tar.gz to data
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Local Rank: 3, Epoch: 0, Training ...
Local Rank: 2, Epoch: 0, Training ...
Local Rank: 1, Epoch: 0, Training ...
Local Rank: 0, Epoch: 0, Training ...
On node 1:
100.0%Extracting data/cifar-10-python.tar.gz to data
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified |
st178379 | After reading your code a little bit more carefully, I agree that you may use the second solution, since all processes need to create the data loader, so the problem is not there.
Could you please try to add some printing functions such as:
print("line230")
...
print("line232")
to show exactly where your code has halted? The current log is way too limited to determine the exact statement that caused your code to halt.
And don’t forget to take care of ddp_model.load_state_dict(torch.load(model_filepath, map_location=map_location)) after solving the halting issue, as @mrshenli said. |
st178380 | @mrshenli In your tutorial (https://pytorch.org/tutorials/intermediate/ddp_tutorial.html#save-and-load-checkpoints), I saw you were using ddp_model.load_state_dict to load model parameters. Is this method untested and unfavored?
I remember the example I documented in my blog post works perfectly. I tested model resuming a while ago and it worked fine. It only started having problems when I tried to add some barrier functions a few days ago.
Thank you. |
st178381 | @iffiX @mrshenli It seems that I have located where the halting is happening. Running the following code:
import torch
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import argparse
import os
import random
import numpy as np
def set_random_seeds(random_seed=0):
torch.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
def evaluate(model, device, test_loader):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data[0].to(device), data[1].to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
return accuracy
def main():
num_epochs_default = 100
batch_size_default = 256 # 1024
learning_rate_default = 0.1
random_seed_default = 0
model_dir_default = "saved_models"
model_filename_default = "resnet_distributed.pth"
# Each process runs on 1 GPU device specified by the local_rank argument.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--local_rank", type=int, help="Local rank. Necessary for using the torch.distributed.launch utility.")
parser.add_argument("--num_epochs", type=int, help="Number of training epochs.", default=num_epochs_default)
parser.add_argument("--batch_size", type=int, help="Training batch size for one process.", default=batch_size_default)
parser.add_argument("--learning_rate", type=float, help="Learning rate.", default=learning_rate_default)
parser.add_argument("--random_seed", type=int, help="Random seed.", default=random_seed_default)
parser.add_argument("--model_dir", type=str, help="Directory for saving models.", default=model_dir_default)
parser.add_argument("--model_filename", type=str, help="Model filename.", default=model_filename_default)
parser.add_argument("--resume", action="store_true", help="Resume training from saved checkpoint.")
argv = parser.parse_args()
local_rank = argv.local_rank
num_epochs = argv.num_epochs
batch_size = argv.batch_size
learning_rate = argv.learning_rate
random_seed = argv.random_seed
model_dir = argv.model_dir
model_filename = argv.model_filename
resume = argv.resume
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
# torch.distributed.init_process_group(backend="gloo")
if local_rank != 0:
torch.distributed.barrier()
print("Local Rank: {} | Location: {}".format(local_rank, 0))
# Create directories outside the PyTorch program
# Only create directory in one process because it is not multiprocess safe
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Prepare dataset and dataloader
transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_set = torchvision.datasets.CIFAR10(root="data", train=True, download=True, transform=transform)
test_set = torchvision.datasets.CIFAR10(root="data", train=False, download=True, transform=transform)
model_filepath = os.path.join(model_dir, model_filename)
# We need to use seeds to make sure that the models initialized in different processes are the same
set_random_seeds(random_seed=random_seed)
# Encapsulate the model on the GPU assigned to the current process
model = torchvision.models.resnet18(pretrained=False)
print("Local Rank: {} | Location: {}".format(local_rank, 1))
if local_rank == 0:
torch.distributed.barrier()
print("Local Rank: {} | Location: {}".format(local_rank, 2))
device = torch.device("cuda:{}".format(local_rank))
model = model.to(device)
print("Local Rank: {} | Location: {}".format(local_rank, 2.1))
ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
print("Local Rank: {} | Location: {}".format(local_rank, 2.2))
# We only save the model who uses device "cuda:0"
# To resume, the device for the saved model would also be "cuda:0"
if resume == True:
map_location = {"cuda:0": "cuda:{}".format(local_rank)}
ddp_model.load_state_dict(torch.load(model_filepath, map_location=map_location))
# Restricts data loading to a subset of the dataset exclusive to the current process
train_sampler = DistributedSampler(dataset=train_set)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, sampler=train_sampler, num_workers=8)
# Test loader does not have to follow distributed sampling strategy
test_loader = DataLoader(dataset=test_set, batch_size=128, shuffle=False, num_workers=8)
print("Local Rank: {} | Location: {}".format(local_rank, 2.3))
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-5)
# Loop over the dataset multiple times
for epoch in range(num_epochs):
print("Local Rank: {}, Epoch: {}, Training ...".format(local_rank, epoch))
print("Local Rank: {} | Location: {}".format(local_rank, 3))
# Save and evaluate model routinely
if epoch % 10 == 0:
if local_rank == 0:
accuracy = evaluate(model=ddp_model, device=device, test_loader=test_loader)
torch.save(ddp_model.state_dict(), model_filepath)
print("-" * 75)
print("Epoch: {}, Accuracy: {}".format(epoch, accuracy))
print("-" * 75)
print("Local Rank: {} | Location: {}".format(local_rank, 4))
ddp_model.train()
for data in train_loader:
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = ddp_model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if __name__ == "__main__":
main()
For the node 0:
Extracting data/cifar-10-python.tar.gz to data
Files already downloaded and verified
Local Rank: 0 | Location: 1
Local Rank: 0 | Location: 2
Local Rank: 2 | Location: 0
Local Rank: 3 | Location: 0
Local Rank: 1 | Location: 0
Local Rank: 0 | Location: 2.1
Local Rank: 0 | Location: 2.2
Local Rank: 0 | Location: 2.3
Local Rank: 0, Epoch: 0, Training ...
Local Rank: 0 | Location: 3
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Local Rank: 2 | Location: 1
Local Rank: 2 | Location: 2
Local Rank: 1 | Location: 1
Local Rank: 1 | Location: 2
Local Rank: 3 | Location: 1
Local Rank: 3 | Location: 2
Local Rank: 2 | Location: 2.1
Local Rank: 1 | Location: 2.1
Local Rank: 3 | Location: 2.1
Local Rank: 2 | Location: 2.2
Local Rank: 2 | Location: 2.3
Local Rank: 1 | Location: 2.2
Local Rank: 1 | Location: 2.3
Local Rank: 2, Epoch: 0, Training ...
Local Rank: 2 | Location: 3
Local Rank: 2 | Location: 4
Local Rank: 1, Epoch: 0, Training ...
Local Rank: 1 | Location: 3
Local Rank: 1 | Location: 4
Local Rank: 3 | Location: 2.2
Local Rank: 3 | Location: 2.3
Local Rank: 3, Epoch: 0, Training ...
Local Rank: 3 | Location: 3
Local Rank: 3 | Location: 4
For the node 1:
Extracting data/cifar-10-python.tar.gz to data
Files already downloaded and verified
Local Rank: 0 | Location: 1
Local Rank: 0 | Location: 2
Local Rank: 2 | Location: 0
Local Rank: 3 | Location: 0
Local Rank: 1 | Location: 0
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
Local Rank: 0 | Location: 2.1
Local Rank: 2 | Location: 1
Local Rank: 2 | Location: 2
Local Rank: 1 | Location: 1
Local Rank: 1 | Location: 2
Local Rank: 3 | Location: 1
Local Rank: 3 | Location: 2
Local Rank: 2 | Location: 2.1
Local Rank: 1 | Location: 2.1
Local Rank: 3 | Location: 2.1
So the second node got halted in
ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank) |
st178382 | Since you are running 1.5.1, I just dove into the 1.5.1 code and can verify that the newest DistributedDataParallel does have a _sync_params method which broadcasts all parameters and buffers, then sets local params with the in-place operation set_:
def _sync_params(self):
    with torch.no_grad():
        # only do intra-node parameters sync for replicated single-device
        # CUDA modules
        if self.device_ids and len(self.device_ids) > 1:
            # intra-node parameter sync
            result = torch.cuda.comm.broadcast_coalesced(
                self.modules_params[0],
                self.device_ids,
                self.broadcast_bucket_size)
            for tensors, module_params in zip(result[1:],
                                              self.modules_params[1:]):
                for tensor, param in zip(tensors, module_params):
                    param.set_(tensor)
                    # Assume we have just run the optimizer and zeroed the
                    # grads of the parameters on the root model. We need
                    # to zero the grads on all model replicas as well.
                    # This snippet is copied from torch.optim.Optimizer.
                    if param.grad is not None:
                        param.grad.detach_()
                        param.grad.zero_()
And _sync_params will be invoked when you perform a forward operation, if syncing is enabled:
def forward(self, *inputs, **kwargs):
    if self.require_forward_param_sync:
        self._sync_params()
so load_state_dict() should work, theoretically, because newly loaded params will be broadcast to the other processes.
Sorry about my outdated knowledge above |
st178383 | I think your code is correct, there really isn’t any visible issue with:
model = model.to(device)
print("Local Rank: {} | Location: {}".format(local_rank, 2.1))
ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
My knowledge is not enough to explain this behavior; some possible debugging steps:
does "gloo" also halt?
insert some more print tracers into the PyTorch source code
It is most likely a problem with NCCL, because DDP basically does these things in initialization:
call dist._broadcast_coalesced to broadcast parameters to all processes in the group;
dist._broadcast_coalesced is defined in torch/csrc/distributed/c10d/comm.cpp;
however, since it is a private function, there is no indication about whether it is blocking, etc. I only know that it is invoked by all processes.
call _ddp_init_helper, which basically only does some local operations like:
Initialization helper function that does the following:
(1) replicating the module from device[0] to the other devices
(2) bucketing the parameters for reductions
(3) resetting the bucketing states
(4) registering the grad hooks
(5) passing a handle of DDP to SyncBatchNorm Layer
You can check the NCCL installation with the following, but this might not help you much if the "gloo" backend also halts:
'unhandled system error' when training with multi nodes distributed
Here’s one way to see if nccl is installed on the node:
locate nccl| grep "libnccl.so" | tail -n1 | sed -r 's/^.*\.so\.//'
Sorry that I cannot help you more with this problem. |
st178384 | Hi everyone,
I am wondering whether there is any way for PyTorch distributed to build a concurrent queue (or buffer) between the parameter server and the workers.
That way, every worker can act as a producer, sending messages to the concurrent queue,
and the parameter server can act as a consumer, consuming messages from the queue.
Besides, the parameter server should be able to check the length of the queue.
Thank you! |
st178385 | Hey @ryuxin, can this be implemented as a wrapper on top of the RPC API? For example, can you implement the queuing logic as an RPC target function? Some related tutorials:
https://pytorch.org/tutorials/intermediate/rpc_param_server_tutorial.html
https://github.com/pytorch/tutorials/blob/release/1.6/intermediate_source/rpc_async_execution.rst |
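As a rough illustration of what such a wrapper could look like (not code from the tutorials above; the worker/server names are placeholders): the parameter server owns a process-local queue, and workers push messages to it by calling module-level functions over RPC.
import queue
import torch.distributed.rpc as rpc

_msg_queue = queue.Queue()  # lives only in the parameter-server process

def push_msg(msg):
    # Called by workers through RPC; executes on the parameter server.
    _msg_queue.put(msg)

def queue_len():
    return _msg_queue.qsize()

# On a worker, after rpc.init_rpc("worker1", rank=1, world_size=...):
#   rpc.rpc_sync("ps", push_msg, args=(some_tensor,))
# On the parameter server, after rpc.init_rpc("ps", rank=0, world_size=...):
#   msg = _msg_queue.get()       # consume a message
#   pending = queue_len()        # inspect the backlog locally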
st178386 | Hi, I am new to the machine learning community. For some reason, I am trying to do inference in parallel using a multi-core CPU and a single GPU; however, I got the following runtime errors.
THCudaCheck FAIL file=c:\a\w\1\s\tmp_conda_3.6_091443\conda\conda-bld\pytorch_1544087948354\work\torch\csrc\generic\StorageSharing.cpp line=232 error=71 : operation not supported
File "C:\Users\Anaconda3\lib\site-packages\torch\multiprocessing\reductions.py", line 213, in reduce_tensor
(device, handle, storage_size_bytes, storage_offset_bytes) = storage._share_cuda_()
RuntimeError: cuda runtime error (71) : operation not supported at c:\a\w\1\s\tmp_conda_3.6_091443\conda\conda-bld\pytorch_1544087948354\work\torch\csrc\generic\StorageSharing.cpp:232
The following is a simplified example which can reproduce the errors.
import torch
from torch import nn

# model used to do inference
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(100, 1)

    def forward(self, x):
        return self.fc1(x)

# class running inference
class A(object):
    def __init__(self):
        pass

    def do_something(self, model):
        # do something
        x = torch.randn(100).view(-1)
        print(model.forward(x))

    def run(self):
        mp = torch.multiprocessing.get_context('spawn')
        processes = []
        for i in range(2):
            p = mp.Process(target=self.do_something, args=(Model().cuda(),))
            processes.append(p)
        for p in processes:
            p.start()

if __name__ == '__main__':
    a = A()
    a.run()
It would be greatly appreciated if anyone can help solve this problem. By the way, my PC runs on Windows 10 with one GTX 1070 GPU. |
st178387 | In your example you could choose to instantiate your model in the sub process. Then you won’t need to share CUDA tensors between the parent and the child process. |
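A minimal sketch of that suggestion (a rewrite of the example above, untested on Windows): the model is constructed and moved to the GPU inside the child process, so no CUDA storage needs to be shared across process boundaries.
import torch
import torch.multiprocessing as mp
from torch import nn

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(100, 1)

    def forward(self, x):
        return self.fc1(x)

def do_something(_):
    # The model is created (and moved to the GPU) here, in the child process.
    model = Model().cuda()
    x = torch.randn(100).cuda()
    print(model(x))

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    procs = [ctx.Process(target=do_something, args=(i,)) for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()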
st178388 | Hi,
I’m experiencing a hang in a process. It happens at the following code:
samping_priorities = (self.priority_memory[0:upper_bound] / self.priority_memory[0:upper_bound].sum()).cpu().detach().numpy()
batch_idx = T.LongTensor(np.random.choice(upper_bound, batch_size,p=samping_priorities[0:upper_bound].cpu().detach().numpy()))
samping_priorities is a 2000000x1 tensor.
upper_bound is the range I’m interested in, and upper_bound += 1 through iterations.
At the beginning everything is okay; then I noticed that when upper_bound exceeds 32768, the process hangs between the first line and the second line.
It works fine on my Windows workstation but hangs on the Linux cluster. What could be the cause and how can I fix it? |
st178389 | Lewis_Liu:
at the beginning everything is okay. then I noticed when upper_bound exceeds 32768, the process hangs between the first line and second line
This sounds like an int16_t overflow bug (or it might hit some different branch). Could you please create an issue in the pytorch repo? Thanks! |
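One possible workaround to try while the issue is investigated (a suggestion only, untested against the reported hang): keep the sampling in torch instead of round-tripping through numpy, e.g. with torch.multinomial, which draws indices in proportion to the given (unnormalized, non-negative) weights.
import torch

def sample_indices(priority_memory, upper_bound, batch_size):
    weights = priority_memory[0:upper_bound].flatten()
    # replacement=True mirrors np.random.choice's default behavior
    return torch.multinomial(weights, batch_size, replacement=True)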
st178390 | So I have been following the code and tutorial on using PyTorch to do distributed machine learning here. I am able to run the code (and it completes all the tasks), but my program does not terminate and I need to manually kill it with Ctrl+C. The exact code is here.
Right now after completing the task, it hangs after displaying the following warning messages
/anaconda3/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py:406: UserWarning: For MPI backend, world_size (0) and rank (0) are ignored since they are assigned by the MPI runtime.
"MPI runtime.".format(world_size, rank))
train_dist.py:72: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
return F.log_softmax(x)
/anaconda3/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py:125: UserWarning: torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead
warnings.warn("torch.distributed.reduce_op is deprecated, please use "
I run the code using basic command
/anaconda3/bin/mpirun -np 3 --host node-0,node-1,node-2 python train_dist.py
Do I need to add something in the code to exit gracefully? |
st178391 | Archie_Nidhi:
Do I need to add something in the code to exit gracefully?
This shouldn’t be necessary. Which MPI implementation are you using? |
st178392 | I tried to run the code with the gloo backend to check if it is an MPI-only problem, but initially hit the following error:
...
self._target(*self._args, **self._kwargs)
File "test.py", line 132, in init_processes
fn(rank, size)
File "test.py", line 103, in run
train_set, bsz = partition_dataset()
...
ValueError: batch_size should be a positive integer value, but got batch_size=64.0
After fixing that, it hits the error below:
File "test.py", line 132, in init_processes
fn(rank, size)
File "test.py", line 118, in run
epoch_loss += loss.data[0]
IndexError: invalid index of a 0-dim tensor. Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number
Can we have a minimal example that can reproduce the hang issue? Thanks! |
st178393 | I figured out the issue. The MPI part was stuck because one of the processes was waiting for the other process to send some data. |
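For readers hitting the same symptom, a minimal illustration of the kind of mismatch described (not the poster's code): every blocking dist.send must be matched by a dist.recv on the destination rank, otherwise the two sides wait forever.
import torch
import torch.distributed as dist

def exchange(rank):
    # Assumes init_process_group has already been called with a backend
    # that supports point-to-point ops (e.g. MPI or gloo).
    t = torch.zeros(1)
    if rank == 0:
        t += 1
        dist.send(t, dst=1)   # blocks until rank 1 posts the matching recv
    elif rank == 1:
        dist.recv(t, src=0)   # without this, rank 0 would hang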
st178394 | Yeah, I resolved those issues by converting batch_size to an integer and loss.data[0] to loss.data.item(). |
st178395 | Hello. I want to package my project and upload it to PyPI.
My project contains some CUDA/C++ files, and I compile them as in the tutorial. It works well.
In my project, I run setup.py for roi_align and then run another setup.py for rod_align. The files are listed as follows. Note that I use PyTorch to check whether a GPU is available, and then choose to build either a CPP extension or a CUDA extension.
├── autocrop
│ ├── __init__.py
│ ├── cropper.py
│ ├── model
│ │ ├── __init__.py
│ │ ├── cropping_model.py
│ │ ├── moblienetv2.py
│ │ ├── rod_align
│ │ │ ├── __init__.py
│ │ │ ├── build
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ └── rod_align.py
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ └── rod_align.py
│ │ │ ├── setup.py # for rod_align
│ │ │ └── src
│ │ │ ├── rod_align.cpp
│ │ │ ├── rod_align.h
│ │ │ ├── rod_align_cuda.cpp
│ │ │ ├── rod_align_cuda.h
│ │ │ ├── rod_align_kernel.cu
│ │ │ └── rod_align_kernel.h
│ │ ├── roi_align
│ │ │ ├── __init__.py
│ │ │ ├── functions
│ │ │ │ ├── __init__.py
│ │ │ │ └── roi_align.py
│ │ │ ├── modules
│ │ │ │ ├── __init__.py
│ │ │ │ └── roi_align.py
│ │ │ ├── setup.py # for roi_align
│ │ │ └── src
│ │ │ ├── roi_align.cpp
│ │ │ ├── roi_align.h
│ │ │ ├── roi_align_cuda.cpp
│ │ │ ├── roi_align_cuda.h
│ │ │ ├── roi_align_kernel.cu
│ │ │ └── roi_align_kernel.h
│ │ └── shufflenetv2.py
│ └── utils.py
├── demo.py
├── imgs
│ └── demo.jpg
└── setup.py # This is the file I want to write, run once to package all the project
Now I want to package my whole project. I am confused about how to write the setup file (run just once).
Can I copy the code setup(xxxx) from roi and rod setup.py to the final setup.py and write another setup(xxxx) after them, just like:
from setuptools import setup
setup(roi)
setup(rod)
setuptools.setup(
name="autocrop",
python_requires='>=3.6',
install_requires=[
"torch>=1.1",
"torchvision>=0.3.0",
"numpy",
]
xxxxxxx
) |
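For what it's worth, a hedged sketch of what a single top-level setup.py could look like, building both extensions in one setup() call via ext_modules; the extension names, source paths, and the GPU check below are illustrative guesses, not the project's actual configuration.
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension

def make_ext(name, src_dir):
    # Choose a CUDA or plain C++ build depending on GPU availability at build time.
    if torch.cuda.is_available():
        return CUDAExtension(
            name=name,
            sources=[f"{src_dir}/{name}_cuda.cpp", f"{src_dir}/{name}_kernel.cu"],
        )
    return CppExtension(name=name, sources=[f"{src_dir}/{name}.cpp"])

setup(
    name="autocrop",
    packages=find_packages(),
    python_requires=">=3.6",
    install_requires=["torch>=1.1", "torchvision>=0.3.0", "numpy"],
    ext_modules=[
        make_ext("roi_align", "autocrop/model/roi_align/src"),
        make_ext("rod_align", "autocrop/model/rod_align/src"),
    ],
    cmdclass={"build_ext": BuildExtension},
)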
st178396 | @seemethere might be the right person.
Also, Nikita Shulda, but I cannot find his user name (and unsure, if he’s registered here). |
st178397 | I’m trying to share the weights of networks between processes using multiprocessing manager.dict
The code is as follows
for name in critic_state_dict:
    self.shared_networks.critic[name] = T.tensor(critic_state_dict[name].clone().cpu().detach().numpy())
This works fine on Windows. But when I use a cluster, it hangs in the middle of the for loop.
How do I fix this? Or, if I want to periodically share the weights among processes, how do I do it properly?
Thanks |
st178398 | Hey @Lewis_Liu,
Did you use fork or spawn?
Or if I want to periodically share the weights among processes, how to do it properly?
One solution is to create a multiprocessing queue, and pass that queue to child processes. Then, in the loop, use that queue to pass shared tensors. The test below can serve as an example:
github.com
pytorch/pytorch/blob/fc8bca094cc23a2394214c5cdbc8392a3d279e8c/test/distributed/test_c10d_spawn.py#L166-L184
@classmethod
def _test_allgather_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
pg.allgather(ys, xs).wait()
for i in range(world_size):
c2p.put((rank, torch.ones(2, 2) * i, ys[0][i].to("cpu")))
p2c.get()
@unittest.skipIf(not TEST_MULTIGPU, "At least 2 CUDA GPUS needed")
def test_shared_allgather_gloo(self):
self._test_multiprocess(
ProcessGroupShareTensorTest._test_allgather_process,
[torch.ones(2, 2).to(i) * i for i in range(self.world_size)],
ProcessGroupShareTensorTest._init_pg_gloo,
self.world_size) |
st178399 | Hi Li,
I switched to using a Queue, but I cannot avoid first getting the network tensors from the state_dict, right?
I believe it's spawn on my Windows workstation and fork on the Linux cluster, if I'm correct. |
st178400 | Lewis_Liu:
I switched to using Queue. But I cannot avoid firstly getting the net tensors from state_dict right?
I don’t have the full context here. Can you let the process holding the state_dict be the writer to the queue? |
st178401 | Yep.
The network is trained and updated for a step. After this, the process has only one task: writing the state_dict into the queue. The other processes don't have direct access to the network except through the queue. |
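A minimal sketch of that setup (illustrative, assuming the "spawn" start method; the tiny Linear net and fixed step count are placeholders): the trainer publishes a CPU copy of its state_dict through a multiprocessing queue, and the consumer loads it.
import torch
import torch.multiprocessing as mp

def trainer(weight_queue):
    net = torch.nn.Linear(4, 2).cuda()
    for step in range(3):
        # ... one training/update step would go here ...
        # Move tensors to CPU so no CUDA storage is shared across processes.
        cpu_state = {k: v.detach().cpu() for k, v in net.state_dict().items()}
        weight_queue.put(cpu_state)

def consumer(weight_queue):
    local_net = torch.nn.Linear(4, 2)
    for _ in range(3):
        state = weight_queue.get()      # blocks until the trainer publishes
        local_net.load_state_dict(state)

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    q = ctx.Queue()
    procs = [ctx.Process(target=trainer, args=(q,)),
             ctx.Process(target=consumer, args=(q,))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()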
st178402 | Hi, I am working on distributing the layers of a neural network like AlexNet across three devices (edge, fog, and cloud), sending the results of the inference to the next device. I'm currently trying to train it as a single model and then break it down into sub-models.
The neural network has the following model:
# AlexNet
# EDGE MODEL
edge_layers1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=2),nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2))
edge_layers2 = nn.Sequential(nn.Linear(8, 10), nn.ReLU(inplace=True))
# FOG MODEL
fog_layers1 = nn.Sequential(nn.Conv2d(64, 192, kernel_size=3, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2))
fog_layers2 = nn.Sequential(nn.Linear(4, 10), nn.ReLU(inplace=True))
# CLOUD MODEL
cloud_layers1 = nn.Sequential(nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(384, 512, kernel_size=3, padding=1), nn.ReLU(inplace=True),
nn.Conv2d(512, 4096, kernel_size=3, padding=1), nn.ReLU(inplace=True))
cloud_layers2 = nn.Sequential(nn.Linear(4, 1024), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(1024, 4096), nn.ReLU(inplace=True), nn.Linear(4096, 10))
class AlexNet_Distributed(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet_Distributed, self).__init__()
        self.layers1 = edge_layers1
        self.layers2 = edge_layers2
        self.layers3 = fog_layers1
        self.layers4 = fog_layers2
        self.layers5 = cloud_layers1
        self.layers6 = cloud_layers2

    def forward(self, x):
        x = self.layers1(x)
        y = self.layers2(x)
        x = self.layers3(x)
        z = self.layers4(x)
        x = self.layers5(x)
        x = self.layers6(x)
        return x
net = AlexNet_Distributed()
After training it with the following implementation, I get this error. Does anyone know why?
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        print(outputs[3].size())
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
---> 16 loss = criterion(outputs, labels)
RuntimeError: only batches of spatial targets supported (3D tensors) but got targets of dimension: 1 |
st178403 | Based on the discussion in "Only batches of spatial targets supported (non-empty 3D tensors) but got targets of size: [1, 1, 256, 256]", it might be due to the size of outputs or labels. Could you please print out their sizes? |
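As a small self-contained illustration of the shape contract behind that error (toy data, not the poster's model): nn.CrossEntropyLoss expects (N, C) logits and (N,) integer class targets, so a 4-D convolutional output has to be flattened (or pooled) before the classifier.
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits_2d = torch.randn(8, 10)           # (N, C): accepted
targets = torch.randint(0, 10, (8,))     # (N,):   accepted
print(criterion(logits_2d, targets))

logits_4d = torch.randn(8, 10, 4, 4)     # conv-style output, not flattened
# criterion(logits_4d, targets)          # -> "only batches of spatial targets supported ..."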
st178404 | I am training a Conv network on CIFAR 10 with 2 GPUs. I am using DataParallel() to parallelize the model.
The model is:
class CNNModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Conv 1
        self.cnn1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.activation = nn.ELU()
        # maxpool1
        self.maxpool1 = nn.MaxPool2d(kernel_size=2)
        # Conv 2
        self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)
        # Conv 3
        self.cnn3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.cnn4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.cnn5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.cnn6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1)
        # Maxpool 2
        self.maxpool2 = nn.MaxPool2d(kernel_size=2)
        # 8 = (32/2)/2
        self.fc1 = nn.Linear(128*8*8, 10)

    def forward(self, x):
        out = self.activation(self.cnn1(x))
        out = self.maxpool1(out)
        out = self.activation(self.cnn2(out))
        out = self.activation(self.cnn3(out))
        out = self.activation(self.cnn4(out))
        out = self.activation(self.cnn5(out))
        out = self.activation(self.cnn6(out))
        out = self.maxpool2(out)
        out = out.view(out.shape[0], -1)
        out = self.fc1(out)
        return out
The weight constraints I am using are:
class weightConstraint(object):
    def __init__(self):
        pass

    def __call__(self, module):
        if hasattr(module, 'weight'):
            print("Entered")
            w = module.weight.data
            w[torch.where(w<0)] = 0
            module.weight.data = w
I tried using 2 methods to apply these constraints:
Technique 1:
model = CNNModel()
constaint = weightConstraint()
for key, val in model._modules.items():
    if hasattr(val, 'weight'):
        print(key)
        val.apply(constaint)
model = nn.DataParallel(model).to(device)
Technique 2:
model = CNNModel()
model = nn.DataParallel(model).to(device)
constaint = weightConstraint()
for key, val in model._modules['module']._modules.items():
    if hasattr(val, 'weight'):
        print(key)
        val.apply(constaint)
Training in both cases gives me a loss of nan (and an accuracy of 0.10, which means random assignment) from the very first epoch. |
st178405 | It is hard to tell what is going on here. How does your training behave on a single GPU without the DP wrapping? |
st178406 | It seems to be working well without the constraint.
Anyway, the code now seems to work sometimes and fails the rest of the time.
I guess it might just be a poor model to start with. |
st178407 | Hi, I am trying to decompose ResNet across three different devices. For this, I would need to be able to save each nn.Sequential as a separate model: training them all together but being able to load the models separately on each device.
Do any of you know how to save an nn.Sequential as a model? |
st178408 | If you won’t change your model-device mapping, you can just save your model directly using t.save and load it with t.load.
If you really want to save the nn.Sequential itself, you can also save it directly using t.save and load it with t.load. |
st178409 | But if, for instance, I have a neural network with a structure like this:
layers1 = nn.sequential(…)
layers2 = nn.sequential(…)
layers3 = nn.sequential(…)
And I would like to save the model so that device1 loads layers1, device2 layers2… |
st178410 | for example, suppose you initialize a model like:
class YourModel(nn.Module):
    def __init__(self, dev_list=["cpu", "cuda:1", "cuda:0"]):
        super(YourModel, self).__init__()
        self.fc1 = nn.Sequential(nn.Linear(5, 5).to(dev_list[0]))  # on device "cpu"
        self.fc2 = nn.Sequential(nn.Linear(5, 5).to(dev_list[1]))  # on device "cuda:1"
        self.fc3 = nn.Sequential(nn.Linear(5, 5).to(dev_list[2]))  # on device "cuda:0"
        self.dev = dev_list

    def forward(self, x):
        x = self.fc1(x).to(self.dev[1])
        x = self.fc2(x).to(self.dev[2])
        return self.fc3(x).to("cpu")
then:
t.save(YourModel(), "model.pt")
model = t.load("model.pt")
Device mapping will be saved along with your model, don’t worry about it |
st178411 | See https://pytorch.org/docs/stable/torch.html?highlight=load#torch.load, especially the map_location part |
st178412 | But in this solution, the devices must be connected and assigned. My goal is to deploy the neural network in a distributed way, decomposing each Sequential onto different devices and sending the inference results through the network. |
st178413 | Could you please clarify your design a little bit more? Your description of "distributed" and "decomposing" is pretty vague.
Is it a multi-process application? How do you map your GPUs to your processes? How do you map your model to your devices? I am really sorry that I cannot help you more if you don't give a clear definition of the architecture you would like to achieve. It would be better if you could draw a graph or show your code.
If you just want to split layer1, layer2 and layer3 across different devices, you can simply save them individually with torch.save and torch.load; torch will take charge of pickling whatever is passed to it, including parameters and your custom attributes such as the ones set by self.some_attr in __init__. |
st178414 | It’s an edge-fog-cloud architecture. Based on the example above:
class YourModel(nn.Module):
    def __init__(self):
        super(YourModel, self).__init__()
        self.fc1 = nn.Sequential(...)
        self.fc2 = nn.Sequential(...)
        self.fc3 = nn.Sequential(...)
I intend to save fc1, fc2 and fc3 separately. In this way I could make a first prediction on the device that has fc1 and, by sending the intermediate result to the second device, make the next prediction with greater accuracy. The third device would work in the same way. |
st178415 | I see. One more question: will you move the model around, such as to a different machine with a different number of GPUs, or are you loading the whole model onto the same devices?
If you don’t, and you really want to save the parts separately to different files, maybe for better inspection or archival purposes, then:
def save(your_model):
    torch.save(your_model.fc1, "fc1.pt")
    torch.save(your_model.fc2, "fc2.pt")
    torch.save(your_model.fc3, "fc3.pt")
If you do, then you will have to decide which device each part of your model should be located on. E.g., suppose on your training machine you have 3 GPUs, and on your inference machine you have 1 GPU:
def save(your_model):
    torch.save(your_model.fc1, "fc1.pt")
    torch.save(your_model.fc2, "fc2.pt")
    torch.save(your_model.fc3, "fc3.pt")

def map(your_model):
    your_model.fc1 = torch.load("fc1.pt", map_location=torch.device('cuda:0'))
    your_model.fc2 = torch.load("fc2.pt", map_location=torch.device('cuda:0'))
    your_model.fc3 = torch.load("fc3.pt", map_location=torch.device('cuda:0'))
by the way,
Fernando_Gallego:
the devices must be connected and assigned
Maybe you have the wrong idea; there is no such "connected device" concept in PyTorch. You can perform a complex forward() operation or a simple add() operation on some input x located on device cuda:[number] or cpu simply because the operands (tensors) are on the same device; if torch needs to fetch data from somewhere else, it will complain and throw an error.
About saving the model
There are many ways to save your model. Typically you will want to save the OrderedDict returned by model.state_dict(): the keys are your parameter names such as "linear.weight" or "linear.bias", and the values are nn.Parameters, whose .data attribute is just a Tensor. You may load a state dict into your model like:
def prep_load_state_dict(model: nn.Module,
                         state_dict: Any):
    """
    Automatically load a **loaded state dictionary**

    Note:
        This function handles tensor device remapping.
    """
    for name, param in model.named_parameters():
        state_dict[name] = state_dict[name].to(param.device)
    model.load_state_dict(state_dict)
About torch.save and torch.load
If you know the pickle concept in Python, then you will get what torch.save does: pickle serializes an object into a binary string:
buffer = io.BytesIO()
t.save(t.zeros([5]), buffer)
print(buffer.getvalue())
will yield:
b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03.\x80\x02}q\x00(X\n\x00\x00\x00type_sizesq\x01}q\x02(X\x03\x00\x00\x00intq\x03K\x04X\x04\x00\x00\x00longq\x04K\x04X\x05\x00\x00\x00shortq\x......
You can serialize whatever you like this way; a CUDA tensor will essentially be saved as "raw data" + "device descriptor cuda:0". |
st178416 | iffiX:
def save(your_model):
    torch.save(your_model.fc1, "fc1.pt")
    torch.save(your_model.fc2, "fc2.pt")
    torch.save(your_model.fc3, "fc3.pt")
Thanks a lot, I think the solution would be:
def save(your_model):
    torch.save(your_model.fc1, "fc1.pt")
    torch.save(your_model.fc2, "fc2.pt")
    torch.save(your_model.fc3, "fc3.pt")
I’ll try then. |
st178417 | I’m also trying to do something similar, but in my scenario I construct the whole model using only nn.Sequential and then I just want to save it. I don’t have a class defined for it, so something like https://stackoverflow.com/questions/42703500/best-way-to-save-a-trained-model-in-pytorch won’t work for me.
My current attempt uses pickle, but I keep getting warnings for using pickle:
FutureWarning: pickle support for Storage will be removed in 1.5. Use `torch.save` instead
warnings.warn("pickle support for Storage will be removed in 1.5. Use `torch.save` instead", FutureWarning)
I think they just want us to use torch.save and torch.load. I stopped getting the warning when I did that.
My (full) code:
# creating data and running through a nn and saving it
import torch
import torch.nn as nn
from pathlib import Path
from collections import OrderedDict
import numpy as np
import pickle
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
num_samples = 3
Din, Dout = 1, 1
lb, ub = -1, 1
x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
f = nn.Sequential(OrderedDict([
    ('f1', nn.Linear(Din, Dout)),
    ('out', nn.SELU())
]))
y = f(x)
# save data torch to numpy
x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
np.savez(path / 'db', x=x_np, y=y_np)
print(x_np)
# save model
with open('db_saving_seq', 'wb') as file:
    pickle.dump({'f': f}, file)
# load model
with open('db_saving_seq', 'rb') as file:
    db = pickle.load(file)
f2 = db['f']
# test that it outputs the right thing
y2 = f2(x)
y_eq_y2 = y == y2
print(y_eq_y2)
db2 = {'f': f, 'x': x, 'y': y}
torch.save(db2, path / 'db_f_x_y')
print('Done')
db3 = torch.load(path / 'db_f_x_y')
f3 = db3['f']
x3 = db3['x']
y3 = db3['y']
yy3 = f3(x3)
y_eq_y3 = y == y3
print(y_eq_y3)
y_eq_yy3 = y == yy3
print(y_eq_yy3)
did you try that? Is there a reason why that’s not enough for you?
cross-posted: https://stackoverflow.com/questions/62923052/how-does-one-save-torch-nn-sequential-models-in-pytorch-properly |
st178418 | I use the following code, but I think it may be wrong!
lr_broadcast = torch.tensor([0.0]).cuda()
if distribute:
if local_rank == 0:
lr_broadcast = torch.tensor([optimizer.state_dict()['param_groups'][0]['lr']]).cuda()
dist.all_reduce(lr_broadcast)
optimizer.state_dict()['param_groups'][0]['lr'] = lr_broadcast.item()
print(local_rank, optimizer.state_dict()['param_groups'][0]['lr']) |
st178419 | @MRGAO1996
Could you please format the code with proper indentation?
If this line is within the if local_rank == 0 block, then this is indeed wrong. You need to call all_reduce on all processes, as it is a collective communication:
if local_rank == 0:
dist.all_reduce(lr_broadcast) |
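For example, a minimal sketch of the corrected pattern (assuming the LR scheduler only steps on rank 0) would be:
lr_broadcast = torch.tensor([0.0]).cuda()
if local_rank == 0:
    lr_broadcast[0] = optimizer.param_groups[0]['lr']
dist.all_reduce(lr_broadcast)  # every rank enters the collective; SUM of rank-0's lr and zeros
# note: optimizer.state_dict() returns a copy, so assigning into it has no effect;
# write into optimizer.param_groups instead
for group in optimizer.param_groups:
    group['lr'] = lr_broadcast.item()
This way all ranks participate in the collective and the optimizer actually picks up the new learning rate. |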
st178420 | if distribute and local_rank == 0:
acc = verification(...)
# lr_sched is torch.optim.lr_scheduler.ReduceLROnPlateau()
lr_sched.step(acc)
lr_broadcast = torch.tensor([0.0]).cuda()
if distribute:
if local_rank == 0:
lr_broadcast = torch.tensor([optimizer.state_dict()['param_groups'][0]['lr']]).cuda()
dist.all_reduce(lr_broadcast)
optimizer.state_dict()['param_groups'][0]['lr'] = lr_broadcast.item()
print(local_rank, optimizer.state_dict()['param_groups'][0]['lr'])
When I run this code with 2 GPUs, I found two mistakes.
First, dist.all_reduce() failed. I tried to print lr_broadcast after all_reduce, but it differs between GPU0 and GPU1.
Second, after I pass the lr_broadcast value to optimizer.state_dict, I print the lr in the optimizer, but the value differs from lr_broadcast.
So, I got confused. |
st178421 | I noticed the code called .cuda() without specifying a GPU id. Did you set CUDA_VISIBLE_DEVICES or call torch.cuda.set_device()? Could you please share a self-contained repro that shows the error? |
st178422 | Well, I knew how to change my code to succeed with "all_reduce", but I'm still a little confused.
My code before (which fails):
from torch.nn.parallel import DistributedDataParallel as DDP
def main():
processes = []
for rank in range(world_size):
p = Process(target=run, args=(rank, world_size))
p.start()
processes.append(p)
for p in processes:
p.join()
def run(local_rank, world_size):
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29501'
torch.cuda.set_device(local_rank)
dist.init_process_group(world_size=world_size, rank=local_rank, backend='nccl')
net = Net().cuda()
criterion = ...
optimizer = ...
net = DDP(net, device_ids=[local_rank])
for ep in range(2):
# train
net.train()
for i, (images, labels) in enumerate(train_loader):
optimizer.zero_grad()
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# verification
if local_rank != 0:
val_acc = torch.tensor([0.0]).cuda()
# only do verification on rank0
if local_rank == 0:
net.eval()
# simplify verification simulation
input = torch.randn(8, 3, 112, 112).cuda()
with torch.no_grad():
output = net(input)
val_acc = torch.tensor([0.0009]).cuda()
dist.all_reduce(val_acc)
print(local_rank, val_acc)
main()
Running the code above prints:
1 tensor([0.0], device='cuda:1')
0 tensor([0.0009], device='cuda:0')
And, in epoch 2, local_rank 1 will hang.
My solution was to change the line output = net(input) to output = net.module(input), and then it succeeds:
0 tensor([0.0009], device='cuda:0')
1 tensor([0.0009], device='cuda:1')
So I want to know why this happens? |
st178423 | Hey @MRGAO1996
Does your model use any buffers (e.g., running mean in BatchNorm)?
As you have wrapped the second forward in with torch.no_grad():, rank 0 will skip the code in the if torch.is_grad_enabled() and ... branch (see the DDP forward below), so the behavior of the next forward will differ between the two ranks: rank 0 would skip _sync_params() while rank 1 would execute it. But this should only make a difference when your model has buffers.
github.com
pytorch/pytorch/blob/d5ae4a07ef5b2e77cf51737fb0a3aafc2e71231d/torch/nn/parallel/distributed.py#L562-L590 2
def forward(self, *inputs, **kwargs):
if self.require_forward_param_sync:
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
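In other words, the net.module workaround keeps DDP's forward bookkeeping identical across ranks; a minimal sketch of the rank-0-only evaluation would be:
val_acc = torch.tensor([0.0]).cuda()
if local_rank == 0:
    net.eval()
    with torch.no_grad():
        # call the wrapped module directly so DDP.forward (and its
        # require_forward_param_sync state) is not touched on rank 0 only
        output = net.module(input)
    val_acc = torch.tensor([0.0009]).cuda()
    net.train()
dist.all_reduce(val_acc)  # all ranks participate
That keeps the subsequent training forwards consistent on both ranks. |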
st178424 | Hi,
Can GLOO work with infiniband?
Our RTX2080ti GPUs do not support GPUDirect/RDMA anyway, so the only thing we need is something that works out of the box with reasonable bandwidth that does not become the bottleneck; we are doing P2P communication.
edit: well, I see that here https://github.com/facebookincubator/gloo they say it's supported, but I wonder if you still have anything further to say on the integration with PyTorch.
edit2: especially since here https://pytorch.org/docs/stable/distributed.html it's written that GLOO does not support InfiniBand. |
st178425 | Hi,
GLOO does have an ibverbs transport: https://github.com/facebookincubator/gloo/tree/master/gloo/transport/ibverbs. However, it was never used or tested with PyTorch. That may be the reason the PyTorch docs say GLOO does not support InfiniBand.
We are about to test the GLOO ibverbs transport over RDMA and integrate it with PyTorch for HPC scenarios. For now, GLOO ibverbs hasn't been integrated into PyTorch yet. |
st178426 | Hi everyone,
I’m facing a deadlock I can’t solve by myself.
I’m trying to implement Reactor algorithm (https://openreview.net/forum?id=rkHVZWZAZ 1) to train an AI playing at French Tarot card game.
When I tried to implement training worker parallelisation, things seemed to work well until I tried to increase the size of some hidden layer.
Then the worker seemed to block (like an infinite loop) when passing through the first layer of the first forward pass (more precisely, the first matmul -> checked with the pudb debugger).
I tried a few things:
When the worker is called from the main process, everything is fine, whatever the layer sizes are
When the worker exploration is performed through a separate thread (inside the secondary process, the main thread of that secondary process being the training), the exploration is OK, and the blocking occurs in the training
On the contrary, if the exploration & training are performed alternately, the blocking occurs in the first exploration step
The multiprocessing (and multithreading) is done with fork (Linux environment), by subclassing the process and thread classes and then calling their start method.
Forward pass is only performed on local copy of the shared network (one copy for exploration, another for training).
I verified copy process : local networks seem to match perfectly shared network.
I suspect some secondary process memory issue, but I have no other clue or direction to follow.
The network dimensions are pretty reasonable, I think:
one input linear layer 78 -> n
two parallel recurrent layer (GRU) n,m -> n (m = hidden state dimension)
three head with two linear layers each
actor layer m -> n -> 78
advantage layer m -> n -> 78
value layer m -> n -> 1
The degrees of freedom are n & m, which were 80 initially. Blocking occurs in the input layer when n or m is greater than 110… (although m does not appear in the input layer dimensions…)
The training process is performed on 14 successive steps x batches of 5 trajectories.
Does that kind of issue seem familiar to anyone?
Thanks in advance |
st178427 | Hey @driou
Are you using torch.multiprocessing only or does your code also used any feature from torch.distributed or torch.nn.parallel?
If it is just torch.multiprocessing, it might relate to forking the process. If the parent process used any CUDA-related feature, there will be a CUDA context on it which does not fork. Even if it is CPU only, the fork could have broken OMP internal state; see the discussion here: https://github.com/pytorch/pytorch/issues/41197
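If you want to try it, a minimal sketch of switching to spawn with torch.multiprocessing looks like this (the layer sizes here are just placeholders):
import torch
import torch.multiprocessing as mp

def worker(rank, hidden_size):
    # build the networks inside the child process so nothing is inherited via fork
    layer = torch.nn.Linear(78, hidden_size)
    out = layer(torch.randn(4, 78))
    print(rank, out.shape)

if __name__ == "__main__":
    # spawn starts fresh interpreters instead of forking the parent's state
    mp.spawn(worker, args=(128,), nprocs=2)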
Can you check if using spawn from torch.multiprocessing solves the problem? |
st178428 | Hey,
I’m indeed using torch.multiprocessing (and not torch.distributed nor torch.nn.parallel).
I'm not using any CUDA-related feature: it's CPU only (in fact, I did not perform the full PyTorch install with CUDA).
I will try with spawn, but I chose fork on purpose, because it was more practical: I can initialize the process before starting it.
Before that, let's have a look at the thread you sent me: the symptom looks very similar!
Thanks a lot for your help |
st178429 | @mrshenli How can the caller catch exceptions thrown by rpc_async, rpc_sync and rpc.remote under the following conditions, supposing a timeout is set globally (or per call):
during execution, the target process crashes and exits, also closing down all rpc execution threads.
during execution, connection to the target process is closed
during execution, the timeout limit is reached
during execution, an exception is raised in the executed function
Based on my experiments, my partial answer is:
Not known ?
A RuntimeError, something like “peer reset”
An uncatchable std::runtime_error, something like:
terminate called after throwing an instance of 'std::runtime_error'
what(): RPC ran for more than 5000 milliseconds and timed out.
the exception thrown by the function, not the original exception, but wrapped in a udf exception and reraised on the caller side.
The third one troubles me the most because std::runtime_error will cause an ugly Fatal Python Error:
Fatal Python error: Aborted
Thread 0x00007f916abab700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 63 in _rpc_call_remote_method
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/torch/distributed/rpc/internal.py", line 153 in _run_function
Thread 0x00007f91693a8700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 75 in _rpc_get_remote_paired_value
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/torch/distributed/rpc/internal.py", line 153 in _run_function
Thread 0x00007f9163fff700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 75 in _rpc_get_remote_paired_value
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/torch/distributed/rpc/internal.py", line 153 in _run_function
Thread 0x00007f91527fc700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/torch/distributed/rpc/api.py", line 554 in rpc_sync
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/torch/distributed/rpc/api.py", line 77 in wrapper
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 756 in _rpc_paired_class_call
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 597 in rpc_paired_class_sync
File "/home/Administrator/iffi/Projects/machin/test/parallel/distributed/test_world.py", line 97 in main
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 46 in _exec_role
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f9152ffd700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/election.py", line 423 in _task_timeout
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f91537fe700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/election.py", line 435 in _task_keep_alive
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f9153fff700 (most recent call first):
File "/usr/lib/python3.5/threading.py", line 297 in wait
File "/usr/lib/python3.5/queue.py", line 173 in get
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/election.py", line 491 in _task_handle
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f9160ff9700 (most recent call first):
File "/usr/lib/python3.5/threading.py", line 293 in wait
File "/home/Administrator/iffi/Projects/machin/machin/parallel/event.py", line 66 in wait
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/role_dispatcher.py", line 234 in _task_dispatch
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f91617fa700 (most recent call first):
File "/usr/lib/python3.5/threading.py", line 293 in wait
File "/home/Administrator/iffi/Projects/machin/machin/parallel/event.py", line 66 in wait
File "/home/Administrator/iffi/Projects/machin/machin/parallel/distributed/world.py", line 302 in _task_run_dispatched_roles
File "/usr/lib/python3.5/threading.py", line 862 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/thread.py", line 47 in run
File "/usr/lib/python3.5/threading.py", line 914 in _bootstrap_inner
File "/usr/lib/python3.5/threading.py", line 882 in _bootstrap
Thread 0x00007f91e4362700 (most recent call first):
File "/home/Administrator/iffi/Projects/machin/test/parallel/distributed/test_world.py", line 145 in subproc_start_world_with_roles
File "/home/Administrator/iffi/Projects/machin/test/parallel/util_run_multi.py", line 16 in process_main
File "/usr/lib/python3.5/multiprocessing/process.py", line 93 in run
File "/home/Administrator/iffi/Projects/machin/machin/parallel/process.py", line 52 in run
File "/usr/lib/python3.5/multiprocessing/process.py", line 249 in _bootstrap
File "/usr/lib/python3.5/multiprocessing/popen_fork.py", line 74 in _launch
File "/usr/lib/python3.5/multiprocessing/popen_fork.py", line 20 in __init__
File "/usr/lib/python3.5/multiprocessing/context.py", line 267 in _Popen
File "/home/Administrator/iffi/Projects/machin/machin/parallel/process.py", line 25 in _Popen
File "/usr/lib/python3.5/multiprocessing/process.py", line 105 in start
File "/home/Administrator/iffi/Projects/machin/test/parallel/util_run_multi.py", line 27 in processes
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 788 in call_fixture_func
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 964 in pytest_fixture_setup
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/callers.py", line 187 in _multicall
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 87 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 93 in _hookexec
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/hooks.py", line 286 in __call__
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 914 in execute
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 584 in _compute_fixture_value
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 503 in _get_active_fixturedef
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 487 in getfixturevalue
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 477 in _fillfixtures
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/fixtures.py", line 297 in fillfixtures
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/python.py", line 1483 in setup
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 373 in prepare
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 123 in pytest_runtest_setup
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/callers.py", line 187 in _multicall
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 87 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 93 in _hookexec
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/hooks.py", line 286 in __call__
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 217 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 244 in from_call
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 217 in call_runtest_hook
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 186 in call_and_report
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 94 in runtestprotocol
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/runner.py", line 85 in pytest_runtest_protocol
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/callers.py", line 187 in _multicall
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 87 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 93 in _hookexec
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/hooks.py", line 286 in __call__
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/main.py", line 272 in pytest_runtestloop
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/callers.py", line 187 in _multicall
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 87 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 93 in _hookexec
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/hooks.py", line 286 in __call__
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/main.py", line 247 in _main
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/main.py", line 191 in wrap_session
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/main.py", line 240 in pytest_cmdline_main
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/callers.py", line 187 in _multicall
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 87 in <lambda>
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/manager.py", line 93 in _hookexec
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/pluggy/hooks.py", line 286 in __call__
File "/home/Administrator/iffi/Projects/machin/venv/lib/python3.5/site-packages/_pytest/config/__init__.py", line 125 in main
File "/data/software/pycharm/pycharm-2020.1.2/plugins/python/helpers/pycharm/_jb_pytest_runner.py", line 43 in <module>
Is there any clean way to deal with the first three conditions? The fourth one is simple. And why is pybind11 not converting the third std::runtime_error into a catchable Python RuntimeError? |
st178430 | Hey @iffiX
during execution, the target process crashes and exits, also closing down all rpc execution threads.
during execution, connection to the target process is closed
Prior to v1.6, ProcessGroup is the only available backend, which requires all processes to be alive. So the RPC gang cannot survive these failures. Even if you can catch the error in application code, it will leave subsequent RPC behaviors in an undefined state, unless there is a global recovery process (we do have plans to provide this).
We will introduce the TensorPipe backend for RPC in v1.6, which is a P2P comm library. But the first experimental version still has some parts that depend on ProcessGroup, so I think it still wouldn't tolerate such failures in v1.6. There is also an ongoing project to provide elasticity to RPC.
cc @lcw @agolynski
during execution, the timeout limit is reached
This should throw a RuntimeError for the timeout. And a try-except on RuntimeError should be able to catch it. See the code below:
github.com
pytorch/pytorch/blob/67a4f375cdf06113ca959b4e16739edb666f243f/torch/testing/_internal/distributed/rpc/rpc_test.py#L2501-L2502 1
with self.assertRaisesRegex(RuntimeError, error_str):
fut.wait()
If this is not so in your application, then it is a bug that we need to fix. Please let us know how we can reproduce it. Or if you have other suggestions on how we should report timeout error, please also let us know your suggestions.
cc @rvarm1
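In application code, that looks roughly like the sketch below (the worker name is just a placeholder):
fut = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 1))
try:
    result = fut.wait()
except RuntimeError as err:
    # an expired timeout (or another transport failure) surfaces here
    print("RPC failed:", err)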
during execution, an exception is raised in the executed function
If there is an exception during remote execution, this should be thrown on the caller side. See the code below:
github.com
pytorch/pytorch/blob/67a4f375cdf06113ca959b4e16739edb666f243f/torch/testing/_internal/distributed/rpc/rpc_test.py#L1625-L1631
@dist_init
def test_py_raise_in_user_func(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
fut.wait()
This is how the Python error is captured:
github.com
pytorch/pytorch/blob/67a4f375cdf06113ca959b4e16739edb666f243f/torch/distributed/rpc/internal.py#L152-L171
def _run_function(python_udf):
r"""
This function is exclusively called from C++.
See ``torch/csrc/distributed/rpc/python_rpc_handler.cpp``.
Runs a Python UDF and returns its return value.
Wraps any exception in ``RemoteException`` if the function raises.
"""
try:
if isinstance(python_udf, AttributeError):
raise python_udf
result = python_udf.func(*python_udf.args, **python_udf.kwargs)
except Exception as e:
# except str = exception info + traceback string
except_str = (
f"On {_get_current_rpc_agent().get_worker_info()}:\n"
f"{repr(e)}\n{traceback.format_exc()}"
)
result = RemoteException(except_str, type(e))
return result
This is how non-user error is captured:
github.com
pytorch/pytorch/blob/67a4f375cdf06113ca959b4e16739edb666f243f/torch/csrc/distributed/rpc/request_callback_impl.cpp#L907-L921
try {
processRpc(*rpc, messageType, id, retFuture);
} catch (py::error_already_set& e) {
retFuture->markCompleted(handleError(e, messageType, id));
// There are request callback impls in Python, where Python
// exceptions could be thrown. For releasing Python exception
// py::objects, GIL must be held.
py::gil_scoped_acquire acquire;
e.restore(); // Release ownership on py::objects and also restore
// Python Error Indicator.
PyErr_Clear(); // Clear the Python Error Indicator as we has
// recorded the exception in the response message.
} catch (std::exception& e) {
retFuture->markCompleted(handleError(e, messageType, id));
}
Please let us know if we missed any. Thanks! |
st178431 | My local development version is 1.5.0, my test version is your latest docker build.
It's really sad to hear that currently pytorch rpc cannot handle the 1st and 2nd conditions, since that's what my application code is designed to do. I will try to reproduce the 3rd condition with simpler code, but that might be very difficult since currently there is no way to log all events just before the "fatal abort" happens.
Anyway, thanks for your response! I still want to ask: what kind of P2P mechanism are you going to provide in the TensorPipe backend? could you please clarify your and your collegues’ plans a little bit more? |
st178432 | iffiX:
It’s really sad to hear that currently pytorch rpc cannot handle the 1st and 2nd condition, since that’s what my application code is designed to do.
Could you please elaborate more on the requirement? We understand it is important to provide failure-recovery + elasticity, but haven’t seen people explicitly requesting this yet, so that didn’t get into our top-priority in past releases. If this is a commonly required feature that might block many use cases, we will reprioritize work items and try to get this done sooner.
Anyway, thanks for your response! I still want to ask: what kind of P2P mechanism are you going to provide in the TensorPipe backend? could you please clarify your and your collegues’ plans a little bit more?
This will be released in v1.6 very soon. TensorPipe no longer requires rendezvous or full participation from all processes. So technically, crashed processes do not prevent the remaining processes from functioning correctly (not yet so in v1.6). And it should also be faster than the ProcessGroup-based RPC backend. Eventually, we will retire the ProcessGroup-based RPC backend and make the TensorPipe RPC backend the default option.
cc the main contributor of TensorPipe @lcw |
st178433 | Actually, it's just an experimental failure-tolerance feature. When I implemented it, I was expecting your rpc layer to be similar to a normal unreliable connection; that is, a lossy link between 2 processes will not affect any other processes: if it fails, it just fails silently between these 2 processes only and throws a detectable error. It doesn't have to be recoverable. Elasticity is also not required; currently it only deals with a preset number of processes. Suppose you have 100 work "roles": you may run them with 1 process or 200 processes; the system is fully distributed, can tolerate losses, and will reschedule a failed role to other healthy processes.
It's not an important core feature, although I spent quite some time implementing and testing it. I have removed it from my framework today, for now.
I have read the descriptions of TensorPipe; it's great to have a smart backend which can choose the best way to move data around transparently. Looking forward to it. |
st178434 | Sorry about the inconvenience!
iffiX:
a lossy link between 2 processes will not affect any other processes, if it fail, it just fail silently between these 2 processes only, and throws a detectable error. It doesn’t have to be recoverable. Elasticity is also not required, currently it only deals with a preset number of processes
I see. In this case, looks like we just need to:
remove the ProcessGroup usage from TensorPipe RPC agent
let TensorPipe throw proper errors.
@lcw did I miss anything? |
st178435 | If that is the case, I can try to restructure my code and submit this feature as a pull request to your distributed module, if you think this is valuable for your rpc module.
The feature is structured in the following way:
rpc layer <-> election layer -> role dispatcher layer <-> role runner <-> wrapped highlevel apis
High level apis works like:
WORLD (only initialize once, tell it what roles you want to run)
\-> create_collective_group(...) (create a sub group which supports irecv, isend, all_gather etc.)
\-> create_rpc_group(name, roles) (create a rpc group)
\-> get_rpc_group(name, role) (find a group handle created by a local role or remote role)
CollectiveGroup
\-> send
\-> recv
\-> isend
\-> irecv
...
\-> barrier
RpcGroup
\-> rpc_pair(key, value) (pair a value to the group, so that it can be accessed, locally or remotely)
\-> rpc_get_paired(key) (get a paired value in this group)
\-> rpc_sync
\-> rpc_async
\-> rpc_remote
\-> rpc_paired_class_sync (invoke a method on the paired class instance)
\-> rpc_paired_class_async
\-> rpc_paired_class_remote
\-> rpc_paired_model_sync (perform a forward operation on the registered model)
\-> rpc_paired_model_async
\-> rpc_paired_model_remote
Users may start a role, register a service on this role, and access this service from another role like:
class WorkerService(object):
# An example stateful service class
_count = 0
def counter(self):
self._count += 1
return self._count
class Worker(RoleBase):
def __init__(self, index):
super(Worker, self).__init__(index)
self.service = WorkerService()
self.group = get_world().create_rpc_group("Employees", roles=[
"Worker:0", "Worker:1", "Worker:2"
])
# expose service
self.group.rpc_pair("worker_service", self.service)
def main(self):
while True:
pass
class Manager(RoleBase):
def __init__(self, index):
super(Manager, self).__init__(index)
self.group = get_world().get_rpc_group("Employees", "Worker:{}".format(index))
def main(self):
for i in range(10):
self.group.rpc_paired_class_sync(
to="Worker:{}".format(self.role_index),
cls_method=WorkerService.counter,
name="worker_service",
)
if __name__ == "__main__":
# suppose process rank is `rank`
# suppose there are 3 processes
# start 3 managers and 3 workers
world = World(world_size=3, rank=rank,
roles={"Worker": (Worker, 3),
"Manager": (Manager, 3)},
rpc_timeout=0.5, election_timeout=0.3, logging=True)
What do you think about this design? |
st178436 | Thanks a lot for this proposal!!
There was a proposal for the role-based RPC design with some similar concepts from @Kiuk_Chung, we might be able to join forces there.
@iffiX Regarding the PR submission, we are grateful for your intent to contribute! Before making decisions on the commitment, may I ask how much bandwidth you will be able to allocate to this project? Since this can be a major new feature, most likely we will need to go through the formal design proposal review --> API review --> code review process and will also need examples (in the pytorch/examples repo) and tutorials (in the pytorch/tutorials repo) for it. This can become a very involved effort, so we will need to make sure we have all the things we need to hit the finish line. |
st178437 | Hmm, I can do that. I will finish testing the rest of my framework first (it might take a week or so), then pour major effort into completing this; currently I am studying & working at home full time. I am really proud to hear some affirmation!
So is there any document describing the whole proposal process? It would be much easier if there is a template to refer to. |
st178438 | BTW, I would also like to see the proposal of your role-based RPC design from Kiuk_Chung |
st178439 | Awesome!! And yes, let’s wait for comments from @Kiuk_Chung
If this looks OK, we can then start from publishing an RFC design issue. Here are some examples:
github.com/pytorch/pytorch: [RFC] RPC Based Distributed Model Parallel
github.com/pytorch/pytorch: [RFC] Join-based API to support uneven inputs in DDP
github.com/pytorch/pytorch: [RFC] Async User Function for RPC
github.com/pytorch/pytorch: [RFC] DDP Communication Hook
This will draw people into the discussion. |
st178440 | @iffiX, @mrshenli I’ll publish the torch.distributed.app proposal that we’ve reviewed internally as a RFC on github shortly - just need to format it in markdown style. |
st178441 | @iffiX – here it is: https://github.com/pytorch/pytorch/issues/41425 4. Looking forward to collaborating on this! |
st178442 | @iffiX A lot of interesting stuff here, let me get to it in order.
Your points #1 (failure of a node) and #2 (failure of a link) are indeed handled poorly by the ProcessGroup backend as they bring all other nodes and links down with them. The TensorPipe backend is more resilient and only fails the affected requests. At this stage, a failed link will remain failed forever (although we’re discussing how to improve this in https://github.com/pytorch/pytorch/issues/40936, please chime in there). A failed node is harder to deal with, as nodes can be stateful and thus restarting it doesn’t mean that the other nodes can resume talking to it as if nothing had happened. The current approach chosen by Elastic is to restart all nodes when one fails. That’s drastic but safe. Also, even in the TensorPipe backend, we currently have nodes exchange their addresses at the beginning, so if a node (re-)joins later it won’t be able to make its address known to the other ones.
The other point where the TensorPipe backend performs a collective operation among all nodes is during graceful shutdown. It is an intrinsic requirement of the API that this method operates like a barrier. Currently that is implemented using process groups and thus suffers from their problems, but even if we reimplemented it on top of TensorPipe it wouldn’t be much different: a failed link/node would cause all others to fail too. I don’t see a way around it while still upholding that requirement. However, if you have your own logic on the nodes’ lifetimes, you probably can do an ungraceful shutdown and thus avoid this whole problem entirely: in that case the TensorPipe backend never uses its process group so it doesn’t matter if it fails.
As for your point #3, to me it looks like we’re raising an exception in a function marked as noexcept. If that’s the case it should be an easy fix, but it would greatly help if you could provide a C++ stack trace. Just attach GDB to the process, set up exception catching (catch throw) and then let the process resume as usual (continue). When GDB then catches an exception you can get the stack trace through backtrace. There may be harmless exceptions that are fired before that last fatal one, so please make sure you get the right one. It would also help if your build has debug symbols, I don’t know if the default PyTorch one does.
Point #4 is particularly tricky, and also relates to the ones above. Dealing with exceptions is hard because, in principle, the RPC module is available to both C++ and Python, which have different exception types and thus it’s hard to preserve and convert between them. Moreover, the future objects have an error state which only contains a string message (see here), so there is no way to store a “error type” in there. The way the RPC module currently propagates the type from the remote UDF to the client is a bit hacky: it involves marking the future as successful, with a result value that contains the exception type and message, and then use an “unwrap” function that fires when accessing that value that raises it instead of returning it (see here). So it means that an errored UDF will return a successful future, but from Python there’s no way to distinguish them. A real failed future (which you get from example in case of an I/O error) will instead always raise a RuntimeError just because it has no way of specifying anything else.
All these problems are in principle solvable (storing a Python exception in C++ as pybind11::error_already_set, converting a C++ exception to Python using pybind11), but this requires changes to the future class which is a top-level util of PyTorch used also by other modules (like JIT) and that makes it hard to change it. |
st178443 | I think dropping all processes if one connection has failed is pretty costly, but acceptable under most conditions.
I will try to collect the stack trace for point #3 once I got time. |
st178444 | Hello, I'm trying to load my data with the DistributedSampler class in order to train a model on multiple GPUs. The model is wrapped with DistributedDataParallel. The data is successfully loaded on my 2x GPUs. Here are my code snippets:
# distributed learning
if torch.cuda.device_count() > 1:
model = torch.nn.parallel.DistributedDataParallel(self.net, device_ids=[range(self.num_gpus)])
else:
model = self.net
iteration = infos["iteration"]
epoch_start = infos["epoch"]
model.train()
for epoch in range(epoch_start, cfg.TRAIN.MAX_EPOCH):
self.setup_dataloader(epoch=epoch)
for _, blobs in enumerate(self.loader):
print("blobs.size", len(blobs))
print(blobs)
loss_dict = model.forward(blobs)
blobs is a list of dicts which include tensors, objects in images and other additional information (it's an object detection task based on Faster R-CNN).
After calling model.forward(blobs), an error is reported:
TypeError: list indices must be integers or slices, not range
The corresponding traceback of this error:
Traceback (most recent call last):
File "tools/train.py", line 456, in <module>
trainer.train(args)
File "tools/train.py", line 372, in train
loss_dict = model.forward(blobs)
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/distributed.py", line 445, in forward
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/distributed.py", line 471, in scatter
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 36, in scatter_kwargs
inputs = scatter(inputs, target_gpus, dim) if inputs else []
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 28, in scatter
res = scatter_map(inputs)
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 15, in scatter_map
return list(zip(*map(scatter_map, obj)))
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 17, in scatter_map
return list(map(list, zip(*map(scatter_map, obj))))
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 19, in scatter_map
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 15, in scatter_map
return list(zip(*map(scatter_map, obj)))
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/scatter_gather.py", line 13, in scatter_map
return Scatter.apply(target_gpus, None, dim, obj)
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/_functions.py", line 88, in forward
streams = [_get_stream(device) for device in target_gpus]
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/_functions.py", line 88, in <listcomp>
streams = [_get_stream(device) for device in target_gpus]
File "/vol/.conda/envs/.env36/lib/python3.6/site-packages/torch/nn/parallel/_functions.py", line 115, in _get_stream
if _streams[device] is None:
TypeError: list indices must be integers or slices, not range
As far as I know, if the input of the model is tensor data, there is no problem training the model on multiple GPUs. Might the problem be that a list is used to pass the data into model.forward()?
It works if I launch the model on a single GPU only.
Thanks in advance. |
st178445 | Solved by Anakin in post #4
Hey @mrshenli, Thanks for your reply.
I solved this issue already. Basically, there are two methods (three, plus your idea:D).
These ideas are mostly based on DistributedSampler and DistributedDataParallel . Using the DistributedSampler, a subset of data can be loaded correctly in this process. … |
st178446 | I made a typo when passing devices: instead of torch.cuda.set_device(args.local_rank), I passed the wrong parameter, torch.cuda.set_device(range(2)).
After fixing this typo, I still have the same problem as posted as How to scatter list data on multiple GPUs 8
Thanks for any inputs. |
st178447 | Checked the scatter implementation, and looks like it can scatter tensors in dictionaries properly. What is the structure of the blobs var that scatter fails to handle?
github.com
pytorch/pytorch/blob/13dd53b3d2ba16d353ff1fe3c535c9dd79c19e8d/torch/nn/parallel/scatter_gather.py#L5-L31 3
def scatter(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return Scatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
>>> x = {1:1, 2:2}
>>> x.items()
dict_items([(1, 1), (2, 2)])
>>> import torch
>>> from torch.nn.parallel.scatter_gather import scatter
>>> scatter(x, target_gpus=[0, 1])
[{1: 1, 2: 2}, {1: 1, 2: 2}]
>>> y = {1: torch.zeros(4, 4).to(0), 2: torch.zeros(4, 4).to(0)}
>>> scatter(y, target_gpus=[0, 1])
[{1: tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.]], device='cuda:0'), 2: tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.]], device='cuda:0')}, {1: tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.]], device='cuda:1'), 2: tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.]], device='cuda:1')}] |
st178448 | Hey @mrshenli, Thanks for your reply.
I solved this issue already. Basically, there are two methods (three, plus your idea:D).
These ideas are mostly based on DistributedSampler and DistributedDataParallel . Using the DistributedSampler, a subset of data can be loaded correctly in this process.
We can either use torch.multiprocessing to spawn a process manually to launch our training procedure,
def main():
# Args definition and loading
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8080'
mp.spawn(train, nprocs=args.num_gpus, args=(args,))
def train(gpu, args):
# Initialize the distributed package&group
torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=args.world_size, rank=args.local_rank)
torch.manual_seed(0)
# Initial model
model = RCNN(cfg)
torch.cuda.set_device(gpu)
model.cuda(gpu)
# Wrap the model
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu])
# Initial dataset
dataset = Dataset(cfg, is_train=True, split="train")
# Kick-off iteration
for epoch in range(cfg.TRAIN.MAX_EPOCH):
loader = setup_dataloader(dataset, is_distributed=True, epoch=epoch)
for _, blobs in enumerate(loader):
loss_dict = model.forward(blobs)
if __name__ == "__main__":
main()
Passing args by using:
python3 tools/train_mp.py --num_gpus 2
or we can use what torch already encapsulated:
class TrainingWrapper(object):
def __init__(self, args):
self.setup_logging()
self.args = args
# Initialize the distributed package&group
self.num_gpus = torch.cuda.device_count()
print("world_size:%d\local_rank:%d" % (args.num_gpus, args.local_rank))
self.distributed = self.num_gpus > 1
if self.distributed:
torch.distributed.init_process_group(
backend="nccl",
init_method="env://",
world_size=args.num_gpus,
rank=args.local_rank
)
self.device = args.local_rank
# This line is very important!
torch.cuda.set_device(self.device)
# Initial model
model = RCNN(cfg)
# Distributed learning
if self.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.device),
device_ids=[self.args.local_rank],
output_device=[self.args.local_rank],
broadcast_buffers=False
)
else:
model = torch.nn.DataParallel(model).cuda()
def train(self):
# Initial dataset
dataset = Dataset(cfg, is_train=True, split="train")
# Kick-off iteration
for epoch in range(cfg.TRAIN.MAX_EPOCH):
loader = setup_dataloader(dataset, is_distributed=True, epoch=epoch)
for _, blobs in enumerate(loader):
loss_dict = model.forward(blobs)
if __name__ == "__main__":
trainer = TrainingWrapper()
trainer.train()
And passing following args for this above script:
python3 -m torch.distributed.launch --nproc_per_node=2 tools/train_ddp.py --exp_id boa --config_file experiments/config.yaml --num_gpus 2
So far, I can just use a list of dict objects to feed the data into the model.
Hopefully it helps somebody else somehow. |
st178449 | I have the following code below which uses Joblib’s Parallel and I want to implement this in PyTorch and run it with GPUs. I am reading through PyTorch’s DistributedDataParallel documentation 2, but I can’t seem to figure this out.
import numpy as np
import torch
from joblib import Parallel, delayed
from torch.nn.parallel import DistributedDataParallel as DDP
X = np.array([[1, 3, 2, 3], [2, 3, 5, 6], [1, 2, 3, 4]])
X = torch.DoubleTensor(X).cuda()
def X_power_func(j):
X_power = X**j
return X_power
results = Parallel(n_jobs = 4)(delayed(X_power_func)(j) for j in range(8)) # how do I map this to
# PyTorch's
# DistributedDataParallel
Any help would really be appreciated. Many thanks in advance! |
st178450 | Solved by iffiX in post #2
use torch.multiprocessing.pool |
st178451 | Thanks @iffiX. Do you know in which situations we would use torch.multiprocessing versus DistributedDataParallel? |
st178452 | DistributedDataParallel is designed to let the model run its forward and backward passes asynchronously across processes; internally it synchronously performs gradient reduction and parameter updates.
torch.multiprocessing is a simple derivative of the vanilla multiprocessing module; it only replaces the default queue implementation used in the vanilla module and implements an efficient way to pass around CUDA tensors (the data remains on the GPU; only a pointer to the data is passed to subprocess pool workers).
Pool is designed for carrying out general unit tasks by a group of homogeneous workers with no context, such as your:
def X_power_func(j):
X_power = X**j
return X_power
Pool is essentially the same as joblib.
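For the snippet above, a minimal sketch with torch.multiprocessing.Pool could look like this (kept on CPU for simplicity; sharing CUDA tensors with pool workers requires the spawn start method and more care):
import torch
import torch.multiprocessing as mp

X = torch.DoubleTensor([[1, 3, 2, 3], [2, 3, 5, 6], [1, 2, 3, 4]])

def X_power_func(j):
    # each worker computes one elementwise power of X
    return X ** j

if __name__ == "__main__":
    with mp.Pool(processes=4) as pool:
        results = pool.map(X_power_func, range(8))
    print(len(results))  # 8 tensors
The call pattern is essentially the same as the joblib version. |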
st178453 | Ok many thanks @iffiX for the detailed answer.
So essentially use DistributedDataParallel for neural network stuff (which involves things like forward and backward processes) that you want to parallelize, and use torch.multiprocessing for non-neural network stuff that you want to parallelize. |
st178454 | say I have:
t = torch.tensor(0)
req = dist.irecv(t, rank)
how can I check if I have messages from some rank (stored in a buffer or something)? From C++ examples I've seen, there are probe commands:
MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
to check message length.
One possible solution would be to check if tensor != 0, but that doesn't seem to be the right way to do it. |
st178455 | We probably should set the complete_ flag in the send/recv work object and expose that as a Python API. Similar issue exists in our Future API.
@Yanli_Zhao mentioned that this is also asked in https://github.com/pytorch/pytorch/issues/30723 1 |
st178456 | I am trying to train over a custom parameter server, but it checks all the boxes for setting weights and updating gradients… but for some reason it won’t improve accuracy over my data.
I’m using redisai as my database to host all my training/test data and global and all my worker weights/grads. I know the opinion of using an external database is a performance hit, but this is something I want to do for my own experience.
The PS holds the global model that the workers read from to their local model and update before pushing gradients back to the PS to update the global model.
PS:
Setting weights:
for k,v in model.state_dict().items():
conn.tensorset(k,v.cpu().numpy())
Getting gradients:
msd = model.state_dict()
for name, param in model.named_parameters():
msd[name].grad = conn.tensorget(f'{name}_grad')
model.load_state_dict(msd)
optimizer.step()
optimizer.zero_grad()
Worker:
Getting weights:
lmsd = model.state_dict()
for k,v in model.state_dict().items():
lmsd[k].data.copy_(conn.tensorget(f'{k}_data'))
model.load_state_dict(lmsd)
Setting grads:
for name, param in model.named_parameters():
conn.tensorset(f'{name}_grad', param.grad.data.cpu().numpy())
I can’t honestly figure out why my global model won’t improve.
I have a workaround that involves the global model making a single backward pass after setting the gradients from the worker model (as if to accumulate them), and that seems to be working; but I can't fully understand how or why.
msd = model.state_dict()
for name, param in model.named_parameters():
msd[name].grad = conn.tensorget(f'{name}_grad')
model.load_state_dict(msd)
# build a single batch input
out = model(input)
criterion(out, label).backward()
optimizer.step()
optimizer.zero_grad()
Does the grad_fn need to be retained for the optimizer to make an update to the weights? I didn’t think it did and that was only at the gradient setting level during the backward pass.
Making a backward pass in the PS seems counter intuitive to the purpose and general workflow of the PS.
Hopefully somebody has some insight as to why the PS is not improving without a backward pass. |
st178457 | Solved by iffiX in post #2
The first evident error in your code is:
msd = model.state_dict()
for name, param in model.named_parameters():
msd[name].grad = conn.tensorget(f'{name}_grad')
model.load_state_dict(msd)
Since load_state_dict will not load the gradient, but only load registered nn.Parameter() and buffers of mod… |
st178458 | The first evident error in your code is:
msd = model.state_dict()
for name, param in model.named_parameters():
msd[name].grad = conn.tensorget(f'{name}_grad')
model.load_state_dict(msd)
Since load_state_dict will not load the gradient, but only load registered nn.Parameter() and buffers of model, you will have to iterate parameters of your model directly and put gradients there like:
...
# Assign gradients to the managed model and
# perform optimization.
if self.model is not None and self.optimizer is not None:
self.optimizer.zero_grad()
with t.no_grad():
for k, v in self.model.named_parameters():
v.grad = grad_dict[k].to(v.device)
self.optimizer.step()
Therefore, the "solution" you have discovered is basically performing an optimization step on your "PS" end, but your pushed gradients are not utilized. |
st178459 | Thanks, @iffiX
That definitely helps leverage the workers' gradients: the model finds solutions a lot faster now that it is actually able to explore different possibilities.
I tried it without the backward step at the PS and it causes predictions to become nan. However, if I keep the backward step in and actually (now) accumulate the gradients from the worker and the single batch in the PS, then the model is able to find solutions.
Once setting the gradient directly into the global model, technically I should be able to perform an optimizer step and update the global weights.
Any idea why the predictions result in nan when there isn’t a backward step in the PS? |
st178460 | According to my observation, nan could be caused by a lot of things: inappropriately designed reward, the model itself, invalid input data etc.
I would suggest you print the .grad attribute of every parameter, along with its source (worker rank); normally, just before the nan occurs you will see several ridiculously large gradients like 1e8 or 1e11.
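Something like this (assuming the worker rank is in scope):
for name, p in model.named_parameters():
    if p.grad is not None:
        print(rank, name, p.grad.abs().max().item())
Ridiculously large values here usually point at the offending worker. |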
st178461 | I think I fixed that issue. I didn’t seem to be passing the right weights from the PS to the workers.
for k, v in self.model.state_dict().items():
self.conn.tensorset(k, v.cpu().numpy())
Which is what I wrote above, but I actually had v.data.cpu().numpy() in my code.
Now my problem is that my model gives empty predicts.
I am distributing a UNet for image segmentation. I’m utilizing gradient accumulation as a tradeoff for the framework performance using a redisai db. Even though the weights and gradients are being passed properly now, my model is decreasing it’s loss… but also accuracy (IoU) md resulting in empty predictions.
It might be the gradient accumulation and batch normalization or it could be the ReLU activations on the decoding half of the model.
Unless someone knows how UNet's nuances behave over a distributed framework? |
st178462 | Hmmm, I have not tested UNet in a distributed scenario, that’s the realm of real scientific studies
Maybe the computer vision subreddit is a better place? Or try asking a new question in the "computer vision" section of the PyTorch forum? |
st178463 | Distributed UNet over Parameter Server Output Empty vision
I have distributed a custom UNet over a parameter server (PS) (over a network and not locally distributed). The workers receive weights, compute the gradient and push it back to the PS.
I am accumulating gradients on workers to minimize network usage. After a few gradient pushes, the UNet starts outputting empty predicting as loss decreases but so does accuracy (testing the framework with interval model testing).
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.q…
Link to that question, for future readers. |
st178464 | Bug
I have only a functional description of the bug. Still trying to make a MWE.
Sometimes when using an Embedding in a model with DataParallel I hit errors like so:
x = ... # shape: [<batch part>, <something>]
y = self.embed(x) # shape: [<WRONG batch part>, <something>, <emb dim>]
#^ the return value of the Embedding is "incomplete" on some GPUs.
# Noticed it most on cuda:1, but that shouldn't matter.
# E.g. <batch> = 2000, <wrong batch> = 11
To Reproduce
Steps to reproduce the behavior:
Have a model with Embedding.
Use DataParallel on the model such that you’re close to saturating your system.
Things shouldn’t end up working, if the bug is reproducible.
You don’t need anything fancy. Just some pretrained model in eval mode and pass it some input.
Expected behavior
DataParallel models behave functionally identically to normal models, convergence and gradient-descent questions notwithstanding.
Environment
PyTorch Version (e.g., 1.0): 1.5 ~ 1.6.x-dev
OS (e.g., Linux): Ubuntu 16.04 LTS
How you installed PyTorch (conda, pip, source): conda
Build command you used (if compiling from source): N/A
Python version: 3.7.7
CUDA/cuDNN version: 10.2
GPU models and configuration: Geforce GTX Titan X * 2
Any other relevant information: N/A |
st178465 | Hey @Enamex, I cannot reproduce the error with the following code (mostly borrowed from this tutorial).
Could you please share a min repro of this error? Thanks!
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
for i in range(len(test_sentence) - 2)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])
vocab = set(test_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
class NGramLanguageModeler(nn.Module):
def __init__(self, vocab_size, embedding_dim, context_size):
super(NGramLanguageModeler, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.linear1 = nn.Linear(context_size * embedding_dim, 128)
self.linear2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
embeds = self.embeddings(inputs).view((1, -1))
out = F.relu(self.linear1(embeds))
out = self.linear2(out)
log_probs = F.log_softmax(out, dim=1)
return log_probs
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
model = torch.nn.DataParallel(model.to("cuda:0"))
for epoch in range(10):
total_loss = 0
for context, target in trigrams:
# Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
# into integer indices and wrap them in tensors)
context_idxs = torch.tensor([word_to_ix[w] for w in context], dtype=torch.long)
context_idxs = torch.stack([context_idxs, context_idxs])
# Step 2. Recall that torch *accumulates* gradients. Before passing in a
# new instance, you need to zero out the gradients from the old
# instance
model.zero_grad()
# Step 3. Run the forward pass, getting log probabilities over next
# words
log_probs = model(context_idxs.to("cuda:0")).cpu()
# Step 4. Compute your loss function. (Again, Torch wants the target
# word wrapped in a tensor)
#loss = loss_function(log_probs, torch.tensor([word_to_ix[target]], dtype=torch.long))
# Step 5. Do the backward pass and update the gradient
log_probs.sum().backward()
optimizer.step()
# Get the Python number from a 1-element Tensor by calling tensor.item()
#total_loss += loss.item()
#losses.append(total_loss) |
st178466 | I’ll try to get an example written today!
If it helps, the models I was trying to evaluate are fairseq-based (I had to dive a bit to get the actual nn.Module out from underneath the tasks…) and are loaded using Model.from_pretrained(...). |
st178467 | On further investigation, it seems to be a problem with LSTM/RNN?
They’re getting split on the sequence dimension instead of the batch dimension when in batch_first=False mode. I don’t own the module I’m trying to run in parallel, and this error comes from deep in its guts, so I’m not sure where to go from there. |
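If the split dimension really is the culprit, two hedged workarounds (not verified against the fairseq module in question) come to mind: pass dim=1 to DataParallel so it scatters along the batch dimension of seq-first tensors, or wrap the module so that it looks batch-first from the outside. BatchFirstWrapper below is a hypothetical sketch and assumes the wrapped module returns a single seq-first tensor:
import torch
import torch.nn as nn

# Option 1: if every input/output keeps the batch on dim 1, let DataParallel
# scatter/gather on that dimension directly:
#   parallel_model = nn.DataParallel(model, dim=1)

# Option 2: present a batch-first interface so DataParallel can scatter on dim 0.
class BatchFirstWrapper(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x):
        # x arrives as (batch, seq, ...); the inner module expects (seq, batch, ...).
        out = self.module(x.transpose(0, 1))
        return out.transpose(0, 1)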