# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Utilities for models."""

import math

import torch

from megatron import get_args
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.accelerator import get_accelerator

def init_method_normal(sigma):
    """Init method based on N(0, sigma)."""
    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)

    return init_
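
# Example usage (illustrative sketch; the 0.02 value is an assumed typical init
# std, not taken from this file):
#   init_fn = init_method_normal(0.02)
#   weight = torch.empty(1024, 1024)
#   init_fn(weight)  # fills `weight` in place from N(0, 0.02**2)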

def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))."""
    std = sigma / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=std)

    return init_
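
# The std is divided by sqrt(2 * num_layers) so that outputs accumulated over the
# residual connections (two per transformer layer) keep roughly constant variance.
# Example usage (illustrative sketch; sigma=0.02 and num_layers=24 are assumed values):
#   out_init = scaled_init_method_normal(0.02, 24)
#   out_init(torch.empty(4096, 1024))  # std = 0.02 / sqrt(48)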

def gather_and_init(param, init_method):
    """Gather a (possibly ZeRO-partitioned) parameter and initialize it on rank 0."""
    with GatheredParameters(param, modifier_rank=0):
        init_method(param)
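
# Example usage (illustrative sketch): under ZeRO stage 3 the parameter data is
# partitioned across ranks, so it is gathered first and initialized on rank 0.
# The module and init std below are assumed, not taken from this file.
#   embedding = torch.nn.Embedding(50304, 1024)
#   gather_and_init(embedding.weight, init_method_normal(0.02))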

def perform_masking(attention_scores, attention_mask):
    """Apply the attention mask in place: fill masked positions with -10000.0 for
    boolean masks, otherwise treat the mask as an additive bias."""
    if attention_mask.dtype == torch.bool:
        attention_scores.masked_fill_(attention_mask, -10000.0)
    else:
        attention_scores.add_(attention_mask)
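
# Example (illustrative sketch): a boolean mask marks positions to hide, while an
# additive mask already holds 0 / -10000-style biases. The shapes below are assumed.
#   scores = torch.randn(1, 1, 4, 4)
#   bool_mask = torch.triu(torch.ones(1, 1, 4, 4, dtype=torch.bool), diagonal=1)
#   perform_masking(scores, bool_mask)  # future positions are filled with -10000.0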

def attention_mask_func(attention_scores, attention_mask):
    """Mask attention scores, truncating the mask to the actual sequence length
    when curriculum learning shortens sequences on the fly."""
    args = get_args()
    if args.curriculum_learning_legacy or args.data_efficiency_curriculum_learning:
        attention_mask_ = attention_mask
        actual_seqlen = attention_scores.size()[2]
        if actual_seqlen != attention_mask_.size()[2]:
            # attention_mask has size [1, 1, seqlen, seqlen]
            attention_mask_ = attention_mask_[:, :, :actual_seqlen, :actual_seqlen].contiguous()
        perform_masking(attention_scores, attention_mask_)
    else:
        perform_masking(attention_scores, attention_mask)
    return attention_scores
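
# Example (illustrative sketch): this function is typically passed as the mask_func
# of the attention softmax. It calls get_args(), so megatron arguments must already
# have been parsed. Shapes are assumed as [batch, heads, seqlen, seqlen] for the
# scores and [1, 1, seqlen, seqlen] for the mask.
#   scores = torch.randn(2, 16, 128, 128)
#   mask = torch.triu(torch.ones(1, 1, 128, 128, dtype=torch.bool), diagonal=1)
#   masked = attention_mask_func(scores, mask)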

def get_linear_layer(rows, columns, init_method, gather_params_on_init=False):
    """Simple linear layer with weight initialization."""
    layer = torch.nn.Linear(rows, columns,
                            device=get_accelerator().current_device_name(),
                            dtype=get_args().params_dtype)
    if get_args().perform_initialization:
        with GatheredParameters(layer.weight, modifier_rank=0, enabled=gather_params_on_init):
            init_method(layer.weight)
    with torch.no_grad():
        with GatheredParameters(layer.bias, modifier_rank=0, enabled=gather_params_on_init):
            layer.bias.zero_()
    return layer
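
# Example usage (illustrative sketch; assumes megatron arguments have already been
# parsed so that get_args() works, and the sizes are arbitrary):
#   pooler = get_linear_layer(1024, 1024, init_method_normal(0.02))
#   pooled = pooler(torch.randn(8, 1024, device=pooler.weight.device,
#                               dtype=pooler.weight.dtype))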

def gelu_impl(x):
    """OpenAI's gelu implementation."""
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))


def openai_gelu(x):
    return gelu_impl(x)
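
# The constant 0.7978845608028654 is sqrt(2/pi), so this computes the tanh
# approximation of GELU:
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# noting that sqrt(2/pi) * x * (1 + 0.044715 * x * x) == sqrt(2/pi) * (x + 0.044715 * x**3).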

# This is actually the Python equivalent of torch.nn.functional.gelu(),
# also with type hints for the ONNX exporter.
def erf_gelu(x):
    return x * 0.5 * (torch.erf(x / 1.41421).to(dtype=x.dtype) + torch.ones_like(x).to(dtype=x.dtype))
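
# Quick check (illustrative sketch): this should stay numerically close to the
# built-in exact GELU, since erf(x / sqrt(2)) is what torch.nn.functional.gelu uses
# and 1.41421 approximates sqrt(2).
#   x = torch.randn(10)
#   torch.testing.assert_close(erf_gelu(x), torch.nn.functional.gelu(x), atol=1e-4, rtol=1e-4)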