import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np

class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
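# Illustrative example (values not in the original source): with three classes,
# one_hot_encoding(torch.tensor([0, 2, 1]), 3) returns
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])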
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
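# For example, outputs of F.log_softmax are logs of probabilities in [0, 1], so every entry
# is <= 0 and passes this check; raw, unnormalized logits generally do not.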
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
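# Illustrative behaviour (example values, not in the original source): tensors sharing a
# shape and dtype produce the same key and therefore hit the same cache entry, e.g.
#     generate_tensor_key(torch.zeros(4, 3))  # ((4, 3), 'torch.float32')
#     generate_tensor_key(torch.ones(4, 3))   # ((4, 3), 'torch.float32') -> same key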
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
        #KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
        # SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # example usage with the pytorch autograd profiler
# nebula = Nebula()
# with torch.autograd.profiler.profile() as prof:
#     loss = nebula.compute_loss(y_pred, y_true)
# print(prof.key_averages().table())

# === EXA-1-master :: exa/modular_components/lossFunctions/nebula/nebulav2.py ===
from setuptools import setup, find_packages
setup(
name = 'nebula',
packages = find_packages(exclude=[]),
version = '0.2.0',
license='MIT',
description = '1 Loss Function to rule them all!',
author = 'Agora',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/EXA/tree/master/exa/modular_components/lossFunctions/nebula',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'jax',
    'loss function',
"Multi-Modality AI"
],
install_requires=[
'einops',
'jax>=0.2.20'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)

# === EXA-1-master :: exa/modular_components/lossFunctions/nebula/setup.py ===
import torch
import torch.nn as nn
import numpy as np
#define the loss function class
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented")
#implement specific loss functions that inherit from LossFunction
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
"""
all pytorch loss functions
"""
#v1
# class Nebula(LossFunction):
# def __init__(self):
# self.loss_function = None
# def determine_loss_function(self, y_pred, y_true):
# ##implement logic to determine type of data and select the loss function
# #based on the shape of y_true or other criteria
# if len(y_true.shape) > 1 and y_true.shape[1] > 1:
# self.loss_function = CrossEntropyLoss()
# else:
# self.loss_function = MSELoss()
# #transform function data1 to -> data type loss function can understand?
# def compute_loss(self, y_pred, y_true):
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
# Example usage
# y_pred_classification = torch.tensor([[2.0, 1.0, 0.1], [1.0, 2.0, 0.1]])
# y_true_classification = torch.tensor([0, 1])
#v2
# GRADIENT BOOSTED
# greedy algorithm
#concurrency
#asynchrony
#CACHING FOR TRAINING --> THIS IS YOUR DATASET -> OKAY HERES LOSS FUNCTION -> DONT COMPUTE DETERMINE LOSS FUNCTION
#self healing loss function
#responsive loss function
# 1 loss function for any task
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
#op 1 check range of values in y_true
if is_classification is None:
unique_values = np.unique(y_true)
if len(unique_values) <= 10 and np.all(np.equal(np.mod(unique_values, 1), 0)):
is_classification = True
#==============================================>
        #opt2 - check the distribution of values in y_true
        # assuming a specific pattern indicates a classification problem
        # You can replace this with a more specific pattern check if needed
# value_counts = np.bincount(y_true.flatten().astype(int))
if is_classification is None:
value_counts = np.bincount(y_true.flatten().to(dtype=torch.int32).numpy())
if np.all(value_counts > 0):
is_classification = True
#==============================================>
#op 3 analyze the dimension of y_pred
if y_pred.ndim > 2:
#handle segmentation problem
pass
#==============================================>
#op4 -> check sparsity of y_true
#v1
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.1:
# #handle multi label classification problem
# pass
#v2
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.5:
# self.loss_function = torch.nn.BCEWithLogitsLoss()
#v3
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.5:
# self.loss_function = torch.nn.BCEWithLogitsLoss()
# self.compute_loss = self.loss_function
if is_classification is None:
sparsity = np.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
                self.loss_function = torch.nn.BCEWithLogitsLoss()
                self.compute_loss = self.loss_function
return
#==============================================>
#op5 analyze the relationship between y_pred and y_true
#v1
# correlation = np.corrcoef(y_pred.flatten(), y_true.flatten())[0, 1]
# if correlation > 0.8:
# is_classification = False
#v2
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred_flat.shape != y_true_flat.shape:
# y_pred_flat = y_pred_flat[:y_true_flat.shape]
# correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#v3
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred.flat.shape != y_true_flat.shape:
# y_pref_flat = y_pred_flat[:y_true_flat.size]
# correlation = np.corrcoef(y_pref_flat, y_true_flat)[0, 1]
#v4
y_pred_flat = y_pred.flatten().numpy()
y_true_flat = y_true.flatten().numpy()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.size]
correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#==============================================>
        # #op6 use domain_knowledge
# if self.domain_knowledge == "classification":
# is_classification = True
# elif self.domain_knowledge == "regression":
# is_classification = False
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
#==============================================>
#op7 analyze distribution of values in y_pred
        #assuming certainty indicates a classification problem
# if np.max(y_pred) > 0.9:
# is_classification = True
#v2
# if torch.max(y_pred) > 0.9:
# is_classification = True
#v3
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
#==============================================>
        #op8 check the balance of classes in y_true
#assuming imbalanced classes indicate a classification problem
# class_balance = value_counts / np.sum(value_counts)
# if np.any(class_balance < 0.1):
# is_classification = True
#v2
if is_classification is None:
class_balance = value_counts / np.sum(value_counts)
if np.any(class_balance < 0.1):
is_classification = True
#==============================================>
#op9 use a model selection technique
#this optimization requires a model and a dataset so its not implemented
# you can implement this op outside the determine_loss_function method
#==============================================>
#op10 leverage user input or metadata
# if self.user_input == "classification":
# is_classification = True
# elif self.user_input == "regression":
# is_classification = False
#v2
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# def compute_loss(self, y_pred, y_true):
#v1
# dataset_id = id(y_true)
# if dataset_id not in self.loss_function.cache:
# self.determine_loss_function(y_pred, y_true)
# self.loss_function_cache[dataset_id] = self.loss_function
# # self.determine_loss_function(y_pred, y_true)
# # return self.loss_function.compute_loss(y_pred, y_true)
# cached_loss_function = self.loss_function_cache[dataset_id]
# return cached_loss_function.compute_loss(y_pred, y_true)
#2
def compute_loss(self, y_pred, y_true):
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache: # Fix the attribute name here
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
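# Minimal usage sketch (illustrative values, kept commented out; not part of the original file):
# nebula = Nebula()
# y_pred = torch.randn(4, 3)
# y_true = torch.tensor([0, 2, 1, 0])
# loss = nebula.compute_loss(y_pred, y_true)  # few unique integer labels -> CrossEntropyLoss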

# === EXA-1-master :: exa/modular_components/lossFunctions/nebula/nebula.py ===
#using gradient boosted greedy algorithms to compute loss
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score
# Load your dataset
# X, y = load_your_data()
# For demonstration purposes, we'll use random data
X = np.random.rand(100, 10)
y = np.random.randint(0, 2, 100)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train an XGBoost model
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {
'objective': 'binary:logistic',
'eval_metric': 'logloss',
'seed': 42
}
bst = xgb.train(params, dtrain, num_boost_round=100, early_stopping_rounds=10, evals=[(dtest, 'test')])
# Make predictions on the same dataset
y_pred = bst.predict(dtest)
# Determine the loss function
y_pred_labels = np.round(y_pred)
accuracy = accuracy_score(y_test, y_pred_labels)
mse = mean_squared_error(y_test, y_pred)
print("Accuracy:", accuracy)
print("Mean Squared Error:", mse)
if accuracy > 0.9:
print("Use CrossEntropyLoss")
else:
print("Use MSELoss")

# === EXA-1-master :: exa/modular_components/lossFunctions/nebula/experimental/xgboostV3.py ===
import torch
import torch.nn as nn
import numpy as np
#define the loss function class
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented")
#implement specific loss functions that inherit from LossFunction
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
"""
all pytorch loss functions
"""
#v1
# class Nebula(LossFunction):
# def __init__(self):
# self.loss_function = None
# def determine_loss_function(self, y_pred, y_true):
# ##implement logic to determine type of data and select the loss function
# #based on the shape of y_true or other criteria
# if len(y_true.shape) > 1 and y_true.shape[1] > 1:
# self.loss_function = CrossEntropyLoss()
# else:
# self.loss_function = MSELoss()
# #transform function data1 to -> data type loss function can understand?
# def compute_loss(self, y_pred, y_true):
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
# Example usage
# y_pred_classification = torch.tensor([[2.0, 1.0, 0.1], [1.0, 2.0, 0.1]])
# y_true_classification = torch.tensor([0, 1])
#v2
# GRADIENT BOOSTED
# greedy algorithm
#concurrency
#asynchrony
#CACHING FOR TRAINING --> THIS IS YOUR DATASET -> OKAY HERES LOSS FUNCTION -> DONT COMPUTE DETERMINE LOSS FUNCTION
#self healing loss function
#responsive loss function
# 1 loss function for any task
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
def determine_loss_function(self, y_pred, y_true):
#op 1 check range of values in y_true
unique_values = np.unique(y_true)
if len(unique_values) <= 10 and np.all(np.equal(np.mod(unique_values, 1), 0)):
            is_classification = True
        else:
            is_classification = False
#==============================================>
        #opt2 - check the distribution of values in y_true
        # assuming a specific pattern indicates a classification problem
        # You can replace this with a more specific pattern check if needed
# value_counts = np.bincount(y_true.flatten().astype(int))
value_counts = np.bincount(y_true.flatten().to(dtype=torch.int32).numpy())
if np.all(value_counts > 0):
is_classification = True
#==============================================>
#op 3 analyze the dimension of y_pred
if y_pred.ndim > 2:
#handle segmentation problem
pass
#==============================================>
#op4 -> check sparsity of y_true
#v1
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.1:
# #handle multi label classification problem
# pass
#v2
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.5:
# self.loss_function = torch.nn.BCEWithLogitsLoss()
#v3
sparsity = np.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
#==============================================>
#op5 analyze the relationship between y_pred and y_true
#v1
# correlation = np.corrcoef(y_pred.flatten(), y_true.flatten())[0, 1]
# if correlation > 0.8:
# is_classification = False
#v2
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred_flat.shape != y_true_flat.shape:
# y_pred_flat = y_pred_flat[:y_true_flat.shape]
# correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#v3
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred.flat.shape != y_true_flat.shape:
# y_pref_flat = y_pred_flat[:y_true_flat.size]
# correlation = np.corrcoef(y_pref_flat, y_true_flat)[0, 1]
#v4
y_pred_flat = y_pred.flatten().numpy()
y_true_flat = y_true.flatten().numpy()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.size]
correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#==============================================>
        #op6 use domain_knowledge
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
#==============================================>
#op7 analyze distribution of values in y_pred
        #assuming certainty indicates a classification problem
# if np.max(y_pred) > 0.9:
# is_classification = True
#v2
if torch.max(y_pred) > 0.9:
is_classification = True
#==============================================>
        #op8 check the balance of classes in y_true
#assuming imbalanced classes indicate a classification problem
class_balance = value_counts / np.sum(value_counts)
if np.any(class_balance < 0.1):
is_classification = True
#==============================================>
#op9 use a model selection technique
#this optimization requires a model and a dataset so its not implemented
# you can implement this op outside the determine_loss_function method
#==============================================>
#op10 leverage user input or metadata
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
def compute_loss(self, y_pred, y_true):
dataset_id = id(y_true)
        if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
# y_pred_regression = torch.tensor([[2.5], [3.2]])
# y_true_regression = torch.tensor([[2.0], [3.0]])
# nebula = Nebula()
# loss_classification = nebula.compute_loss(y_pred_classification, y_true_classification)
# print("Nebula loss for classification:", loss_classification)
# loss_regression = nebula.compute_loss(y_pred_regression, y_true_regression)
# print("Nebula loss for regression:", loss_regression)
# v2 testing
# Example data for each optimization
y_true_classification = torch.tensor([0, 1, 0, 2, 1])
y_true_regression = torch.tensor([2.0, 3.5, 1.2, 4.8, 3.3])
y_pred_classification = torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05], [0.1, 0.2, 0.7], [0.3, 0.6, 0.1]])
y_pred_regression = torch.tensor([2.1, 3.6, 1.0, 4.9, 3.1])
# Example usage
nebula = Nebula()
y_pred_classification = torch.randn(5, 3)
y_true_classification = torch.tensor([1, 0, 2, 1, 0])
y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
# Convert y_true_multilabel to float
y_true_multilabel = y_true_multilabel.to(torch.float32)
# Optimization 1: Check the range of values in y_true
loss_classification_1 = nebula.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 1):", loss_classification_1)
loss_regression_1 = nebula.compute_loss(y_pred_regression, y_true_regression)
print("Nebula loss for regression (Optimization 1):", loss_regression_1)
# Optimization 2: Check the distribution of values in y_true
# (Assuming a specific pattern indicates a classification problem)
y_true_classification_2 = torch.tensor([0, 1, 0, 1, 0])
loss_classification_2 = nebula.compute_loss(y_pred_classification, y_true_classification_2)
print("Nebula loss for classification (Optimization 2):", loss_classification_2)
# Optimization 3: Analyze the dimensions of y_pred
# (Not applicable in this example, as it requires a segmentation problem)
# Optimization 4: Check the sparsity of y_true
# y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0]])
# y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
loss_multilabel = nebula.compute_loss(y_pred_classification, y_true_multilabel)
print("Nebula loss for multi-label classification (Optimization 4):", loss_multilabel)
# Optimization 5: Analyze the relationship between y_pred and y_true
y_true_regression_5 = torch.tensor([2.1, 3.6, 1.0, 4.9, 3.1])
loss_regression_5 = nebula.compute_loss(y_pred_regression, y_true_regression_5)
print("Nebula loss for regression (Optimization 5):", loss_regression_5)
# Optimization 6: Use domain knowledge
nebula_domain_classification = Nebula(domain_knowledge="classification")
loss_classification_6 = nebula_domain_classification.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 6):", loss_classification_6)
nebula_domain_regression = Nebula(domain_knowledge="regression")
loss_regression_6 = nebula_domain_regression.compute_loss(y_pred_regression, y_true_regression)
print("Nebula loss for regression (Optimization 6):", loss_regression_6)
# Optimization 7: Analyze the distribution of values in y_pred
# y_pred_classification_7 = torch.tensor([[0.95, 0.025, 0.025], [0.05, 0.9, 0.05], [0.9, 0.05, 0.05], [0.1, 0.1, 0.8], [0.1, 0.8, 0.1]])
y_pred_classification_7 = torch.randn(5, 3)
y_true_classification_one_hot = one_hot_encoding(y_true_classification, 3)
loss_classification_7 = nebula.compute_loss(y_pred_classification_7, y_true_classification)
print("Nebula loss for classification (Optimization 7):", loss_classification_7)
# Optimization 8: Check the balance of classes in y_true
y_true_classification_8 = torch.tensor([0, 0, 0, 1, 1])
loss_classification_8 = nebula.compute_loss(y_pred_classification, y_true_classification_8)
print("Nebula loss for classification (Optimization 8):", loss_classification_8)
# Optimization 10: Leverage user input or metadata
nebula_user_classification = Nebula(user_input="classification")
loss_classification_10 = nebula_user_classification.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 10):", loss_classification_10)
nebula_loss_regression = Nebula(user_input="regression")
loss_regression_10 = nebula_loss_regression.compute_loss(y_pred_regression, y_true_regression)
print(f"Nebula loss for regression (optimization 10) {loss_regression_10}") | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/nebulaV2.py |
import numpy as np
# Define the base LossFunction class
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplementedError("compute_loss method must be implemented in the derived class")
# Define specific loss function classes that inherit from LossFunction
class CrossEntropyLoss(LossFunction):
def compute_loss(self, y_pred, y_true):
softmax_pred = self.softmax(y_pred)
loss = -np.sum(y_true * np.log(softmax_pred))
return loss
def softmax(self, x):
exp_x = np.exp(x - np.max(x))
return exp_x / np.sum(exp_x, axis=1, keepdims=True)
class MeanSquaredErrorLoss(LossFunction):
def compute_loss(self, y_pred, y_true):
return np.mean((y_pred - y_true) ** 2)
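# The binary branch below needs a loss object exposing compute_loss; torch.nn.BCELoss does
# not provide one and expects tensors rather than the NumPy arrays used throughout this file.
# A minimal NumPy binary cross-entropy sketch to fill that role (this class name is new here,
# not part of the original code):
class BinaryCrossEntropyLoss(LossFunction):
    def compute_loss(self, y_pred, y_true):
        eps = 1e-12  # clip predictions to avoid log(0)
        y_pred = np.clip(y_pred, eps, 1 - eps)
        return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))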
#v1
# # Create a DynamicLossFunction class that inherits from the LossFunction base class
# class DynamicLossFunction(LossFunction):
# def __init__(self):
# self.loss_function = None
# def determine_loss_function(self, y_pred, y_true):
# # Implement logic to determine the type of data and select the appropriate loss function
# # For example, based on the shape of y_true or other criteria
# if y_true.shape[1] > 1:
# self.loss_function = CrossEntropyLoss()
# else:
# self.loss_function = MeanSquaredErrorLoss()
# def compute_loss(self, y_pred, y_true):
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
class DynamicLossFunction(LossFunction):
def __init__(self):
self.loss_function = None
def determine_loss_function(self, y_pred, y_true):
# Implement logic to determine the type of data and select the appropriate loss function
# Check if the problem is a classification or regression task
is_classification = self.is_classification_task(y_true)
# Check if the problem involves multiple classes or binary classes
is_multiclass = self.is_multiclass_problem(y_true)
# Select the appropriate loss function based on the problem type
if is_classification:
if is_multiclass:
self.loss_function = CrossEntropyLoss()
else:
                self.loss_function = BinaryCrossEntropyLoss()
else:
self.loss_function = MeanSquaredErrorLoss()
def is_classification_task(self, y_true):
# Check if the target variable is binary or consists of integers (indicating class labels)
return np.issubdtype(y_true.dtype, np.integer)
def is_multiclass_problem(self, y_true):
# Check if the problem involves multiple classes by counting the unique values in y_true
unique_values = np.unique(y_true)
return len(unique_values) > 2
def compute_loss(self, y_pred, y_true):
self.determine_loss_function(y_pred, y_true)
return self.loss_function.compute_loss(y_pred, y_true)
# Example usage
y_pred_classification = np.array([[2.0, 1.0, 0.1], [1.0, 2.0, 0.1]])
y_true_classification = np.array([[1, 0, 0], [0, 1, 0]])
y_pred_regression = np.array([[2.5], [3.2]])
y_true_regression = np.array([[2.0], [3.0]])
dynamic_loss_function = DynamicLossFunction()
loss_classification = dynamic_loss_function.compute_loss(y_pred_classification, y_true_classification)
print("Dynamic loss for classification:", loss_classification)
loss_regression = dynamic_loss_function.compute_loss(y_pred_regression, y_true_regression)
print("Dynamic loss for regression:", loss_regression) | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/nebula.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np

class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
        #KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
        # SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # example usage with the pytorch autograd profiler
# nebula = Nebula()
# with torch.autograd.profiler.profile() as prof:
#     loss = nebula.compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
        features = torch.as_tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
# Define the reward based on the loss
reward = -loss.item()
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def map_action_to_loss_function(action):
if action == 0:
return CrossEntropyLoss()
elif action == 1:
return MSELoss()
#add more loss functions as needed
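# Hypothetical extension (illustrative, not in the original): a third loss would be wired in
# by adding a branch above, e.g.
#     elif action == 2:
#         return L1Loss()
# and by enlarging the action space in LossFunctionEnv to spaces.Discrete(3) so the agent
# can actually select it.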
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
# Initialize the PPO agent
agent = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
# Train the agent
agent.learn(total_timesteps=10000)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}") | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/reinforcement/nebula.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np

class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
        #KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
        # SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # example usage with the pytorch autograd profiler
# nebula = Nebula()
# with torch.autograd.profiler.profile() as prof:
#     loss = nebula.compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
        features = torch.as_tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
        #define the reward based on the loss
max_loss = torch.max(self.y_pred).item() * len(self.y_pred)
# Define the reward based on the loss
reward = -loss.item() / max_loss
#2nd loss
# reward = np.exp(-loss.item())
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
import optuna
# Define the evaluation function
def evaluate_agent(agent, y_pred, y_true, n_episodes=10, max_steps=50):
    env = DummyVecEnv([make_env(y_pred, y_true)])
    rewards = []
    for _ in range(n_episodes):
        obs = env.reset()
        done = False
        episode_reward = 0
        steps = 0
        # LossFunctionEnv never sets done=True, so cap the rollout length explicitly
        while not done and steps < max_steps:
            action, _ = agent.predict(obs, deterministic=True)
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
            steps += 1
        rewards.append(episode_reward)
    return np.mean(rewards)
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
# Define the objective function
def objective(trial):
# Sample hyperparameters
learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2)
# Add more hyperparameters as needed
# Train the agent with the sampled hyperparameters
agent = A2C("MlpPolicy", env, learning_rate=learning_rate, gamma=0.50, policy_kwargs=policy_kwargs, verbose=1)
agent.learn(total_timesteps=10000)
# Evaluate the agent and return the performance metric
performance_metric = evaluate_agent(agent, y_pred_test, y_true_test)
return performance_metric
storage = optuna.storages.RDBStorage("sqlite:///example.db")
# Create an Optuna study and optimize the objective function
study = optuna.create_study(direction="maximize", storage=storage, study_name="my_study")
print(f"Study: {study}")
study.optimize(objective, n_trials=1)
best_hyperparameters = study.best_params
print(f"Study: {study} best hyperparameters: {best_hyperparameters}")
# Train the final agent with the best hyperparameters
final_agent = A2C("MlpPolicy", env, **best_hyperparameters, policy_kwargs=policy_kwargs, verbose=1)
final_agent.learn(total_timesteps=10000)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = final_agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss is the negative policy entropy (weighted by ent_coef in the total loss); it encourages exploration, so more negative values correspond to higher entropy and more exploration. You should focus on balancing this metric so the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula2.py |
import torch
import torch.nn as nn
# import torch.jit
import numpy as np
import torch.nn.functional as F
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
# KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
# loss = Nebula.compute_loss(y_pred, y_true)
# print(prof.key_average().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
from stable_baselines3.common.callbacks import BaseCallback
from torch.nn.utils import clip_grad_norm_
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.as_tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
# Define the reward as the negative loss, normalized by a rough upper bound on the loss
max_loss = torch.max(self.y_pred).item() * len(self.y_pred)
reward = -loss.item() / max_loss
#2nd loss
# reward = np.exp(-loss.item())
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
class CustomCallback(BaseCallback):
def __init__(self, patience, min_delta, initial_lr, decay_rate, decay_steps, max_norm, verbose=0):
super().__init__(verbose)
self.patience = patience
self.min_delta = min_delta
self.initial_lr = initial_lr
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.max_norm = max_norm
self.counter = 0
self.previous_performance = -np.inf
self.stop_training = False
def _on_step(self) -> bool:
# Early stopping
performance = np.mean([ep_info["r"] for ep_info in self.model.ep_info_buffer]) if len(self.model.ep_info_buffer) > 0 else -np.inf  # ep_info_buffer holds per-episode stats dicts
if performance - self.previous_performance < self.min_delta:
self.counter += 1
else:
self.counter = 0
if self.counter >= self.patience:
self.stop_training = True
self.previous_performance = performance
# Learning rate scheduler
step = self.num_timesteps
lr = self.initial_lr * (self.decay_rate ** (step / self.decay_steps))
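# Worked example with the values used when the callback is created below
# (initial_lr=0.01, decay_rate=0.9, decay_steps=1000):
# at step=1000 the lr becomes 0.01 * 0.9 = 0.009, and at step=2000 it becomes 0.01 * 0.9**2 = 0.0081.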
for param_group in self.model.policy.optimizer.param_groups:
param_group['lr'] = lr
# Gradient clipping
clip_grad_norm_(self.model.policy.parameters(), self.max_norm)
return not self.stop_training
# Create the agent first, then train it with the custom callback (early stopping, LR decay, gradient clipping)
agent = A2C("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
callback = CustomCallback(patience=10, min_delta=0.001, initial_lr=0.01, decay_rate=0.9, decay_steps=1000, max_norm=1.0)
agent.learn(total_timesteps=10000, callback=callback)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss is the negative policy entropy (weighted by ent_coef in the total loss); it encourages exploration, so more negative values correspond to higher entropy and more exploration. You should focus on balancing this metric so the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula3.py |
import torch
import torch.nn as nn
# import torch.jit
import numpy as np
import torch.nn.functional as F
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
# KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
# loss = Nebula.compute_loss(y_pred, y_true)
# print(prof.key_average().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.as_tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
# Define the reward based on the loss
reward = -loss.item()
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
# Initialize the PPO agent
# Architecture note: PPO could be used instead of A2C; SAC is not an option here because it requires a continuous (Box) action space, while this environment uses a Discrete action space.
# agent = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
#A2C
agent = A2C("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
# Train the agent
agent.learn(total_timesteps=10000)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss is the negative policy entropy (weighted by ent_coef in the total loss); it encourages exploration, so more negative values correspond to higher entropy and more exploration. You should focus on balancing this metric so the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | EXA-1-master | exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula1.py |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import pairwise_distances
import torch
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = np.linalg.norm(y_pred - y_true)
return geometric_difference
# Helper function to perturb the input data
def perturb_input_data(x, perturbation):
# Apply the perturbation to the input data and return the perturbed data
return x + perturbation
# Generic topological invariance function
# def topological_invariance(y_pred, y_true, metric='euclidean'):
# distance_matrix = pairwise_distances(y_pred, y_true, metric=metric)
# distance = np.sum(np.min(distance_matrix, axis=1)) # Use the sum of minimum distances as the discrepancy metric
# return distance
def topological_invariance(y_pred, y_true, **kwargs):
data_type = infer_data_type(y_pred, y_true)
if data_type == 'point_cloud':
return point_cloud_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'graph':
return graph_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'multi_modal':
return multi_modal_topological_invariance(y_pred, y_true, **kwargs)
else:
raise ValueError(f'Unsupported data type: {data_type}')
# def bottleneck_distance(distance_matrix_1, distance_matrix_2):
# # Step 1: Compute the persistence diagrams for both distance matrices
# rips_complex_1 = gd.RipsComplex(distance_matrix=distance_matrix_1, max_edge_length=np.inf)
# simplex_tree_1 = rips_complex_1.create_simplex_tree(max_dimension=2)
# persistence_diagram_1 = simplex_tree_1.persistence()
# rips_complex_2 = gd.RipsComplex(distance_matrix=distance_matrix_2, max_edge_length=np.inf)
# simplex_tree_2 = rips_complex_2.create_simplex_tree(max_dimension=2)
# persistence_diagram_2 = simplex_tree_2.persistence()
# # Step 2: Calculate the bottleneck distance between the two persistence diagrams
# bottleneck_distance_value = gd.bottleneck_distance(persistence_diagram_1, persistence_diagram_2)
# # Step 3: Return the bottleneck distance
# return bottleneck_distance_value
#v2
def bottleneck_distance(distance_matrix_1, distance_matrix_2):
# Step 1: Compute the persistence diagrams for both distance matrices
rips_complex_1 = gd.RipsComplex(distance_matrix=distance_matrix_1, max_edge_length=np.inf)
simplex_tree_1 = rips_complex_1.create_simplex_tree(max_dimension=2)
persistence_diagram_1 = np.array([pair[1] for pair in simplex_tree_1.persistence()])
rips_complex_2 = gd.RipsComplex(distance_matrix=distance_matrix_2, max_edge_length=np.inf)
simplex_tree_2 = rips_complex_2.create_simplex_tree(max_dimension=2)
persistence_diagram_2 = np.array([pair[1] for pair in simplex_tree_2.persistence()])
# Step 2: Calculate the bottleneck distance between the two persistence diagrams
bottleneck_distance_value = gd.bottleneck_distance(persistence_diagram_1, persistence_diagram_2)
# Step 3: Return the bottleneck distance
return bottleneck_distance_value
def infer_data_type(y_pred, y_true):
if y_pred.ndim == 2 and y_true.ndim == 2:
return 'point_cloud'
elif isinstance(y_pred, nx.Graph) and isinstance(y_true, nx.Graph):
return 'graph'
elif isinstance(y_pred, list) and isinstance(y_true, list):
return 'multi_modal'
else:
raise ValueError('Unsupported data type.')
# def infer_data_type(y_pred, y_true):
# Analyze the input data (y_pred and y_true) and determine the data type
# This function should return a string representing the data type, such as 'point_cloud', 'graph', or 'multi_modal'
# if y_pred.ndim == 2 and y_true.ndim == 2:
# # If both y_pred and y_true are 2D arrays, we can assume they represent point clouds or graphs
# if is_point_cloud(y_pred, y_true):
# return 'point_cloud'
# elif is_graph(y_pred, y_true):
# return 'graph'
# elif y_pred.ndim == 3 and y_true.ndim == 3:
# # If both y_pred and y_true are 3D arrays, we can assume they represent multi-modal data
# if is_multi_modal(y_pred, y_true):
# return 'multi_modal'
# raise ValueError("Unable to infer data type.")
#v3
def infer_data_type(y_pred, y_true):
if y_pred.ndim == 2 and y_true.ndim == 2:
return 'point_cloud'
elif isinstance(y_pred, nx.Graph) and isinstance(y_true, nx.Graph):
return 'graph'
elif isinstance(y_pred, list) and isinstance(y_true, list):
return 'multi_modal'
else:
raise ValueError('Unsupported data type.')
def point_cloud_topological_invariance(y_pred, y_true, **kwargs):
y_pred = torch.tensor(y_pred)
y_true = torch.tensor(y_true)
# Calculate the pairwise distance matrices for both point clouds
y_pred_distance_matrix = torch.cdist(y_pred, y_pred)
#v1
# y_true_distance_matrix = torch.cdist(y_true, y_true)
#v2
y_true_distance_matrix = torch.cdist(y_true.float(), y_true.float())
# Calculate the topological invariance metric, e.g., the bottleneck distance,
# using the GUDHI-based `bottleneck_distance` helper defined above
topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.numpy(), y_true_distance_matrix.numpy())
return topological_invariance_metric
def graph_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the graph edit distance between the predicted and true graphs
# You can use the NetworkX library for this
graph_edit_distance = nx.graph_edit_distance(y_pred, y_true)
return graph_edit_distance
def multi_modal_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the topological invariance metric for multi-modal data
# This could be a combination of different topological invariance metrics
# based on the specific problem requirements
# Example: Calculate the topological invariance for each modality and average the results
num_modalities = len(y_pred)
total_topological_invariance = 0.0
for i in range(num_modalities):
data_type = infer_data_type(y_pred[i], y_true[i])
topological_invariance_i = topological_invariance(y_pred[i], y_true[i], data_type=data_type)
total_topological_invariance += topological_invariance_i
average_topological_invariance = total_topological_invariance / num_modalities
return average_topological_invariance
# Note: the `bottleneck_distance` helper used in point_cloud_topological_invariance is defined above
# and relies on the GUDHI library to build Rips complexes and compare persistence diagrams.
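# A minimal, self-contained illustration of the GUDHI call used by the helper above, with two
# tiny hand-written persistence diagrams (lists of (birth, death) pairs); the numbers are arbitrary:
# import gudhi as gd
# diag_a = [(0.0, 1.0), (0.2, 0.8)]
# diag_b = [(0.0, 0.9), (0.25, 0.75)]
# print(gd.bottleneck_distance(diag_a, diag_b))  # a single non-negative float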
def complexity_reduction(network):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for layer in network.layers:
if hasattr(layer, 'kernel'): # Check if the layer has trainable weights
l1_regularization += np.sum(np.abs(layer.kernel))
return l1_regularization
# Revised stability function
def stability(model, x, y_true, perturbations):
stability_metric = 0
for perturbation in perturbations:
x_perturbed = perturb_input_data(x, perturbation)
y_pred_perturbed = model.predict(x_perturbed)
stability_metric += np.linalg.norm(y_pred_perturbed - y_true)
stability_metric /= len(perturbations)
return stability_metric
# Calabi-Yau inspired loss function
# def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# y_pred = model.predict(x)
# # Compute geometric similarity
# geom_sim = geometric_similarity(y_pred, y_true)
# # Compute topological invariance
# topo_inv = topological_invariance(y_pred, y_true)
# # Compute complexity reduction
# comp_red = complexity_reduction(model)
# # Compute stability
# stab = stability(model, x, y_true, perturbations)
# # Combine the components with weighting factors alpha, beta, and gamma
# total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
# return total_loss
#v2
#reshape arrays to 2d
def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
y_pred = model.predict(x)
# Reshape y_pred and y_true to 2D arrays
y_pred = y_pred.reshape(-1, 1)
y_true = y_true.reshape(-1, 1)
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(model)
# Compute stability
stab = stability(model, x, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss | EXA-1-master | exa/modular_components/lossFunctions/Yau/polymorphic.py |
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = torch.norm(y_pred - y_true)
return geometric_difference
# Helper function to perturb the input data
def perturb_input_data(x, perturbation):
# Apply the perturbation to the input data and return the perturbed data
return x + perturbation
def topological_invariance(y_pred, y_true, **kwargs):
data_type = infer_data_type(y_pred, y_true)
if data_type == 'point_cloud':
return point_cloud_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'graph':
return graph_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'multi_modal':
return multi_modal_topological_invariance(y_pred, y_true, **kwargs)
else:
raise ValueError(f'Unsupported data type: {data_type}')
def infer_data_type(y_pred, y_true):
if y_pred.ndim == 2 and y_true.ndim == 2:
return 'point_cloud'
elif isinstance(y_pred, nx.Graph) and isinstance(y_true, nx.Graph):
return 'graph'
elif isinstance(y_pred, list) and isinstance(y_true, list):
return 'multi_modal'
else:
raise ValueError('Unsupported data type.')
def point_cloud_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the pairwise distance matrices for both point clouds
y_pred_distance_matrix = torch.cdist(y_pred, y_pred)
y_true_distance_matrix = torch.cdist(y_true.float(), y_true.float())
# Calculate the topological invariance metric, e.g., bottleneck distance
# topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.numpy(), y_true_distance_matrix.numpy())
topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.detach().numpy(), y_true_distance_matrix.detach().numpy())
return topological_invariance_metric
def graph_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the graph edit distance between the predicted and true graphs
# You can use the NetworkX library for this
graph_edit_distance = nx.graph_edit_distance(y_pred, y_true)
return graph_edit_distance
def multi_modal_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the topological invariance metric for multi-modal data
# This could be a combination of different topological invariance metrics
# based on the specific problem requirements
# Example: Calculate the topological invariance for each modality and average the results
num_modalities = len(y_pred)
total_topological_invariance = 0.0
for i in range(num_modalities):
data_type = infer_data_type(y_pred[i], y_true[i])
topological_invariance_i = topological_invariance(y_pred[i], y_true[i], data_type=data_type)
total_topological_invariance += topological_invariance_i
average_topological_invariance = total_topological_invariance / num_modalities
return average_topological_invariance
def bottleneck_distance(distance_matrix_1, distance_matrix_2):
# Step 1: Compute the persistence diagrams for both distance matrices
rips_complex_1 = gd.RipsComplex(distance_matrix=distance_matrix_1, max_edge_length=np.inf)
simplex_tree_1 = rips_complex_1.create_simplex_tree(max_dimension=2)
# persistence_diagram_1 = np.array(simplex_tree_1.persistence())
persistence_diagram_1 = np.array([pair[1] for pair in simplex_tree_1.persistence()])
rips_complex_2 = gd.RipsComplex(distance_matrix=distance_matrix_2, max_edge_length=np.inf)
simplex_tree_2 = rips_complex_2.create_simplex_tree(max_dimension=2)
persistence_diagram_2 = np.array([pair[1] for pair in simplex_tree_2.persistence()])
# persistence_diagram_2 = np.array(simplex_tree_2.persistence())
# Step 2: Calculate the bottleneck distance between the two persistence diagrams
bottleneck_distance_value = gd.bottleneck_distance(persistence_diagram_1, persistence_diagram_2)
# Step 3: Return the bottleneck distance
return bottleneck_distance_value
# Generic function for complexity reduction
def complexity_reduction(model):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for parameter in model.parameters():
l1_regularization += torch.sum(torch.abs(parameter))
return l1_regularization
# Revised stability function
def stability(model, x, y_true, perturbations):
stability_metric = 0
for perturbation in perturbations:
x_perturbed = perturb_input_data(x, perturbation)
y_pred_perturbed = model(x_perturbed)
stability_metric += torch.norm(y_pred_perturbed - y_true)
stability_metric /= len(perturbations)
return stability_metric
# Calabi-Yau inspired loss function
def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# y_pred = torch.Tensor(y_pred.numpy()).view(-1, 1)
y_pred = model(x)
# Reshape y_pred and y_true to 2D tensors
y_pred = y_pred.view(-1, 1)
y_true = y_true.view(-1, 1)
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(model)
# Compute stability
stab = stability(model, x, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss | EXA-1-master | exa/modular_components/lossFunctions/Yau/polymorphicv2.py |
import numpy as np
import tensorflow as tf
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from v2 import calabi_yau_loss
# Create a synthetic dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Create a simple neural network model
model = tf.keras.Sequential([
tf.keras.layers.Dense(16, activation='relu', input_shape=(20,)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model with cross-entropy loss
model.compile(optimizer='adam', loss='binary_crossentropy')
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32, verbose=0)
# Evaluate the model using cross-entropy loss
y_pred = model.predict(X_test)
cross_entropy_loss = log_loss(y_test, y_pred)
# Generate random perturbations for the stability component of the Calabi-Yau inspired loss function
perturbations = [np.random.normal(0, 0.1, X_test.shape) for _ in range(5)]
# Evaluate the model using the Calabi-Yau inspired loss function
calabi_yau_loss_value = calabi_yau_loss(model, X_test, y_test, perturbations)
print(f'Cross-entropy loss: {cross_entropy_loss}')
print(f'Calabi-Yau inspired loss: {calabi_yau_loss_value}') | EXA-1-master | exa/modular_components/lossFunctions/Yau/test.py |
# import numpy as np
# import tensorflow as tf
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import log_loss
# # from v2 import calabi_yau_loss
# # from polymorphic import calabi_yau_loss
# from polymorphicv2 import calabi_yau_loss
# # Create a synthetic dataset
# X, y = make_classification(n_samples=1000, n_features=20, n_classes=2)
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# # Create a simple neural network model
# model = tf.keras.Sequential([
# tf.keras.layers.Dense(16, activation='relu', input_shape=(20,)),
# tf.keras.layers.Dense(1, activation='sigmoid')
# ])
# # Compile the model with cross-entropy loss
# model.compile(optimizer='adam', loss='binary_crossentropy')
# # Train the model
# model.fit(X_train, y_train, epochs=10, batch_size=32, verbose=0)
# # Evaluate the model using cross-entropy loss
# y_pred = model.predict(X_test)
# cross_entropy_loss = log_loss(y_test, y_pred)
# # Generate random perturbations for the stability component of the Calabi-Yau inspired loss function
# perturbations = [np.random.normal(0, 0.1, X_test.shape) for _ in range(5)]
# # Update y_pred and y_true to be 2D arrays for topological_invariance function
# y_pred_2d = np.hstack((X_test, y_pred))
# y_true_2d = np.hstack((X_test, y_test.reshape(-1, 1)))
# # Evaluate the model using the Calabi-Yau inspired loss function
# calabi_yau_loss_value = calabi_yau_loss(model, X_test, y_test, perturbations)
# print(f'Cross-entropy loss: {cross_entropy_loss}')
# print(f'Calabi-Yau inspired loss: {calabi_yau_loss_value}')
import torch
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from polymorphicv2 import calabi_yau_loss
# from nebulaV4 import Nebula
# from p3 import calabi_yau_loss
# Create a synthetic dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Convert the data to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
y_test = torch.tensor(y_test, dtype=torch.float32)
# Create a simple neural network model
model = torch.nn.Sequential(
torch.nn.Linear(20, 16),
torch.nn.ReLU(),
torch.nn.Linear(16, 1),
torch.nn.Sigmoid()
)
# Define the optimizer and loss function
optimizer = torch.optim.Adam(model.parameters())
criterion = torch.nn.BCELoss()
# Train the model
for epoch in range(10):
# Forward pass
y_pred = model(X_train)
loss = criterion(y_pred, y_train)
# Backward pass and optimization
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Evaluate the model using cross-entropy loss
y_pred = model(X_test)
cross_entropy_loss = log_loss(y_test, y_pred.detach().numpy())
# Generate random perturbations for the stability component of the Calabi-Yau inspired loss function
perturbations = [torch.tensor(np.random.normal(0, 0.1, X_test.shape), dtype=torch.float32) for _ in range(5)]
# Update y_pred and y_true to be 2D arrays for topological_invariance function
y_pred_2d = torch.cat((X_test, y_pred), dim=1)
y_true_2d = torch.cat((X_test, y_test.view(-1, 1)), dim=1)
# Evaluate the model using the Calabi-Yau inspired loss function
calabi_yau_loss_value = calabi_yau_loss(model, X_test, y_test, perturbations)
print(f'Cross-entropy loss: {cross_entropy_loss}')
print(f'Calabi-Yau inspired loss: {calabi_yau_loss_value}')
| EXA-1-master | exa/modular_components/lossFunctions/Yau/test2.py |
import numpy as np
from scipy.spatial.distance import cdist
# Function to measure geometric similarity
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = np.linalg.norm(y_pred - y_true)
return geometric_difference
# Function to measure topological invariance
# def topological_invariance(y_pred, y_true):
# # Example: Compute a topological metric based on persistent homology
# # Here, you would need to compute the persistent homology of y_pred and y_true, then compare the results
# pass
# Function to measure complexity reduction
def complexity_reduction(network):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for layer in network.layers:
if hasattr(layer, 'kernel'): # Check if the layer has trainable weights
l1_regularization += np.sum(np.abs(layer.kernel))
return l1_regularization
# Function to measure stability
def stability(y_pred, y_true, perturbations):
# Example: Compute the average L2 norm of the difference between perturbed predictions and the original prediction
stability_metric = 0
for perturbation in perturbations:
y_pred_perturbed = perturb_network(y_pred, perturbation)
stability_metric += np.linalg.norm(y_pred_perturbed - y_pred)
stability_metric /= len(perturbations)
return stability_metric
# # Helper function to perturb the network (not implemented)
# def perturb_network(y_pred, perturbation):
# # Apply the perturbation to the network and return the perturbed prediction
# pass
# #calabi yau inspired loss function
# def topological_invariance(y_pred, y_true):
# # Placeholder implementation: just return the mean squared error for now
# # In a real implementation, you would apply relevant transformations and compute a proper metric
# return np.mean((y_pred - y_true) ** 2)
def rotate_points(points, angle):
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return np.dot(points, rotation_matrix)
def topological_invariance(y_pred, y_true, angle_increment=np.pi/6):
min_distance = float('inf')
# Rotate the predicted points and compute the distance to the true points
for angle in np.arange(0, 2 * np.pi, angle_increment):
rotated_y_pred = rotate_points(y_pred, angle)
distance_matrix = cdist(rotated_y_pred, y_true, metric='euclidean')
distance = np.sum(np.min(distance_matrix, axis=1)) # Use the sum of minimum distances as the discrepancy metric
min_distance = min(min_distance, distance)
return min_distance
def perturb_network(network, perturbations):
aggregated_performance = 0
for perturbation in perturbations:
perturbed_network = network.copy() # Replace this with an appropriate method to copy your network
perturbed_network.apply_perturbation(perturbation) # Replace this with an appropriate method to apply perturbations
performance = evaluate_network(perturbed_network) # Replace this with an appropriate method to evaluate network performance
aggregated_performance += performance
return aggregated_performance
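# One possible concrete instantiation of the placeholders above, assuming a PyTorch model and a
# held-out (x_val, y_val) batch; copy.deepcopy, Gaussian parameter noise, and a negative-MSE score
# are illustrative choices, not part of the original design:
# import copy
# import torch
# import torch.nn.functional as F
# def perturb_pytorch_network(network, perturbations, x_val, y_val):
#     aggregated_performance = 0.0
#     for noise_scale in perturbations:
#         perturbed = copy.deepcopy(network)
#         with torch.no_grad():
#             for p in perturbed.parameters():
#                 p.add_(noise_scale * torch.randn_like(p))
#         y_pred = perturbed(x_val)
#         aggregated_performance += -F.mse_loss(y_pred, y_val).item()
#     return aggregated_performance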
# Calabi-Yau inspired loss function
def calabi_yau_loss(y_pred, y_true, network, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(network)
# Compute stability
stab = stability(y_pred, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss | EXA-1-master | exa/modular_components/lossFunctions/Yau/Yau.py |
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = torch.norm(y_pred - y_true)
return geometric_difference
# Helper function to perturb the input data
def perturb_input_data(x, perturbation):
# Apply the perturbation to the input data and return the perturbed data
return x + perturbation
def topological_invariance(y_pred, y_true, **kwargs):
data_type = infer_data_type(y_pred, y_true)
if data_type == 'point_cloud':
return point_cloud_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'graph':
return graph_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'multi_modal':
return multi_modal_topological_invariance(y_pred, y_true, **kwargs)
else:
raise ValueError(f'Unsupported data type: {data_type}')
def infer_data_type(y_pred, y_true):
if y_pred.ndim == 2 and y_true.ndim == 2:
return 'point_cloud'
elif isinstance(y_pred, nx.Graph) and isinstance(y_true, nx.Graph):
return 'graph'
elif isinstance(y_pred, list) and isinstance(y_true, list):
return 'multi_modal'
else:
raise ValueError('Unsupported data type.')
def point_cloud_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the pairwise distance matrices for both point clouds
y_pred_distance_matrix = torch.cdist(y_pred, y_pred)
y_true_distance_matrix = torch.cdist(y_true.float(), y_true.float())
# Calculate the topological invariance metric, e.g., bottleneck distance
# topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.numpy(), y_true_distance_matrix.numpy())
topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.detach().numpy(), y_true_distance_matrix.detach().numpy())
return topological_invariance_metric
def graph_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the graph edit distance between the predicted and true graphs
# You can use the NetworkX library for this
graph_edit_distance = nx.graph_edit_distance(y_pred, y_true)
return graph_edit_distance
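# --- Hypothetical usage sketch (illustrative only; not part of the original module) ---
# Two structurally identical graphs have a graph edit distance of 0.0.
def _demo_graph_topological_invariance():
    g_pred = nx.path_graph(3)
    g_true = nx.path_graph(3)
    return graph_topological_invariance(g_pred, g_true)  # 0.0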
def multi_modal_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the topological invariance metric for multi-modal data
# This could be a combination of different topological invariance metrics
# based on the specific problem requirements
# Example: Calculate the topological invariance for each modality and average the results
num_modalities = len(y_pred)
total_topological_invariance = 0.0
for i in range(num_modalities):
data_type = infer_data_type(y_pred[i], y_true[i])
topological_invariance_i = topological_invariance(y_pred[i], y_true[i], data_type=data_type)
total_topological_invariance += topological_invariance_i
average_topological_invariance = total_topological_invariance / num_modalities
return average_topological_invariance
def bottleneck_distance(distance_matrix_1, distance_matrix_2):
# Step 1: Compute the persistence diagrams for both distance matrices
rips_complex_1 = gd.RipsComplex(distance_matrix=distance_matrix_1, max_edge_length=np.inf)
simplex_tree_1 = rips_complex_1.create_simplex_tree(max_dimension=2)
# persistence_diagram_1 = np.array(simplex_tree_1.persistence())
persistence_diagram_1 = np.array([pair[1] for pair in simplex_tree_1.persistence()])
rips_complex_2 = gd.RipsComplex(distance_matrix=distance_matrix_2, max_edge_length=np.inf)
simplex_tree_2 = rips_complex_2.create_simplex_tree(max_dimension=2)
persistence_diagram_2 = np.array([pair[1] for pair in simplex_tree_2.persistence()])
# persistence_diagram_2 = np.array(simplex_tree_2.persistence())
# Step 2: Calculate the bottleneck distance between the two persistence diagrams
bottleneck_distance_value = gd.bottleneck_distance(persistence_diagram_1, persistence_diagram_2)
# Step 3: Return the bottleneck distance
return bottleneck_distance_value
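# --- Hypothetical usage sketch (illustrative only; assumes gudhi is installed) ---
# Identical distance matrices produce identical persistence diagrams, so the
# bottleneck distance between them is (approximately) zero.
def _demo_bottleneck_distance():
    points = torch.randn(8, 2)
    distance_matrix = torch.cdist(points, points).numpy()
    return bottleneck_distance(distance_matrix, distance_matrix)  # ~0.0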
# Generic function for complexity reduction
def complexity_reduction(model):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for parameter in model.parameters():
l1_regularization += torch.sum(torch.abs(parameter))
return l1_regularization
# Revised stability function
def stability(model, x, y_true, perturbations):
stability_metric = 0
for perturbation in perturbations:
x_perturbed = perturb_input_data(x, perturbation)
y_pred_perturbed = model(x_perturbed)
stability_metric += torch.norm(y_pred_perturbed - y_true)
stability_metric /= len(perturbations)
return stability_metric
# Calabi-Yau inspired loss function
def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# y_pred = torch.Tensor(y_pred.numpy()).view(-1, 1)
y_pred = model(x)
# Reshape y_pred and y_true to 2D tensors
y_pred = y_pred.view(-1, 1)
y_true = y_true.view(-1, 1)
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(model)
# Compute stability
stab = stability(model, x, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss
import torch.nn as nn
# Define a simple linear regression model
class LinearRegression(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
return self.linear(x)
import torch
import matplotlib.pyplot as plt
import numpy as np
# Create an instance of the linear regression model
model = LinearRegression()
# Compute the loss for a given input value
x = torch.Tensor([1])
y_true = torch.Tensor([2])
perturbations = torch.Tensor([[0.1], [-0.1]])
alpha = 0.1
beta = 0.1
gamma = 0.1
loss = calabi_yau_loss(model, x, y_true, perturbations, alpha=alpha, beta=beta, gamma=gamma)
# Define the range of values for the independent variable (x-axis)
x_range = np.linspace(-10, 10, 100)
# Set the hyperparameters for the loss function
alpha = 0.1
beta = 0.1
gamma = 0.1
# Define the true output values for the input values in the x range
y_true = np.sin(x_range)
# Convert y_true to a tensor
y_true_tensor = torch.Tensor(y_true)
# Define the perturbations to use for computing the stability component of the loss function
perturbations = np.random.normal(0, 0.1, size=(10, 1)).astype(np.float32)  # one scalar perturbation per pass, matching the model's single input feature
# Initialize an empty list to store the loss values for each input value in the x range
loss_values = []
# Loop through each input value in the x range and compute the corresponding loss value
for x in x_range:
x_tensor = torch.Tensor([x])
y_pred = torch.Tensor([np.sin(x)])
loss = calabi_yau_loss(model, x_tensor, y_true_tensor, perturbations, alpha=alpha, beta=beta, gamma=gamma)
loss_values.append(loss.item())
# Plot the loss values against the input values
plt.plot(x_range, loss_values)
plt.xlabel('Input Values')
plt.ylabel('Loss')
plt.title('Calabi-Yau Loss')
plt.show() | EXA-1-master | exa/modular_components/lossFunctions/Yau/visual.py |
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = torch.norm(y_pred - y_true)
return geometric_difference
# Helper function to perturb the input data
def perturb_input_data(x, perturbation):
# Apply the perturbation to the input data and return the perturbed data
return x + perturbation
def topological_invariance(y_pred, y_true, **kwargs):
data_type = infer_data_type(y_pred, y_true)
if data_type == 'point_cloud':
return point_cloud_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'graph':
return graph_topological_invariance(y_pred, y_true, **kwargs)
elif data_type == 'multi_modal':
return multi_modal_topological_invariance(y_pred, y_true, **kwargs)
else:
raise ValueError(f'Unsupported data type: {data_type}')
def infer_data_type(y_pred, y_true):
if y_pred.ndim == 2 and y_true.ndim == 2:
return 'point_cloud'
elif isinstance(y_pred, nx.Graph) and isinstance(y_true, nx.Graph):
return 'graph'
elif isinstance(y_pred, list) and isinstance(y_true, list):
return 'multi_modal'
else:
raise ValueError('Unsupported data type.')
def point_cloud_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the pairwise distance matrices for both point clouds
y_pred_distance_matrix = torch.cdist(y_pred, y_pred)
y_true_distance_matrix = torch.cdist(y_true.float(), y_true.float())
# Calculate the topological invariance metric, e.g., bottleneck distance
    topological_invariance_metric = bottleneck_distance(y_pred_distance_matrix.detach().numpy(), y_true_distance_matrix.detach().numpy())
return topological_invariance_metric
def graph_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the graph edit distance between the predicted and true graphs
# You can use the NetworkX library for this
graph_edit_distance = nx.graph_edit_distance(y_pred, y_true)
return graph_edit_distance
def multi_modal_topological_invariance(y_pred, y_true, **kwargs):
# Calculate the topological invariance metric for multi-modal data
# This could be a combination of different topological invariance metrics
# based on the specific problem requirements
# Example: Calculate the topological invariance for each modality and average the results
num_modalities = len(y_pred)
total_topological_invariance = 0.0
for i in range(num_modalities):
data_type = infer_data_type(y_pred[i], y_true[i])
topological_invariance_i = topological_invariance(y_pred[i], y_true[i], data_type=data_type)
total_topological_invariance += topological_invariance_i
average_topological_invariance = total_topological_invariance / num_modalities
return average_topological_invariance
def bottleneck_distance(distance_matrix_1, distance_matrix_2):
# Step 1: Compute the persistence diagrams for both distance matrices
rips_complex_1 = gd.RipsComplex(distance_matrix=distance_matrix_1, max_edge_length=np.inf)
simplex_tree_1 = rips_complex_1.create_simplex_tree(max_dimension=2)
persistence_diagram_1 = np.array(simplex_tree_1.persistence())
rips_complex_2 = gd.RipsComplex(distance_matrix=distance_matrix_2, max_edge_length=np.inf)
simplex_tree_2 = rips_complex_2.create_simplex_tree(max_dimension=2)
persistence_diagram_2 = np.array(simplex_tree_2.persistence())
# Step 2: Calculate the bottleneck distance between the two persistence diagrams
bottleneck_distance_value = gd.bottleneck_distance(persistence_diagram_1, persistence_diagram_2)
# Step 3: Return the bottleneck distance
return bottleneck_distance_value
# Generic function for complexity reduction
def complexity_reduction(model):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for parameter in model.parameters():
l1_regularization += torch.sum(torch.abs(parameter))
return l1_regularization
# Revised stability function
def stability(model, x, y_true, perturbations):
stability_metric = 0
for perturbation in perturbations:
x_perturbed = perturb_input_data(x, perturbation)
y_pred_perturbed = model(x_perturbed)
stability_metric += torch.norm(y_pred_perturbed - y_true)
stability_metric /= len(perturbations)
return stability_metric
# Calabi-Yau inspired loss function
def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# y_pred = torch.Tensor(y_pred.numpy()).view(-1, 1)
y_pred = model(x)
# Reshape y_pred and y_true to 2D tensors
y_pred = y_pred.view(-1, 1)
y_true = y_true.view(-1, 1)
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(model)
# Compute stability
stab = stability(model, x, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss
| EXA-1-master | exa/modular_components/lossFunctions/Yau/p3.py |
import numpy as np
# # Functions to measure geometric similarity, topological invariance, complexity reduction, and stability
# def geometric_similarity(y_pred, y_true):
# # Compute a metric based on curvature or other geometric properties between y_pred and y_true
# pass
# def topological_invariance(y_pred, y_true):
# # Compute a metric that is invariant under specific transformations relevant to the problem domain
# pass
# def complexity_reduction(network):
# # Compute a term that promotes sparsity or low-rank structures in the network's weights or activations
# pass
# def stability(y_pred, y_true, perturbations):
# # Compute a metric that penalizes sensitivity to small changes in the input data or network parameters
# pass
# # Calabi-Yau-inspired loss function
# def calabi_yau_loss(y_pred, y_true, network, perturbations, weights):
# # Combine the metrics with appropriate weights to form the overall loss function
# loss = (
# weights['geometric_similarity'] * geometric_similarity(y_pred, y_true) +
# weights['topological_invariance'] * topological_invariance(y_pred, y_true) +
# weights['complexity_reduction'] * complexity_reduction(network) +
# weights['stability'] * stability(y_pred, y_true, perturbations)
# )
# return loss
#function to measure geometric similarity
def geometric_similarity(y_pred, y_true):
    #compute a simple metric based on the l2 norm of the difference between y_pred and y_true
geometric_difference = np.linalg.norm(y_pred - y_true)
return geometric_difference
#function to measure topological invariance
def topological_invariance(y_pred, y_true):
    #example: compute a topological metric based on persistent homology
    #here you would compute the persistent homology of y_pred and y_true, then compare the results
pass
#function to measure complexity reduction
def complexity_reduction(network):
    #example: compute the l1 regularization term for the network's weights
    l1_regularization = 0
    for layer in network.layers:
        if hasattr(layer, 'kernel'): # check if the layer has trainable weights
            l1_regularization += np.sum(np.abs(layer.kernel))
    return l1_regularization
#function to measure stability
def stability(y_pred, y_true, perturbations):
    #example: compute the average l2 norm of the difference between perturbed predictions and the original prediction
    stability_metric = 0
    for perturbation in perturbations:
        y_pred_perturbed = perturb_network(y_pred, perturbation)
        stability_metric += np.linalg.norm(y_pred_perturbed - y_pred)
    stability_metric /= len(perturbations)
    return stability_metric
#helper function to perturb the network (not implemented)
def perturb_network(y_pred, perturbation):
    pass
| EXA-1-master | exa/modular_components/lossFunctions/Yau/main.py |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import pairwise_distances
def geometric_similarity(y_pred, y_true):
# Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
geometric_difference = np.linalg.norm(y_pred - y_true)
return geometric_difference
# Helper function to perturb the input data
def perturb_input_data(x, perturbation):
# Apply the perturbation to the input data and return the perturbed data
return x + perturbation
# Generic topological invariance function
def topological_invariance(y_pred, y_true, metric='euclidean'):
distance_matrix = pairwise_distances(y_pred, y_true, metric=metric)
distance = np.sum(np.min(distance_matrix, axis=1)) # Use the sum of minimum distances as the discrepancy metric
return distance
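# Hypothetical usage sketch (illustrative only): the generic metric above on two small
# 2-D point sets returns the sum of nearest-neighbour distances.
def _demo_topological_invariance():
    y_pred = np.array([[0.0, 0.0], [1.0, 1.0]])
    y_true = np.array([[0.0, 0.1], [1.0, 0.9]])
    return topological_invariance(y_pred, y_true)  # ~0.2 for these points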
def complexity_reduction(network):
# Example: Compute the L1 regularization term for the network's weights
l1_regularization = 0
for layer in network.layers:
if hasattr(layer, 'kernel'): # Check if the layer has trainable weights
l1_regularization += np.sum(np.abs(layer.kernel))
return l1_regularization
# Revised stability function
def stability(model, x, y_true, perturbations):
stability_metric = 0
for perturbation in perturbations:
x_perturbed = perturb_input_data(x, perturbation)
y_pred_perturbed = model.predict(x_perturbed)
stability_metric += np.linalg.norm(y_pred_perturbed - y_true)
stability_metric /= len(perturbations)
return stability_metric
# Calabi-Yau inspired loss function
# def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
# y_pred = model.predict(x)
# # Compute geometric similarity
# geom_sim = geometric_similarity(y_pred, y_true)
# # Compute topological invariance
# topo_inv = topological_invariance(y_pred, y_true)
# # Compute complexity reduction
# comp_red = complexity_reduction(model)
# # Compute stability
# stab = stability(model, x, y_true, perturbations)
# # Combine the components with weighting factors alpha, beta, and gamma
# total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
# return total_loss
#v2
#reshape arrays to 2d
def calabi_yau_loss(model, x, y_true, perturbations, alpha=0.1, beta=0.1, gamma=0.1):
y_pred = model.predict(x)
# Reshape y_pred and y_true to 2D arrays
y_pred = y_pred.reshape(-1, 1)
y_true = y_true.reshape(-1, 1)
# Compute geometric similarity
geom_sim = geometric_similarity(y_pred, y_true)
# Compute topological invariance
topo_inv = topological_invariance(y_pred, y_true)
# Compute complexity reduction
comp_red = complexity_reduction(model)
# Compute stability
stab = stability(model, x, y_true, perturbations)
# Combine the components with weighting factors alpha, beta, and gamma
total_loss = geom_sim + alpha * topo_inv + beta * comp_red + gamma * stab
return total_loss | EXA-1-master | exa/modular_components/lossFunctions/Yau/v2.py |
import torch
import torch.nn as nn
DEV = torch.device('cuda:0')
def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
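# --- Hypothetical usage sketch (illustrative only; not part of the original module) ---
# find_layers returns a flat {qualified_name: module} map of all Conv2d/Linear layers.
def _demo_find_layers():
    mlp = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    return find_layers(mlp)  # keys: '0' and '2' (the two Linear layers)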
| EXA-1-master | exa/modular_components/gptq/modelutils.py |
from setuptools import setup, Extension
from torch.utils import cpp_extension
setup(
name='quant_cuda',
ext_modules=[cpp_extension.CUDAExtension(
'quant_cuda', ['quant_cuda.cpp', 'quant_cuda_kernel.cu']
)],
cmdclass={'build_ext': cpp_extension.BuildExtension}
)
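# Typical build/install invocation (assumes a CUDA toolchain compatible with the installed PyTorch):
#   python setup_cuda.py install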
| EXA-1-master | exa/modular_components/gptq/setup_cuda.py |
import time
import torch
import torch.nn as nn
from gptq import *
from modelutils import *
from quant import *
def get_opt(model):
import torch
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
from transformers import OPTForCausalLM
model = OPTForCausalLM.from_pretrained(model, torch_dtype='auto')
model.seqlen = model.config.max_position_embeddings
return model
@torch.no_grad()
def opt_sequential(model, dataloader, dev):
print('Starting ...')
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.decoder.layers
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(dev)
model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(dev)
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.cpu()
model.model.decoder.embed_positions = model.model.decoder.embed_positions.cpu()
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.cpu()
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
print('Ready.')
quantizers = {}
for i in range(len(layers)):
layer = layers[i].to(dev)
subset = find_layers(layer)
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer = Quantizer()
gptq[name].quantizer.configure(
args.wbits, perchannel=True, sym=args.sym, mse=False, trits=args.trits
)
def add_batch(name):
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
for h in handles:
h.remove()
for name in subset:
print(i, name)
print('Quantizing ...')
gptq[name].fasterquant(percdamp=args.percdamp, groupsize=args.groupsize, actorder=args.act_order)
quantizers['model.decoder.layers.%d.%s' % (i, name)] = gptq[name].quantizer
gptq[name].free()
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
layers[i] = layer.cpu()
del layer
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
return quantizers
@torch.no_grad()
def opt_eval(model, testenc, dev):
print('Evaluating ...')
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.decoder.layers
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(dev)
model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(dev)
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.cpu()
model.model.decoder.embed_positions = model.model.decoder.embed_positions.cpu()
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.cpu()
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
if args.nearest:
subset = find_layers(layer)
for name in subset:
quantizer = Quantizer()
quantizer.configure(
args.wbits, perchannel=True, sym=args.sym, mse=False
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
if model.model.decoder.final_layer_norm is not None:
model.model.decoder.final_layer_norm = model.model.decoder.final_layer_norm.to(dev)
if model.model.decoder.project_out is not None:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
model.lm_head = model.lm_head.to(dev)
testenc = testenc.to(dev)
nlls = []
for i in range(nsamples):
hidden_states = inps[i].unsqueeze(0)
if model.model.decoder.final_layer_norm is not None:
hidden_states = model.model.decoder.final_layer_norm(hidden_states)
if model.model.decoder.project_out is not None:
hidden_states = model.model.decoder.project_out(hidden_states)
lm_logits = model.lm_head(hidden_states)
shift_logits = lm_logits[:, :-1, :].contiguous()
shift_labels = testenc[
:, (i * model.seqlen):((i + 1) * model.seqlen)
][:, 1:]
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
neg_log_likelihood = loss.float() * model.seqlen
nlls.append(neg_log_likelihood)
ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))
print(ppl.item())
model.config.use_cache = use_cache
# TODO: perform packing on GPU
def opt_pack3(model, quantizers):
layers = find_layers(model)
layers = {n: layers[n] for n in quantizers}
make_quant3(model, quantizers, faster=args.faster_kernel)
qlayers = find_layers(model, [Quant3Linear])
print('Packing ...')
for name in qlayers:
print(name)
quantizers[name] = quantizers[name].cpu()
qlayers[name].pack(layers[name], quantizers[name].scale, quantizers[name].zero)
print('Done.')
return model
def load_quant3(model, checkpoint):
from transformers import OPTConfig, OPTForCausalLM
config = OPTConfig.from_pretrained(model)
def noop(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = noop
torch.nn.init.uniform_ = noop
torch.nn.init.normal_ = noop
torch.set_default_dtype(torch.half)
transformers.modeling_utils._init_weights = False
model = OPTForCausalLM(config)
torch.set_default_dtype(torch.float)
model = model.eval()
layers = find_layers(model)
for name in ['model.decoder.project_out', 'model.decoder.project_in', 'lm_head']:
if name in layers:
del layers[name]
make_quant3(model, layers, faster=args.faster_kernel)
print('Loading model ...')
model.load_state_dict(torch.load(checkpoint))
model.seqlen = model.config.max_position_embeddings
print('Done.')
return model
def opt_multigpu(model, gpus):
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(gpus[0])
model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(gpus[0])
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.to(gpus[0])
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.to(gpus[-1])
if hasattr(model.model.decoder, 'final_layer_norm') and model.model.decoder.final_layer_norm:
model.model.decoder.final_layer_norm = model.model.decoder.final_layer_norm.to(gpus[-1])
import copy
model.lm_head = copy.deepcopy(model.lm_head).to(gpus[-1])
cache = {'mask': None}
class MoveModule(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
self.dev = next(iter(self.module.parameters())).device
def forward(self, *inp, **kwargs):
inp = list(inp)
if inp[0].device != self.dev:
inp[0] = inp[0].to(self.dev)
if cache['mask'] is None or cache['mask'].device != self.dev:
cache['mask'] = kwargs['attention_mask'].to(self.dev)
kwargs['attention_mask'] = cache['mask']
tmp = self.module(*inp, **kwargs)
return tmp
layers = model.model.decoder.layers
pergpu = math.ceil(len(layers) / len(gpus))
for i in range(len(layers)):
layers[i] = MoveModule(layers[i].to(gpus[i // pergpu]))
model.gpus = gpus
def benchmark(model, input_ids, check=False):
input_ids = input_ids.to(model.gpus[0] if hasattr(model, 'gpus') else DEV)
torch.cuda.synchronize()
cache = {'past': None}
def clear_past(i):
def tmp(layer, inp, out):
if cache['past']:
cache['past'][i] = None
return tmp
for i, layer in enumerate(model.model.decoder.layers):
layer.register_forward_hook(clear_past(i))
print('Benchmarking ...')
if check:
loss = nn.CrossEntropyLoss()
tot = 0.
def sync():
if hasattr(model, 'gpus'):
for gpu in model.gpus:
torch.cuda.synchronize(gpu)
else:
torch.cuda.synchronize()
with torch.no_grad():
attention_mask = torch.ones((1, input_ids.numel()), device=DEV)
times = []
for i in range(input_ids.numel()):
tick = time.time()
out = model(
input_ids[:, i].reshape(-1),
past_key_values=cache['past'],
attention_mask=attention_mask[:, :(i + 1)].reshape((1, -1))
)
sync()
times.append(time.time() - tick)
print(i, times[-1])
if check and i != input_ids.numel() - 1:
tot += loss(out.logits[0].to(DEV), input_ids[:, (i + 1)].to(DEV)).float()
cache['past'] = list(out.past_key_values)
del out
sync()
import numpy as np
print('Median:', np.median(times))
if check:
print('PPL:', torch.exp(tot / (input_ids.numel() - 1)).item())
if __name__ == '__main__':
import argparse
from datautils import *
parser = argparse.ArgumentParser()
parser.add_argument(
'model', type=str,
help='OPT model to load; pass `facebook/opt-X`.'
)
parser.add_argument(
'dataset', type=str, choices=['wikitext2', 'ptb', 'c4'],
help='Where to extract calibration data from.'
)
parser.add_argument(
'--seed',
type=int, default=0, help='Seed for sampling the calibration data.'
)
parser.add_argument(
'--nsamples', type=int, default=128,
help='Number of calibration data samples.'
)
parser.add_argument(
'--percdamp', type=float, default=.01,
help='Percent of the average Hessian diagonal to use for dampening.'
)
parser.add_argument(
'--nearest', action='store_true',
help='Whether to run the RTN baseline.'
)
parser.add_argument(
'--wbits', type=int, default=16, choices=[2, 3, 4, 16],
help='#bits to use for quantization; use 16 for evaluating base model.'
)
parser.add_argument(
'--trits', action='store_true',
help='Whether to use trits for quantization.'
)
parser.add_argument(
'--groupsize', type=int, default=-1,
help='Groupsize to use for quantization; default uses full row.'
)
parser.add_argument(
'--sym', action='store_true',
help='Whether to perform symmetric quantization.'
)
parser.add_argument(
'--save', type=str, default='',
help='Save quantized checkpoint under this name.'
)
parser.add_argument(
'--load', type=str, default='',
help='Load quantized model.'
)
parser.add_argument(
'--benchmark', type=int, default=0,
help='Number of tokens to use for benchmarking.'
)
parser.add_argument(
'--check', action='store_true',
help='Whether to compute perplexity during benchmarking for verification.'
)
parser.add_argument(
'--new-eval', action='store_true',
help='Whether to use the new PTB and C4 eval.'
)
parser.add_argument(
'--faster-kernel', action='store_true',
help='Whether to use the new faster kernel for benchmarking.'
)
parser.add_argument(
'--act-order', action='store_true',
help='Whether to apply the activation order GPTQ heuristic'
)
args = parser.parse_args()
if args.load:
model = load_quant3(args.model, args.load)
else:
model = get_opt(args.model)
model.eval()
dataloader, testloader = get_loaders(
args.dataset, nsamples=args.nsamples, seed=args.seed, model=args.model, seqlen=model.seqlen
)
if args.wbits < 16 and not args.nearest:
tick = time.time()
quantizers = opt_sequential(model, dataloader, DEV)
print(time.time() - tick)
if args.benchmark:
gpus = [torch.device('cuda:%d' % i) for i in range(torch.cuda.device_count())]
if len(gpus) > 1:
opt_multigpu(model, gpus)
else:
model = model.to(DEV)
if args.benchmark:
input_ids = next(iter(dataloader))[0][:, :args.benchmark]
benchmark(model, input_ids, check=args.check)
if args.load:
exit()
datasets = ['wikitext2', 'ptb', 'c4']
if args.new_eval:
datasets = ['wikitext2', 'ptb-new', 'c4-new']
for dataset in datasets:
dataloader, testloader = get_loaders(
dataset, seed=args.seed, model=args.model, seqlen=model.seqlen
)
print(dataset)
opt_eval(model, testloader, DEV)
if args.save:
opt_pack3(model, quantizers)
torch.save(model.state_dict(), args.save)
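# Example invocation (illustrative; the model name and output file are placeholders, flags as defined above):
#   python opt.py facebook/opt-125m c4 --wbits 4 --groupsize 128 --save opt125m-4bit-128g.pt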
| EXA-1-master | exa/modular_components/gptq/opt.py |
import numpy as np
import torch
def set_seed(seed):
np.random.seed(seed)
torch.random.manual_seed(seed)
def get_wikitext2(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
trainenc = tokenizer("\n\n".join(traindata['text']), return_tensors='pt')
testenc = tokenizer("\n\n".join(testdata['text']), return_tensors='pt')
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_ptb(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train')
valdata = load_dataset('ptb_text_only', 'penn_treebank', split='validation')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
trainenc = tokenizer("\n\n".join(traindata['sentence']), return_tensors='pt')
testenc = tokenizer("\n\n".join(valdata['sentence']), return_tensors='pt')
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train'
)
valdata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation'
)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
import random
random.seed(0)
valenc = []
for _ in range(256):
while True:
i = random.randint(0, len(valdata) - 1)
tmp = tokenizer(valdata[i]['text'], return_tensors='pt')
if tmp.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
valenc.append(tmp.input_ids[:, i:j])
valenc = torch.hstack(valenc)
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_ptb_new(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train')
testdata = load_dataset('ptb_text_only', 'penn_treebank', split='test')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
trainenc = tokenizer(" ".join(traindata['sentence']), return_tensors='pt')
testenc = tokenizer(" ".join(testdata['sentence']), return_tensors='pt')
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4_new(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train'
)
valdata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation'
)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
valenc = tokenizer(' '.join(valdata[:1100]['text']), return_tensors='pt')
valenc = valenc.input_ids[:, :(256 * seqlen)]
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_loaders(
name, nsamples=128, seed=0, seqlen=2048, model=''
):
if 'wikitext2' in name:
return get_wikitext2(nsamples, seed, seqlen, model)
if 'ptb' in name:
if 'new' in name:
return get_ptb_new(nsamples, seed, seqlen, model)
return get_ptb(nsamples, seed, seqlen, model)
if 'c4' in name:
if 'new' in name:
return get_c4_new(nsamples, seed, seqlen, model)
return get_c4(nsamples, seed, seqlen, model)
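# --- Hypothetical usage sketch (illustrative only; the model id is a placeholder) ---
# Pull a small calibration set plus the matching test encodings for WikiText-2.
def _demo_get_loaders():
    trainloader, testenc = get_loaders(
        'wikitext2', nsamples=16, seed=0, seqlen=2048, model='facebook/opt-125m'
    )
    return len(trainloader), testenc.input_ids.shape  # (16, torch.Size([1, n_tokens]))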
| EXA-1-master | exa/modular_components/gptq/datautils.py |
import time
import torch
import torch.nn as nn
from gptq import *
from modelutils import *
from quant import *
def get_llama(model):
import torch
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
from transformers import LlamaForCausalLM
model = LlamaForCausalLM.from_pretrained(model, torch_dtype='auto')
model.seqlen = 2048
return model
@torch.no_grad()
def llama_sequential(model, dataloader, dev):
print('Starting ...')
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
model.model.embed_tokens = model.model.embed_tokens.to(dev)
model.model.norm = model.model.norm.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.embed_tokens = model.model.embed_tokens.cpu()
model.model.norm = model.model.norm.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
print('Ready.')
quantizers = {}
for i in range(len(layers)):
layer = layers[i].to(dev)
full = find_layers(layer)
if args.true_sequential:
sequential = [
['self_attn.k_proj', 'self_attn.v_proj', 'self_attn.q_proj'],
['self_attn.o_proj'],
['mlp.up_proj', 'mlp.gate_proj'],
['mlp.down_proj']
]
else:
sequential = [list(full.keys())]
for names in sequential:
subset = {n: full[n] for n in names}
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer = Quantizer()
gptq[name].quantizer.configure(
args.wbits, perchannel=True, sym=args.sym, mse=False
)
def add_batch(name):
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in subset:
print(i, name)
print('Quantizing ...')
gptq[name].fasterquant(percdamp=args.percdamp, groupsize=args.groupsize, actorder=args.act_order)
quantizers['model.layers.%d.%s' % (i, name)] = gptq[name].quantizer
gptq[name].free()
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
layers[i] = layer.cpu()
del layer
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
return quantizers
@torch.no_grad()
def llama_eval(model, testenc, dev):
print('Evaluating ...')
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
model.model.embed_tokens = model.model.embed_tokens.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.embed_tokens = model.model.embed_tokens.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
if args.nearest:
subset = find_layers(layer)
for name in subset:
quantizer = Quantizer()
quantizer.configure(
args.wbits, perchannel=True, sym=False, mse=False
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
if model.model.norm is not None:
model.model.norm = model.model.norm.to(dev)
model.lm_head = model.lm_head.to(dev)
testenc = testenc.to(dev)
nlls = []
for i in range(nsamples):
hidden_states = inps[i].unsqueeze(0)
if model.model.norm is not None:
hidden_states = model.model.norm(hidden_states)
lm_logits = model.lm_head(hidden_states)
shift_logits = lm_logits[:, :-1, :].contiguous()
shift_labels = testenc[
:, (i * model.seqlen):((i + 1) * model.seqlen)
][:, 1:]
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
neg_log_likelihood = loss.float() * model.seqlen
nlls.append(neg_log_likelihood)
ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))
print(ppl.item())
model.config.use_cache = use_cache
if __name__ == '__main__':
import argparse
from datautils import *
parser = argparse.ArgumentParser()
parser.add_argument(
'model', type=str,
        help='LLaMA model to load; pass the location of a Hugging Face converted checkpoint.'
)
parser.add_argument(
'dataset', type=str, choices=['wikitext2', 'ptb', 'c4'],
help='Where to extract calibration data from.'
)
parser.add_argument(
'--seed',
type=int, default=0, help='Seed for sampling the calibration data.'
)
parser.add_argument(
'--nsamples', type=int, default=128,
help='Number of calibration data samples.'
)
parser.add_argument(
'--percdamp', type=float, default=.01,
help='Percent of the average Hessian diagonal to use for dampening.'
)
parser.add_argument(
'--nearest', action='store_true',
help='Whether to run the RTN baseline.'
)
parser.add_argument(
'--wbits', type=int, default=16, choices=[2, 3, 4, 8, 16],
help='#bits to use for quantization; use 16 for evaluating base model.'
)
parser.add_argument(
'--groupsize', type=int, default=-1,
help='Groupsize to use for quantization; default uses full row.'
)
parser.add_argument(
'--sym', action='store_true',
help='Whether to perform symmetric quantization.'
)
parser.add_argument(
'--new-eval', action='store_true',
help='Whether to use the new PTB and C4 eval.'
)
parser.add_argument(
'--act-order', action='store_true',
help='Whether to apply the activation order GPTQ heuristic'
)
parser.add_argument(
'--true-sequential', action='store_true',
        help='Whether to run in true sequential mode.'
)
args = parser.parse_args()
model = get_llama(args.model)
model.eval()
dataloader, testloader = get_loaders(
args.dataset, nsamples=args.nsamples, seed=args.seed, model=args.model, seqlen=model.seqlen
)
if args.wbits < 16 and not args.nearest:
tick = time.time()
quantizers = llama_sequential(model, dataloader, DEV)
print(time.time() - tick)
datasets = ['wikitext2', 'ptb', 'c4']
if args.new_eval:
datasets = ['wikitext2', 'ptb-new', 'c4-new']
for dataset in datasets:
dataloader, testloader = get_loaders(
dataset, seed=args.seed, model=args.model, seqlen=model.seqlen
)
print(dataset)
llama_eval(model, testloader, DEV)
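# Example invocation (illustrative; the checkpoint path is a placeholder, flags as defined above):
#   python llama.py ./llama-7b-hf c4 --wbits 4 --true-sequential --act-order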
| EXA-1-master | exa/modular_components/gptq/llama.py |
import torch
import torch.nn as nn
import quant_cuda
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
print('Benchmarking OPT-175B FC2 matvec ...')
DEV = torch.device('cuda:0')
M = 12288 * 4
N = 12288
DTYPE = torch.half
mat = torch.randn((M, N), device=DEV, dtype=DTYPE)
vec = torch.randn((1, M), device=DEV, dtype=DTYPE)
mul = torch.zeros((1, N), device=DEV, dtype=DTYPE)
COUNT = 1000
import time
tick = time.time()
for _ in range(COUNT):
torch.matmul(vec, mat, out=mul)
torch.cuda.synchronize()
print('FP16:', (time.time() - tick) / COUNT)
DTYPE = torch.float
mat = mat.to(DTYPE)
vec = vec.to(DTYPE)
mul = mul.to(DTYPE)
mat = torch.randint(-1000000000, 1000000000, (M // 1024 * 96, N), device=DEV, dtype=torch.int)
scales = torch.randn(N, device=DEV, dtype=DTYPE)
zeros = torch.randn(N, device=DEV, dtype=DTYPE)
COUNT = 1000
import time
tick = time.time()
for _ in range(COUNT):
quant_cuda.vecquant3matmul(vec, mat, mul, scales, zeros)
torch.cuda.synchronize()
print('3bit:', (time.time() - tick) / COUNT)
COUNT = 1000
import time
tick = time.time()
for _ in range(COUNT):
quant_cuda.vecquant3matmul_faster(vec, mat, mul, scales, zeros)
torch.cuda.synchronize()
print('3bit:', (time.time() - tick) / COUNT, '(faster)')
print('Verifying kernel correctness ...')
M = 4 * 4096
N = 4096
layer = nn.Linear(M, N)
vec = torch.randn(M).to(DEV)
from quant import *
quantizer = Quantizer()
quantizer.configure(3, perchannel=True, sym=False, mse=False)
quantizer.find_params(layer.weight.data, weight=True)
layer.weight.data = quantize(
layer.weight.data, quantizer.scale, quantizer.zero, quantizer.maxq
)
qlayer = Quant3Linear(layer.in_features, layer.out_features)
qlayer.pack(layer, quantizer.scale, quantizer.zero)
qlayer = qlayer.to(DEV)
layer = layer.to(DEV)
with torch.no_grad():
print('Simu:', layer.to(DEV)(vec))
print('Kern:', qlayer(vec))
qlayer.faster = True
print('Kern:', qlayer(vec.half()), '(faster)')
| EXA-1-master | exa/modular_components/gptq/test_kernel.py |
import math
import time
import torch
import torch.nn as nn
import transformers
from quant import *
DEBUG = False
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
class GPTQ:
def __init__(self, layer):
self.layer = layer
self.dev = self.layer.weight.device
W = layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
self.rows = W.shape[0]
self.columns = W.shape[1]
self.H = torch.zeros((self.columns, self.columns), device=self.dev)
self.nsamples = 0
def add_batch(self, inp, out):
if DEBUG:
self.inp1 = inp
self.out1 = out
if len(inp.shape) == 2:
inp = inp.unsqueeze(0)
tmp = inp.shape[0]
if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D):
if len(inp.shape) == 3:
inp = inp.reshape((-1, inp.shape[-1]))
inp = inp.t()
if isinstance(self.layer, nn.Conv2d):
unfold = nn.Unfold(
self.layer.kernel_size,
dilation=self.layer.dilation,
padding=self.layer.padding,
stride=self.layer.stride
)
inp = unfold(inp)
inp = inp.permute([1, 0, 2])
inp = inp.flatten(1)
self.H *= self.nsamples / (self.nsamples + tmp)
self.nsamples += tmp
# inp = inp.float()
inp = math.sqrt(2 / self.nsamples) * inp.float()
# self.H += 2 / self.nsamples * inp.matmul(inp.t())
self.H += inp.matmul(inp.t())
def fasterquant(
self, blocksize=128, percdamp=.01, groupsize=-1, actorder=False
):
W = self.layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
W = W.float()
tick = time.time()
if not self.quantizer.ready():
self.quantizer.find_params(W, weight=True)
H = self.H
del self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
if actorder:
perm = torch.argsort(torch.diag(H), descending=True)
W = W[:, perm]
H = H[perm][:, perm]
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
damp = percdamp * torch.mean(torch.diag(H))
diag = torch.arange(self.columns, device=self.dev)
H[diag, diag] += damp
H = torch.linalg.cholesky(H)
H = torch.cholesky_inverse(H)
H = torch.linalg.cholesky(H, upper=True)
Hinv = H
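        # Quantize the weight columns block by block: within each block, columns are
        # quantized one at a time and the resulting error is propagated into the
        # remaining, not-yet-quantized columns via the upper Cholesky factor of the
        # inverse Hessian computed above (Hinv).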
for i1 in range(0, self.columns, blocksize):
i2 = min(i1 + blocksize, self.columns)
count = i2 - i1
W1 = W[:, i1:i2].clone()
Q1 = torch.zeros_like(W1)
Err1 = torch.zeros_like(W1)
Losses1 = torch.zeros_like(W1)
Hinv1 = Hinv[i1:i2, i1:i2]
for i in range(count):
w = W1[:, i]
d = Hinv1[i, i]
if groupsize != -1:
if (i1 + i) % groupsize == 0:
self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True)
q = quantize(
w.unsqueeze(1), self.quantizer.scale, self.quantizer.zero, self.quantizer.maxq
).flatten()
Q1[:, i] = q
Losses1[:, i] = (w - q) ** 2 / d ** 2
err1 = (w - q) / d
W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
Err1[:, i] = err1
Q[:, i1:i2] = Q1
Losses[:, i1:i2] = Losses1 / 2
W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
if DEBUG:
self.layer.weight.data[:, :i2] = Q[:, :i2]
self.layer.weight.data[:, i2:] = W[:, i2:]
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
print(torch.sum(Losses))
torch.cuda.synchronize()
print('time %.2f' % (time.time() - tick))
print('error', torch.sum(Losses).item())
if actorder:
invperm = torch.argsort(perm)
Q = Q[:, invperm]
if isinstance(self.layer, transformers.Conv1D):
Q = Q.t()
self.layer.weight.data = Q.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)
if DEBUG:
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
def free(self):
if DEBUG:
self.inp1 = None
self.out1 = None
self.H = None
self.Losses = None
self.Trace = None
torch.cuda.empty_cache()
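# --- Hypothetical usage sketch (illustrative only; assumes a CUDA-enabled PyTorch build,
# as the rest of this repo does, since fasterquant() calls torch.cuda.synchronize()) ---
def _demo_gptq_single_layer():
    layer = nn.Linear(64, 64)
    g = GPTQ(layer)
    g.quantizer = Quantizer()
    g.quantizer.configure(4, perchannel=True, sym=False, mse=False)
    calib = torch.randn(8, 16, 64)  # (batch, seq, features) calibration activations
    g.add_batch(calib.reshape(-1, 64), layer(calib).reshape(-1, 64))
    g.fasterquant(percdamp=.01, groupsize=-1)  # overwrites layer.weight with its quantized version
    g.free()
    return layer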
| EXA-1-master | exa/modular_components/gptq/gptq.py |
import numpy as np
import torch
import torch.nn as nn
def quantize(x, scale, zero, maxq):
if maxq < 0:
return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero
q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
return scale * (q - zero)
class Quantizer(nn.Module):
def __init__(self, shape=1):
super(Quantizer, self).__init__()
self.register_buffer('maxq', torch.tensor(0))
self.register_buffer('scale', torch.zeros(shape))
self.register_buffer('zero', torch.zeros(shape))
def configure(
self,
bits, perchannel=False, sym=True,
mse=False, norm=2.4, grid=100, maxshrink=.8,
trits=False
):
self.maxq = torch.tensor(2 ** bits - 1)
self.perchannel = perchannel
self.sym = sym
self.mse = mse
self.norm = norm
self.grid = grid
self.maxshrink = maxshrink
if trits:
self.maxq = torch.tensor(-1)
def find_params(self, x, weight=False):
dev = x.device
self.maxq = self.maxq.to(dev)
shape = x.shape
if self.perchannel:
if weight:
x = x.flatten(1)
else:
if len(shape) == 4:
x = x.permute([1, 0, 2, 3])
x = x.flatten(1)
if len(shape) == 3:
x = x.reshape((-1, shape[-1])).t()
if len(shape) == 2:
x = x.t()
else:
x = x.flatten().unsqueeze(0)
tmp = torch.zeros(x.shape[0], device=dev)
xmin = torch.minimum(x.min(1)[0], tmp)
xmax = torch.maximum(x.max(1)[0], tmp)
if self.sym:
xmax = torch.maximum(torch.abs(xmin), xmax)
tmp = xmin < 0
if torch.any(tmp):
xmin[tmp] = -xmax[tmp]
tmp = (xmin == 0) & (xmax == 0)
xmin[tmp] = -1
xmax[tmp] = +1
if self.maxq < 0:
self.scale = xmax
self.zero = xmin
else:
self.scale = (xmax - xmin) / self.maxq
if self.sym:
self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)
else:
self.zero = torch.round(-xmin / self.scale)
if self.mse:
best = torch.full([x.shape[0]], float('inf'), device=dev)
for i in range(int(self.maxshrink * self.grid)):
p = 1 - i / self.grid
xmin1 = p * xmin
xmax1 = p * xmax
scale1 = (xmax1 - xmin1) / self.maxq
zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero
q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq)
q -= x
q.abs_()
q.pow_(self.norm)
err = torch.sum(q, 1)
tmp = err < best
if torch.any(tmp):
best[tmp] = err[tmp]
self.scale[tmp] = scale1[tmp]
self.zero[tmp] = zero1[tmp]
if not self.perchannel:
if weight:
tmp = shape[0]
else:
tmp = shape[1] if len(shape) != 3 else shape[2]
self.scale = self.scale.repeat(tmp)
self.zero = self.zero.repeat(tmp)
if weight:
shape = [-1] + [1] * (len(shape) - 1)
self.scale = self.scale.reshape(shape)
self.zero = self.zero.reshape(shape)
return
if len(shape) == 4:
self.scale = self.scale.reshape((1, -1, 1, 1))
self.zero = self.zero.reshape((1, -1, 1, 1))
if len(shape) == 3:
self.scale = self.scale.reshape((1, 1, -1))
self.zero = self.zero.reshape((1, 1, -1))
if len(shape) == 2:
self.scale = self.scale.unsqueeze(0)
self.zero = self.zero.unsqueeze(0)
def quantize(self, x):
if self.ready():
return quantize(x, self.scale, self.zero, self.maxq)
return x
def enabled(self):
return self.maxq > 0
def ready(self):
return torch.all(self.scale != 0)
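# --- Hypothetical usage sketch (illustrative only; not part of the original module) ---
# Round-trip a random weight matrix through 4-bit per-channel quantization with the
# Quantizer above; the reconstruction error is bounded by half a quantization step.
def _demo_quantizer():
    w = torch.randn(16, 32)
    q = Quantizer()
    q.configure(4, perchannel=True, sym=False, mse=False)
    q.find_params(w, weight=True)
    w_quantized = q.quantize(w)
    return (w - w_quantized).abs().max()  # small, but non-zero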
try:
import quant_cuda
except:
print('CUDA extension not installed.')
# Assumes layer is perfectly divisible into 1024 * 1024 blocks
class Quant3Linear(nn.Module):
def __init__(self, infeatures, outfeatures, faster=False):
super().__init__()
self.register_buffer('zeros', torch.zeros((outfeatures, 1)))
self.register_buffer('scales', torch.zeros((outfeatures, 1)))
self.register_buffer('bias', torch.zeros(outfeatures))
self.register_buffer(
'qweight', torch.zeros((infeatures // 32 * 3, outfeatures), dtype=torch.int)
)
self.faster = faster
def pack(self, linear, scales, zeros):
self.zeros = zeros * scales
self.scales = scales.clone()
self.bias = linear.bias.clone()
intweight = torch.round((linear.weight.data + self.zeros) / self.scales).to(torch.int)
intweight = intweight.t().contiguous()
intweight = intweight.numpy().astype(np.uint32)
qweight = np.zeros(
(intweight.shape[0] // 32 * 3, intweight.shape[1]), dtype=np.uint32
)
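        # Pack the 3-bit integers into 32-bit words: every 32 weights occupy 3 words,
        # and two values in each group straddle a word boundary, which is what the
        # explicit shift-and-carry handling below accounts for.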
i = 0
row = 0
while row < qweight.shape[0]:
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i))
i += 10
qweight[row] |= intweight[i] << 30
row += 1
qweight[row] |= (intweight[i] >> 2) & 1
i += 1
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i) + 1)
i += 10
qweight[row] |= intweight[i] << 31
row += 1
qweight[row] |= (intweight[i] >> 1) & 0x3
i += 1
for j in range(i, i + 10):
qweight[row] |= intweight[j] << (3 * (j - i) + 2)
i += 10
row += 1
qweight = qweight.astype(np.int32)
self.qweight = torch.from_numpy(qweight)
def forward(self, x):
if x.shape[-1] == x.numel():
outshape = list(x.shape)
y = self.bias.clone()
outshape[-1] = self.bias.numel()
dtype = x.dtype
if self.faster:
x = x.half()
quant_cuda.vecquant3matmul_faster(x, self.qweight, y, self.scales, self.zeros)
else:
x = x.float()
quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.zeros)
y = y.to(dtype)
return y.reshape(outshape)
raise ValueError('Only supports a single token currently.')
def make_quant3(module, names, name='', faster=False):
if isinstance(module, Quant3Linear):
return
for attr in dir(module):
tmp = getattr(module, attr)
name1 = name + '.' + attr if name != '' else attr
if name1 in names:
setattr(
module, attr, Quant3Linear(tmp.in_features, tmp.out_features, faster=faster)
)
for name1, child in module.named_children():
make_quant3(child, names, name + '.' + name1 if name != '' else name1, faster=faster)
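# Illustrative sketch, not part of the original file: make_quant3 swaps the named
# nn.Linear submodules of a model for (still empty) Quant3Linear layers; the packed
# 3-bit weights are filled in afterwards via Quant3Linear.pack. The toy module below
# is purely an example.
def _example_make_quant3():
    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(1024, 1024)
    model = Toy()
    make_quant3(model, names={'fc'})
    return model  # model.fc is now a Quant3Linear with int32 qweight buffers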
| EXA-1-master | exa/modular_components/gptq/quant.py |
import math
import time
import torch
import torch.nn as nn
import transformers
from gptq import *
from modelutils import *
from quant import *
def get_bloom(model):
import torch
def skip(*args, **kwargs):
pass
torch.nn.init.kaiming_uniform_ = skip
torch.nn.init.uniform_ = skip
torch.nn.init.normal_ = skip
from transformers import BloomForCausalLM
model = BloomForCausalLM.from_pretrained(model, torch_dtype='auto')
model.seqlen = 2048
return model
@torch.no_grad()
def bloom_sequential(model, dataloader, dev, means=None, stds=None):
print('Starting ...')
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.transformer.h
model.transformer.word_embeddings = model.transformer.word_embeddings.to(dev)
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None, 'alibi': None}
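    # Catcher wraps the first transformer block: it copies that block's inputs
    # (hidden states plus the attention_mask / alibi kwargs) into `inps` and `cache`,
    # then raises ValueError so the rest of the forward pass is skipped.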
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['alibi'] = kwargs['alibi']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.transformer.word_embeddings = model.transformer.word_embeddings.cpu()
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
alibi = cache['alibi']
print('Ready.')
for i in range(len(layers)):
layer = layers[i].to(dev)
subset = find_layers(layer)
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer = Quantizer()
gptq[name].quantizer.configure(
args.wbits, perchannel=True, sym=args.sym, mse=False
)
def add_batch(name):
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, alibi=alibi)[0]
for h in handles:
h.remove()
for name in subset:
print(i, name)
print('Quantizing ...')
gptq[name].fasterquant(percdamp=args.percdamp, groupsize=args.groupsize)
for j in range(args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, alibi=alibi)[0]
layers[i] = layer.cpu()
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
@torch.no_grad()
def bloom_eval(model, testenc, dev):
print('Evaluation...')
testenc = testenc.input_ids
nsamples = testenc.numel() // model.seqlen
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.transformer.h
model.transformer.word_embeddings = model.transformer.word_embeddings.to(dev)
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None, 'alibi': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['alibi'] = kwargs['alibi']
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.transformer.word_embeddings = model.transformer.word_embeddings.cpu()
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
alibi = cache['alibi']
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
if args.nearest:
subset = find_layers(layer)
for name in subset:
quantizer = Quantizer()
quantizer.configure(
args.wbits, perchannel=True, sym=args.sym, mse=False
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, alibi=alibi)[0]
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
model.transformer.ln_f = model.transformer.ln_f.to(dev)
model.lm_head = model.lm_head.to(dev)
testenc = testenc.to(dev)
nlls = []
for i in range(nsamples):
hidden_states = inps[i].unsqueeze(0)
hidden_states = model.transformer.ln_f(hidden_states)
lm_logits = model.lm_head(hidden_states)
shift_logits = lm_logits[:, :-1, :].contiguous()
shift_labels = testenc[
:, (i * model.seqlen):((i + 1) * model.seqlen)
][:, 1:]
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
neg_log_likelihood = loss.float() * model.seqlen
nlls.append(neg_log_likelihood)
ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))
print(ppl.item())
model.config.use_cache = use_cache
if __name__ == '__main__':
import argparse
from datautils import *
parser = argparse.ArgumentParser()
parser.add_argument(
'model', type=str,
help='BLOOM model to load; pass `bigscience/bloom-X`.'
)
parser.add_argument(
'dataset', type=str, choices=['wikitext2', 'ptb', 'c4'],
help='Where to extract calibration data from.'
)
parser.add_argument(
'--seed',
type=int, default=0, help='Seed for sampling the calibration data.'
)
parser.add_argument(
'--nsamples', type=int, default=128,
help='Number of calibration data samples.'
)
parser.add_argument(
'--percdamp', type=float, default=.01,
help='Percent of the average Hessian diagonal to use for dampening.'
)
parser.add_argument(
'--nearest', action='store_true',
help='Whether to run the RTN baseline.'
)
parser.add_argument(
'--wbits', type=int, default=16, choices=[2, 3, 4, 16],
help='#bits to use for quantization; use 16 for evaluating base model.'
)
parser.add_argument(
'--groupsize', type=int, default=-1,
help='Groupsize to use for quantization; default uses full row.'
)
parser.add_argument(
'--sym', action='store_true',
help='Whether to perform symmetric quantization.'
)
parser.add_argument(
'--new-eval', action='store_true',
help='Whether to use the new PTB and C4 eval'
)
args = parser.parse_args()
model = get_bloom(args.model)
model.eval()
dataloader, testloader = get_loaders(
args.dataset, nsamples=args.nsamples, seed=args.seed, model=args.model, seqlen=model.seqlen
)
if args.wbits < 16 and not args.nearest:
tick = time.time()
bloom_sequential(model, dataloader, DEV)
print(time.time() - tick)
datasets = ['wikitext2', 'ptb', 'c4']
if args.new_eval:
datasets = ['wikitext2', 'ptb-new', 'c4-new']
for dataset in datasets:
dataloader, testloader = get_loaders(
dataset, seed=args.seed, model=args.model, seqlen=model.seqlen
)
print(dataset)
bloom_eval(model, testloader, DEV)
| EXA-1-master | exa/modular_components/gptq/bloom.py |
import math
from collections.abc import Iterable
import numpy as np
import sacrebleu
import sklearn.metrics
import random
def mean(arr):
return sum(arr) / len(arr)
def pop_stddev(arr):
mu = mean(arr)
return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))
def sample_stddev(arr):
mu = mean(arr)
return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))
def mean_stderr(arr):
return sample_stddev(arr) / math.sqrt(len(arr))
def median(arr):
return arr[len(arr) // 2]
def matthews_corrcoef(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
return sklearn.metrics.matthews_corrcoef(golds, preds)
def f1_score(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
fscore = sklearn.metrics.f1_score(golds, preds)
return np.max(fscore)
def acc_all(items):
# Only count as correct if all answers are labeled correctly for each question
question_scoring_dict = {}
preds = list(zip(*items))[0]
docs = list(zip(*items))[1]
for doc, pred in zip(docs, preds):
paragraph_id = doc["idx"]["paragraph"]
question_id = doc["idx"]["question"]
if (paragraph_id, question_id) not in question_scoring_dict:
question_scoring_dict[(paragraph_id, question_id)] = []
gold_label = doc["label"] == 1
question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
return acc
def acc_all_stderr(items):
# Only count as correct if all answers are labeled correctly for each question
question_scoring_dict = {}
preds = list(zip(*items))[0]
docs = list(zip(*items))[1]
for doc, pred in zip(docs, preds):
question_id = doc["idx"]["question"]
if question_id not in question_scoring_dict:
question_scoring_dict[question_id] = []
gold_label = doc["label"] == 1
question_scoring_dict[question_id].append(gold_label == pred)
acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
return acc
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""Compute max metric between prediction and each ground truth."""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def perplexity(items):
return math.exp(-mean(items))
def weighted_mean(items):
a, b = zip(*items)
return sum(a) / sum(b)
def weighted_perplexity(items):
return math.exp(-weighted_mean(items))
def bits_per_byte(items):
return -weighted_mean(items) / math.log(2)
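# Illustrative sketch, not part of the original file: `perplexity` expects a list of
# per-token log-likelihoods, while `weighted_perplexity` expects
# (total_loglikelihood, token_count) pairs; both reduce to exp of the negative
# average log-likelihood, so the two calls below agree.
def _example_perplexity():
    token_logliks = [-2.3, -1.7, -0.9]
    assert abs(perplexity(token_logliks)
               - weighted_perplexity([(sum(token_logliks), len(token_logliks))])) < 1e-9
    return perplexity(token_logliks)  # ~5.12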
def bleu(items):
"""The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
for evaluating a generated sentence to a reference sentence. It counts matching
n-grams in the candidate translation to n-grams in the reference text, where
1-gram or unigram would be each token and a bigram comparison would be each
word pair. The comparison is made regardless of word order
Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
Paper: https://www.aclweb.org/anthology/P02-1040/
Higher is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_bleu(preds, refs).score
def chrf(items):
"""chrF++ is a tool for automatic evaluation of machine translation output
based on character n-gram precision and recall enhanced with word n-grams.
Source: https://github.com/m-popovic/chrF
Paper: https://www.aclweb.org/anthology/W15-3049.pdf
    Higher is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_chrf(preds, refs).score
def ter(items):
"""Translation Error Rate is an error metric for machine translation that
measures the number of edits required to change a system output into one
of the references
Source: http://www.cs.umd.edu/~snover/tercom/
Paper: http://mt-archive.info/AMTA-2006-Snover.pdf
Lower is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_ter(preds, refs).score
def is_non_str_iterable(obj):
return isinstance(obj, Iterable) and not isinstance(obj, str)
def _sacreformat(refs, preds):
"""Format refs and preds for sacrebleu corpus calculation. It is very particular"""
# Sacrebleu expects (List[str], List[List[str])
# e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])
# Note [ref1_stream] is the first reference for each pred.
# So lists are size N and (M, N) for N preds and M possible refs for each pred
# This is a different order of dimensions that I would expect
# We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
# Must become List[List[str]] with the inner list corresponding to preds
if not is_non_str_iterable(refs):
refs = list(refs)
if not is_non_str_iterable(refs[0]):
refs = [[ref] for ref in refs]
refs = list(zip(*refs))
# Note the number of refs in each ref list much match the number of preds
# We expect preds to be List[str] or List[List[str]]. Must become List[str]
if not is_non_str_iterable(preds):
preds = list(preds)
if is_non_str_iterable(preds[0]):
assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
preds = [pred[0] for pred in preds]
return refs, preds
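# Illustrative sketch, not part of the original file: _sacreformat regroups
# per-prediction reference lists into the (num_refs, num_preds) layout sacrebleu wants.
def _example_sacreformat():
    refs = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]  # one list of refs per prediction
    preds = ["pred a", "pred b"]
    refs, preds = _sacreformat(refs, preds)
    # refs -> [("ref a1", "ref b1"), ("ref a2", "ref b2")], preds unchanged
    return refs, preds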
# stderr stuff
class _bootstrap_internal:
def __init__(self, f, n):
self.f = f
self.n = n
def __call__(self, v):
i, xs = v
rnd = random.Random()
rnd.seed(i)
res = []
for _ in range(self.n):
res.append(self.f(rnd.choices(xs, k=len(xs))))
return res
def bootstrap_stderr(f, xs, iters):
import multiprocessing as mp
pool = mp.Pool(mp.cpu_count())
    # this gives a biased estimate of the stderr (i.e. with the mean, it gives something
    # equivalent to the stderr calculated without Bessel's correction in the stddev).
    # Unfortunately, I haven't been able to figure out what the right correction is to
    # make the bootstrap unbiased - I considered multiplying by sqrt(n/(n-1)) but that
    # would be ad-hoc and I can't prove it would actually be an unbiased estimator.
    # Thankfully, this shouldn't matter much because our samples are usually pretty big.
res = []
chunk_size = min(1000, iters)
from tqdm import tqdm
print("bootstrapping for stddev:", f.__name__)
for bootstrap in tqdm(
pool.imap(
_bootstrap_internal(f, chunk_size),
[(i, xs) for i in range(iters // chunk_size)],
),
total=iters // chunk_size,
):
# sample w replacement
res.extend(bootstrap)
pool.close()
return sample_stddev(res)
def stderr_for_metric(metric, bootstrap_iters):
bootstrappable = [
median,
matthews_corrcoef,
f1_score,
perplexity,
bleu,
chrf,
ter,
]
if metric in bootstrappable:
return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)
stderr = {mean: mean_stderr, acc_all: acc_all_stderr}
return stderr.get(metric, None)
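# Illustrative sketch, not part of the original file: stderr_for_metric returns a
# bootstrap-based estimator for metrics like bleu/f1/perplexity, a closed-form stderr
# for `mean` and `acc_all`, and None for anything else.
def _example_stderr_for_metric():
    acc_values = [0.0, 1.0, 1.0, 0.0]
    return stderr_for_metric(mean, bootstrap_iters=1000)(acc_values)  # ~0.289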
def yesno(x):
if x:
return "yes"
else:
return "no"
| EXA-1-master | exa/modular_components/gptq/zeroShot/metrics.py |
import numpy as np
import torch
def set_seed(seed):
np.random.seed(seed)
torch.random.manual_seed(seed)
def get_wikitext2(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
trainenc = tokenizer("\n\n".join(traindata['text']), return_tensors='pt')
testenc = tokenizer("\n\n".join(testdata['text']), return_tensors='pt')
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_ptb(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset('ptb_text_only', 'penn_treebank', split='train')
valdata = load_dataset('ptb_text_only', 'penn_treebank', split='validation')
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
trainenc = tokenizer("\n\n".join(traindata['sentence']), return_tensors='pt')
testenc = tokenizer("\n\n".join(valdata['sentence']), return_tensors='pt')
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4(nsamples, seed, seqlen, model):
from datasets import load_dataset
traindata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'train': 'en/c4-train.00000-of-01024.json.gz'}, split='train'
)
valdata = load_dataset(
'allenai/c4', 'allenai--c4', data_files={'validation': 'en/c4-validation.00000-of-00008.json.gz'}, split='validation'
)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=False)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]['text'], return_tensors='pt')
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
import random
random.seed(0)
valenc = []
for _ in range(256):
while True:
i = random.randint(0, len(valdata) - 1)
tmp = tokenizer(valdata[i]['text'], return_tensors='pt')
if tmp.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
valenc.append(tmp.input_ids[:, i:j])
valenc = torch.hstack(valenc)
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_loaders(
name, nsamples=128, seed=0, seqlen=2048, model=''
):
if 'wikitext2' in name:
return get_wikitext2(nsamples, seed, seqlen, model)
if 'ptb' in name:
return get_ptb(nsamples, seed, seqlen, model)
if 'c4' in name:
return get_c4(nsamples, seed, seqlen, model)
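# Illustrative sketch, not part of the original file: every loader returns
# (trainloader, testenc), where trainloader is a list of (input_ids, target) pairs
# whose targets are masked to -100 everywhere except the final position. The model
# name below is only an example tokenizer source.
def _example_get_loaders():
    return get_loaders('wikitext2', nsamples=2, seed=0, seqlen=2048,
                       model='facebook/opt-125m')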
| EXA-1-master | exa/modular_components/gptq/zeroShot/datautils.py |
import argparse
import fnmatch
import tasks
import inspect
import functools
def positional_deprecated(fn):
"""
A decorator to nudge users into passing only keyword args (`kwargs`) to the
wrapped function, `fn`.
"""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
        if len(args) != (1 if inspect.ismethod(fn) else 0):
print(
f"WARNING: using {fn.__name__} with positional arguments is "
"deprecated and will be disallowed in a future version of "
"lm-evaluation-harness!"
)
return fn(*args, **kwargs)
return _wrapper
class MultiChoice:
def __init__(self, choices):
self.choices = choices
# Simple wildcard support (linux filename patterns)
def __contains__(self, values):
for value in values.split(","):
if len(fnmatch.filter(self.choices, value)) == 0:
return False
return True
def __iter__(self):
for choice in self.choices:
yield choice
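# Illustrative sketch, not part of the original file: argparse validates `--tasks` by
# calling `in` on the choices object, so comma-separated wildcard patterns are accepted
# as long as each pattern matches at least one registered task.
def _example_multichoice():
    return "arc_*,boolq" in MultiChoice(["arc_easy", "arc_challenge", "boolq"])  # True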
# Returns a list containing all values of the source_list that
# match at least one of the patterns
def pattern_match(patterns, source_list):
task_names = set()
for pattern in patterns:
for matching in fnmatch.filter(source_list, pattern):
task_names.add(matching)
return list(task_names)
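# Illustrative sketch, not part of the original file: pattern_match expands those same
# wildcard patterns into the concrete task names that will actually be run.
def _example_pattern_match():
    return pattern_match(["arc_*"], ["arc_easy", "arc_challenge", "boolq"])
    # -> ["arc_easy", "arc_challenge"], in unspecified order (a set is used internally)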
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'model', type=str,
        help='Model to load; for OPT pass `facebook/opt-X`, for BLOOM pass `bigscience/bloom-X`.'
)
parser.add_argument(
'dataset', type=str, choices=['wikitext2', 'ptb', 'c4'],
help='Where to extract calibration data from.'
)
parser.add_argument(
'--nsamples', type=int, default=128,
help='Number of calibration data samples.'
)
parser.add_argument(
'--percdamp', type=float, default=.01,
help='Percent of the average Hessian diagonal to use for dampening.'
)
parser.add_argument(
'--groupsize', type=int, default=-1,
help='Groupsize to use for quantization; default uses full row.'
)
parser.add_argument(
'--seed',
type=int, default=2, help='Seed for sampling the calibration data.'
)
parser.add_argument(
'--table_results', action="store_true", help='Print results in a table.'
)
parser.add_argument("--tasks", default=None, choices=MultiChoice(tasks.ALL_TASKS))
parser.add_argument("--num_fewshot", type=int, default=0)
parser.add_argument("--output_path", default=None)
parser.add_argument("--wbits", type=int, default=32)
parser.add_argument("--nearest", action="store_true")
parser.add_argument('--load', type=str, default='')
args = parser.parse_args()
args.batch_size = 1 # BS=1 is used for zeroShot tasks!
return args
| EXA-1-master | exa/modular_components/gptq/zeroShot/utils.py |
import json
import logging
import evaluator
import tasks
from utils import parse_args, pattern_match
def main():
args = parse_args()
if args.tasks is None:
raise ValueError("Please specify a task to run")
else:
task_names = pattern_match(args.tasks.split(","), tasks.ALL_TASKS)
print(f"Selected Tasks: {task_names}")
results = evaluator.simple_evaluate(
args=args,
tasks_list=task_names,
)
dumped = json.dumps(results, indent=2)
print(dumped)
if args.output_path:
with open(args.output_path, "w") as f:
f.write(dumped)
print(
f"{args.model}"
f"num_fewshot: {args.num_fewshot},"
f" batch_size: {args.batch_size}"
)
if args.table_results:
print(evaluator.make_table(results))
else:
from pprint import pprint
pprint(results)
if __name__ == "__main__":
main()
| EXA-1-master | exa/modular_components/gptq/zeroShot/main.py |
from utils import positional_deprecated
import random
import numpy as np
import models
import models.models_utils
import tasks
import collections
import itertools
import metrics
import torch
import time
from datautils import get_loaders
@positional_deprecated
def simple_evaluate(
# model,
args,
tasks_list=[]
):
"""Instantiate and evaluate a model on a list of tasks.
    :param args: argparse.Namespace
        Parsed command-line arguments for the zero-shot run
    :param tasks_list: list[Union[str, Task]]
List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
:return
Dictionary of results
"""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
assert tasks_list != [], "No tasks specified"
lm = models.get_model(args.model).create_from_arg_string({"args": args})
if args.load:
print('Loading checkpoint from {}...'.format(args.load))
lm.model.load_state_dict(torch.load(args.load))
if args.wbits < 16 and not args.nearest:
tick = time.time()
dataloader, testloader = get_loaders(
args.dataset, seed=args.seed, model=args.model, seqlen=lm.seqlen
)
if 'opt' in args.model:
quantizers = lm.opt_sequential(dataloader)
else:
quantizers = lm.bloom_sequential(dataloader)
print(time.time() - tick)
task_dict = tasks.get_task_dict(tasks_list)
results = evaluate(
lm=lm,
task_dict=task_dict,
seed=args.seed,
num_fewshot=args.num_fewshot,
)
# add info about the model and few shot config
results["config"] = {
"model": args.model,
"num_fewshot": args.num_fewshot,
"batch_size": args.batch_size,
"bootstrap_iters": 1000,
}
return results
@positional_deprecated
def evaluate(
lm,
task_dict,
seed=0,
num_fewshot=0,
):
"""Instantiate and evaluate a model on a list of tasks.
:param lm: obj
Language Model
:param task_dict: dict[str, Task]
Dictionary of tasks. Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param seed: int
        Seed for the random generator used to build few-shot contexts
:param num_fewshot: int
Number of examples in few-shot context
:return
Dictionary of results
"""
task_dict_items = [
(name, task)
for name, task in task_dict.items()
if (task.has_validation_docs() or task.has_test_docs())
]
results = collections.defaultdict(dict)
versions = collections.defaultdict(dict)
requests = collections.defaultdict(list)
requests_origin = collections.defaultdict(list)
overlaps = collections.defaultdict(list) # {task_name: contaminated_docs}
    # If we ever run into issues where the eval tasks don't fit in memory and we can't afford a machine with bigger
    # memory, we can always modify this plumbing to support that, but I didn't want to include it just yet because
    # over-engineering is bad (or we could make it write the requests to disk and then read them back out again,
    # probably using an sqlite db, because of all the moving parts we have).
# TODO: we need unit tests & sanity checks or something to ensure that the return of `validation_docs` is stable
docs = {}
docs_for_decontamination = collections.defaultdict(list)
# get lists of each type of request
for task_name, task in task_dict_items:
versions[task_name] = task.VERSION
# default to test doc, fall back to val doc if validation unavailable
# TODO: the test-fallback-to-val system isn't final, we should revisit it at some point
if task.has_test_docs():
task_doc_func = task.test_docs
task_set = "test" # Required for caching in the decontamination
elif task.has_validation_docs():
task_set = "val" # Required for caching in the decontamination
task_doc_func = task.validation_docs
else:
raise RuntimeError("Task has neither test_docs nor validation_docs")
# deterministically shuffle docs and chop off the first `limit` because sometimes docs are in some kind of order
task_docs = list(task_doc_func())
rnd = random.Random()
rnd.seed(seed)
# rnd.shuffle(task_docs)
description = ""
for doc_id, doc in enumerate(itertools.islice(task_docs, 0, None)):
docs[(task_name, doc_id)] = doc
ctx = task.fewshot_context(
doc=doc, num_fewshot=num_fewshot, rnd=rnd, description=description
)
reqs = task.construct_requests(doc, ctx)
if not isinstance(reqs, (list, tuple)):
reqs = [reqs]
for i, req in enumerate(reqs):
requests[req.request_type].append(req)
# i: index in requests for a single task instance
# doc_id: unique id that we can get back to a doc using `docs`
requests_origin[req.request_type].append((i, task_name, doc, doc_id))
# all responses for each (task, doc)
process_res_queue = collections.defaultdict(list)
# execute each type of request
for reqtype, reqs in requests.items():
# TODO: right now, this code runs multiple separate LM requests for multiple Requests differing
# only in index. We could implement some kind of caching, but that would be more of a band-aid
# solution. we could also implement some kind of auto-grouping here;
# they should end up next to each other.
print("Running", reqtype, "requests")
resps = getattr(lm, reqtype)([req.args for req in reqs])
resps = [
x if req.index is None else x[req.index] for x, req in zip(resps, reqs)
]
for resp, (i, task_name, doc, doc_id) in zip(resps, requests_origin[reqtype]):
process_res_queue[(task_name, doc_id)].append((i, resp))
vals = collections.defaultdict(list)
# unpack results and sort back in order and return control to Task
for (task_name, doc_id), requests in process_res_queue.items():
requests.sort(key=lambda x: x[0])
requests = [x[1] for x in requests]
task = task_dict[task_name]
doc = docs[(task_name, doc_id)]
metrics_dict = task.process_results(doc, requests)
for metric, value in metrics_dict.items():
vals[(task_name, metric)].append(value)
# aggregate results
for (task_name, metric), items in vals.items():
task = task_dict[task_name]
real_metric = metric # key when looking up the metric with task.aggregation
if metric.endswith(decontaminate_suffix):
real_metric = metric.replace(
decontaminate_suffix, ""
) # decontaminated still uses the same metric
results[task_name][metric] = task.aggregation()[real_metric](items)
# hotfix: bleu, chrf, ter seem to be really expensive to bootstrap
# so we run them less iterations. still looking for a cleaner way to do this
stderr = metrics.stderr_for_metric(
metric=task.aggregation()[real_metric],
bootstrap_iters=1000
)
if stderr is not None:
results[task_name][metric + "_stderr"] = stderr(items)
return {"results": dict(results), "versions": dict(versions)}
def make_table(result_dict):
"""Generate table of results."""
from pytablewriter import MarkdownTableWriter, LatexTableWriter
md_writer = MarkdownTableWriter()
latex_writer = LatexTableWriter()
md_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
latex_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
values = []
for k, dic in result_dict["results"].items():
version = result_dict["versions"][k]
for m, v in dic.items():
if m.endswith("_stderr"):
continue
if m + "_stderr" in dic:
se = dic[m + "_stderr"]
values.append([k, version, m, "%.4f" % v, "±", "%.4f" % se])
else:
values.append([k, version, m, "%.4f" % v, "", ""])
k = ""
version = ""
md_writer.value_matrix = values
latex_writer.value_matrix = values
return md_writer.dumps()
decontaminate_suffix = "_decontaminate"
| EXA-1-master | exa/modular_components/gptq/zeroShot/evaluator.py |
"""
The LAMBADA dataset: Word prediction requiring a broad discourse context∗
https://arxiv.org/pdf/1606.06031.pdf
LAMBADA is a dataset to evaluate the capabilities of computational models for text
understanding by means of a word prediction task. LAMBADA is a collection of narrative
passages sharing the characteristic that human subjects are able to guess their last
word if they are exposed to the whole passage, but not if they only see the last
sentence preceding the target word. To succeed on LAMBADA, computational models
cannot simply rely on local context, but must be able to keep track of information
in the broader discourse.
Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
"""
import inspect
from .tasks_utils import Task, rf
from .tasks_utils import mean, perplexity
from .local_datasets import lambada
_CITATION = """
@misc{
author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
title={The LAMBADA dataset},
DOI={10.5281/zenodo.2630551},
publisher={Zenodo},
year={2016},
month={Aug}
}
"""
def preprocess(text):
text = text.replace("“", '"')
text = text.replace("”", '"')
text = text.replace("''", '"')
text = text.replace("``", '"')
return '\n' + text.strip()
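# Illustrative sketch, not part of the original file: preprocess normalises the various
# quote styles found in LAMBADA passages and prepends a newline so the passage starts
# on its own line, e.g. preprocess('“Hello” world') == '\n"Hello" world'.
def _example_preprocess():
    return preprocess('“Hello” world')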
class LAMBADA(Task):
VERSION = 0
DATASET_PATH = inspect.getfile(lambada.Lambada)
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
pass
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
pass
def doc_to_text(self, doc):
return preprocess(doc["text"].strip()).rsplit(" ", 1)[0]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["text"]
def doc_to_target(self, doc):
return " " + doc["text"].rsplit(" ", 1)[1]
def construct_requests(self, doc, ctx):
ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc))
return ll, is_greedy
def process_results(self, doc, results):
ll, is_greedy = results
return {"ppl": ll, "acc": int(is_greedy)}
def aggregation(self):
return {"ppl": perplexity, "acc": mean}
def higher_is_better(self):
return {"ppl": False, "acc": True}
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/lambada.py |
"""
A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories
https://arxiv.org/pdf/1604.01696.pdf
'Story Cloze Test' (2018) is a commonsense reasoning framework for evaluating story
understanding, story generation, and script learning. This test requires a system
to choose the correct ending to a four-sentence story.
Homepage: https://cs.rochester.edu/nlp/rocstories/
"""
import numpy as np
from .tasks_utils import Task, rf
from .tasks_utils import mean
_CITATION = """
@inproceedings{sharma-etal-2018-tackling,
title = "Tackling the Story Ending Biases in The Story Cloze Test",
author = "Sharma, Rishi and
Allen, James and
Bakhshandeh, Omid and
Mostafazadeh, Nasrin",
booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P18-2119",
doi = "10.18653/v1/P18-2119",
pages = "752--757",
abstract = "The Story Cloze Test (SCT) is a recent framework for evaluating story comprehension and script learning. There have been a variety of models tackling the SCT so far. Although the original goal behind the SCT was to require systems to perform deep language understanding and commonsense reasoning for successful narrative understanding, some recent models could perform significantly better than the initial baselines by leveraging human-authorship biases discovered in the SCT dataset. In order to shed some light on this issue, we have performed various data analysis and analyzed a variety of top performing models presented for this task. Given the statistics we have aggregated, we have designed a new crowdsourcing scheme that creates a new SCT dataset, which overcomes some of the biases. We benchmark a few models on the new dataset and show that the top-performing model on the original SCT dataset fails to keep up its performance. Our findings further signify the importance of benchmarking NLP systems on various evolving test sets.",
}
"""
class StoryCloze(Task):
VERSION = 0
DATASET_PATH = "story_cloze"
DATASET_NAME = None
def __init__(self, data_dir: str='tasks/local_datasets/storyCloze2018'):
"""
StoryCloze is not publicly available. You must download the data by
following https://cs.rochester.edu/nlp/rocstories/ and pass the folder
path into the `data_dir` arg.
"""
print("PLEASE MAKE SURE TO FILL THIS FORM BEFORE USING THE DATASET: https://cs.rochester.edu/nlp/rocstories/")
super().__init__(data_dir=data_dir)
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
pass
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return " ".join(
[
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
]
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return " ".join(
[
doc["input_sentence_1"],
doc["input_sentence_2"],
doc["input_sentence_3"],
doc["input_sentence_4"],
]
)
def doc_to_target(self, doc):
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
# `- 1` because the `answer_right_ending` index is 1-based.
return " " + clozes[doc["answer_right_ending"] - 1]
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
clozes = [doc["sentence_quiz1"], doc["sentence_quiz2"]]
lls = [rf.loglikelihood(ctx, " {}".format(choice))[0] for choice in clozes]
return lls
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
gold = doc["answer_right_ending"] - 1
acc = 1.0 if np.argmax(results) == gold else 0.0
return {"acc": acc}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {"acc": mean}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {"acc": True}
class StoryCloze2016(StoryCloze):
DATASET_NAME = "2016"
class StoryCloze2018(StoryCloze):
DATASET_NAME = "2018" | EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/storycloze.py |
from pprint import pprint
from typing import List, Union
from .tasks_utils import Task
from . import piqa
from . import arc
from . import superglue
from .local_datasets import lambada as lambada_dataset
from .lambada import LAMBADA
from . import glue
from . import storycloze
# TODO: Add the rest of the results!
########################################
# All tasks
########################################
TASK_REGISTRY = {
"lambada": LAMBADA,
"piqa": piqa.PiQA,
"arc_easy": arc.ARCEasy,
"arc_challenge": arc.ARCChallenge,
"boolq": superglue.BoolQ,
"cb": superglue.CommitmentBank,
"copa": superglue.Copa,
"wic": superglue.WordsInContext,
"multirc": superglue.MultiRC,
"rte": glue.RTE,
"record": superglue.ReCoRD,
"wsc": superglue.SGWinogradSchemaChallenge,
"storycloze": storycloze.StoryCloze2018
}
ALL_TASKS = sorted(list(TASK_REGISTRY))
def get_task(task_name):
try:
return TASK_REGISTRY[task_name]
except KeyError:
print("Available tasks:")
pprint(TASK_REGISTRY)
raise KeyError(f"Missing task {task_name}")
def get_task_name_from_object(task_object):
for name, class_ in TASK_REGISTRY.items():
if class_ is task_object:
return name
# this gives a mechanism for non-registered tasks to have a custom name anyways when reporting
return (
task_object.EVAL_HARNESS_NAME
if hasattr(task_object, "EVAL_HARNESS_NAME")
else type(task_object).__name__
)
def get_task_dict(task_name_list: List[Union[str, Task]]):
task_name_dict = {
task_name: get_task(task_name)()
for task_name in task_name_list
if isinstance(task_name, str)
}
task_name_from_object_dict = {
get_task_name_from_object(task_object): task_object
for task_object in task_name_list
if not isinstance(task_object, str)
}
assert set(task_name_dict.keys()).isdisjoint(set(task_name_from_object_dict.keys()))
return {**task_name_dict, **task_name_from_object_dict}
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/__init__.py |
"""
GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding
https://openreview.net/pdf?id=rJ4km2R5t7
The General Language Understanding Evaluation (GLUE) benchmark is a collection of
resources for training, evaluating, and analyzing natural language understanding
systems. GLUE consists of:
- A benchmark of nine sentence- or sentence-pair language understanding tasks built
on established existing datasets and selected to cover a diverse range of dataset
sizes, text genres, and degrees of difficulty, and
- A diagnostic dataset designed to evaluate and analyze model performance with
respect to a wide range of linguistic phenomena found in natural language.
Homepage: https://gluebenchmark.com/
"""
import re
import numpy as np
from .tasks_utils import Task, rf
from .tasks_utils import mean, matthews_corrcoef, f1_score, yesno
def general_detokenize(string):
string = string.replace(" n't", "n't")
string = string.replace(" )", ")")
string = string.replace("( ", "(")
string = string.replace('" ', '"')
string = string.replace(' "', '"')
string = re.sub(r" (['.,])", r"\1", string)
return string
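# Illustrative sketch, not part of the original file: general_detokenize undoes the
# PTB-style tokenisation spacing in GLUE sentence fields before they are prompted,
# e.g. general_detokenize("It 's a test ( really ) .") == "It's a test (really)."
def _example_general_detokenize():
    return general_detokenize("It 's a test ( really ) .")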
# TODO(jon-tow): Add citations for the individual datasets/tasks that make up GLUE.
_CITATION = """
@inproceedings{wang-etal-2018-glue,
title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding",
author = "Wang, Alex and
Singh, Amanpreet and
Michael, Julian and
Hill, Felix and
Levy, Omer and
Bowman, Samuel",
booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
month = nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-5446",
doi = "10.18653/v1/W18-5446",
pages = "353--355",
abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.",
}
"""
# Single-Sentence Tasks
class CoLA(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "cola"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: Does this sentence make sense?\nAnswer:".format(
doc["sentence"]
)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["sentence"]
def doc_to_target(self, doc):
return " {}".format({1: "yes", 0: "no"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _ = rf.loglikelihood(ctx, " yes")
ll_false, _ = rf.loglikelihood(ctx, " no")
return ll_true, ll_false
def process_results(self, doc, results):
ll_true, ll_false = results
pred = ll_true > ll_false
gold = doc["label"]
return {"mcc": (gold, pred)}
def higher_is_better(self):
return {"mcc": True}
def aggregation(self):
return {"mcc": matthews_corrcoef}
class SST(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "sst2"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: Is this sentence positive or negative?\nAnswer:".format(
general_detokenize(doc["sentence"]),
)
def doc_to_target(self, doc):
return " {}".format({1: "positive", 0: "negative"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_positive, _ = rf.loglikelihood(ctx, " positive")
ll_negative, _ = rf.loglikelihood(ctx, " negative")
return ll_positive, ll_negative
def process_results(self, doc, results):
ll_positive, ll_negative = results
pred = ll_positive > ll_negative
gold = doc["label"]
return {"acc": pred == gold}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
# Inference Tasks
class MNLI(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "mnli"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
if self.has_validation_docs():
return self.dataset["validation_matched"]
def test_docs(self):
if self.has_test_docs():
return self.dataset["test_matched"]
def doc_to_text(self, doc):
return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format(
doc["premise"],
doc["hypothesis"].strip()
+ ("" if doc["hypothesis"].strip().endswith(".") else "."),
)
def doc_to_target(self, doc):
# True = entailment
# False = contradiction
# Neither = neutral
return " {}".format({0: "True", 1: "Neither", 2: "False"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _ = rf.loglikelihood(ctx, " True")
ll_neither, _ = rf.loglikelihood(ctx, " Neither")
ll_false, _ = rf.loglikelihood(ctx, " False")
return ll_true, ll_neither, ll_false
def process_results(self, doc, results):
gold = doc["label"]
pred = np.argmax(results)
return {"acc": pred == gold}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class MNLIMismatched(MNLI):
VERSION = 0
def validation_docs(self):
if self.has_validation_docs():
return self.dataset["validation_mismatched"]
def test_docs(self):
if self.has_test_docs():
return self.dataset["test_mismatched"]
class QNLI(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "qnli"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return (
"{}\n{}\nQuestion: Does this response answer the question?\nAnswer:".format(
doc["question"],
doc["sentence"],
)
)
def doc_to_target(self, doc):
# True = entailment
# False = not entailment
return " {}".format({0: "yes", 1: "no"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
pred = ll_no > ll_yes
gold = doc["label"]
return {"acc": pred == gold}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class WNLI(Task):
VERSION = 1
DATASET_PATH = "glue"
DATASET_NAME = "wnli"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: {} True or False?\nAnswer:".format(
doc["sentence1"],
doc["sentence2"],
)
def doc_to_target(self, doc):
# True = entailment
# False = not_entailment
return " {}".format({0: "False", 1: "True"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _ = rf.loglikelihood(ctx, " True")
ll_false, _ = rf.loglikelihood(ctx, " False")
return ll_true, ll_false
def process_results(self, doc, results):
ll_true, ll_false = results
pred = ll_true > ll_false
gold = doc["label"]
return {"acc": pred == gold}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class RTE(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "rte"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: {} True or False?\nAnswer:".format(
doc["sentence1"],
doc["sentence2"],
)
def doc_to_target(self, doc):
# 0 = entailment
# 1 = not_entailment
return " {}".format({0: "True", 1: "False"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _ = rf.loglikelihood(ctx, " True")
ll_false, _ = rf.loglikelihood(ctx, " False")
return ll_true, ll_false
def process_results(self, doc, results):
ll_true, ll_false = results
pred = ll_false > ll_true
gold = doc["label"]
return {"acc": pred == gold}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
# Similarity and Paraphrase Tasks
class MRPC(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "mrpc"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "Sentence 1: {}\nSentence 2: {}\nQuestion: Do both sentences mean the same thing?\nAnswer:".format(
general_detokenize(doc["sentence1"]),
general_detokenize(doc["sentence2"]),
)
def doc_to_target(self, doc):
return " {}".format(yesno(doc["label"]))
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
pred = ll_yes > ll_no
return {
"acc": pred == gold,
"f1": (gold, pred),
}
def higher_is_better(self):
return {"acc": True, "f1": True}
def aggregation(self):
return {"acc": mean, "f1": f1_score}
class QQP(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "qqp"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "Question 1: {}\nQuestion 2: {}\nQuestion: Do both questions ask the same thing?\nAnswer:".format(
doc["question1"],
doc["question2"],
)
def doc_to_target(self, doc):
return " {}".format(yesno(doc["label"]))
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
pred = ll_yes > ll_no
return {
"acc": pred == gold,
"f1": (gold, pred),
}
def higher_is_better(self):
return {"acc": True, "f1": True}
def aggregation(self):
return {"acc": mean, "f1": f1_score}
class STSB(Task):
VERSION = 0
DATASET_PATH = "glue"
DATASET_NAME = "stsb"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def test_docs(self):
return self.dataset["test"]
def doc_to_text(self, doc):
return "sentence 1: {}\nsentence 2: {}\nAnswer:".format(
doc["sentence1"],
doc["sentence2"],
)
def doc_to_target(self, doc):
return " {}".format(doc["label"])
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/glue.py |
"""
SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems
https://w4ngatang.github.io/static/papers/superglue.pdf
SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language
understanding tasks.
Homepage: https://super.gluebenchmark.com/
TODO: WSC requires free-form generation.
"""
import numpy as np
import sklearn
import re
import transformers.data.metrics.squad_metrics as squad_metrics
from .tasks_utils import Task, rf
from .tasks_utils import mean, acc_all, metric_max_over_ground_truths, yesno
def general_detokenize(string):
string = string.replace(" n't", "n't")
string = string.replace(" )", ")")
string = string.replace("( ", "(")
string = string.replace('" ', '"')
string = string.replace(' "', '"')
string = re.sub(r" (['.,])", r"\1", string)
return string
_CITATION = """
@inproceedings{NEURIPS2019_4496bf24,
author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
pages = {},
publisher = {Curran Associates, Inc.},
title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
volume = {32},
year = {2019}
}
"""
class BoolQ(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "boolq"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["passage"]
def doc_to_target(self, doc):
return " " + yesno(doc["label"])
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class CommitmentBank(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "cb"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format(
doc["premise"],
doc["hypothesis"],
)
def doc_to_target(self, doc):
# True = entailment
# False = contradiction
# Neither = neutral
return " {}".format({0: "True", 1: "False", 2: "Neither"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_true, _ = rf.loglikelihood(ctx, " True")
ll_false, _ = rf.loglikelihood(ctx, " False")
ll_neither, _ = rf.loglikelihood(ctx, " Neither")
return ll_true, ll_false, ll_neither
def process_results(self, doc, results):
gold = doc["label"]
pred = np.argmax(results)
acc = 1.0 if pred == gold else 0.0
return {"acc": acc, "f1": (pred, gold)}
def higher_is_better(self):
return {"acc": True, "f1": True}
@classmethod
def cb_multi_fi(cls, items):
preds, golds = zip(*items)
preds = np.array(preds)
golds = np.array(golds)
f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)
f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)
f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)
avg_f1 = mean([f11, f12, f13])
return avg_f1
def aggregation(self):
return {
"acc": mean,
"f1": self.cb_multi_fi,
}
class Copa(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "copa"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
# Drop the period
connector = {
"cause": "because",
"effect": "therefore",
}[doc["question"]]
return doc["premise"].strip()[:-1] + f" {connector}"
def doc_to_target(self, doc):
correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]
# Connect the sentences
return " " + self.convert_choice(correct_choice)
def construct_requests(self, doc, ctx):
choice1 = " " + self.convert_choice(doc["choice1"])
choice2 = " " + self.convert_choice(doc["choice2"])
ll_choice1, _ = rf.loglikelihood(ctx, choice1)
ll_choice2, _ = rf.loglikelihood(ctx, choice2)
return ll_choice1, ll_choice2
def process_results(self, doc, results):
gold = doc["label"]
pred = np.argmax(results)
acc = 1.0 if pred == gold else 0.0
return {"acc": acc}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
@staticmethod
def convert_choice(choice):
return choice[0].lower() + choice[1:]
class MultiRC(Task):
VERSION = 1
DATASET_PATH = "super_glue"
DATASET_NAME = "multirc"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:"
def doc_to_target(self, doc):
return " " + self.format_answer(answer=doc["answer"], label=doc["label"])
@staticmethod
def format_answer(answer, label):
label_str = "yes" if label else "no"
return f"{answer}\nIs the answer correct? {label_str}"
def construct_requests(self, doc, ctx):
true_choice = self.format_answer(answer=doc["answer"], label=True)
false_choice = self.format_answer(answer=doc["answer"], label=False)
ll_true_choice, _ = rf.loglikelihood(ctx, f" {true_choice}")
ll_false_choice, _ = rf.loglikelihood(ctx, f" {false_choice}")
return ll_true_choice, ll_false_choice
def process_results(self, doc, results):
ll_true_choice, ll_false_choice = results
pred = ll_true_choice > ll_false_choice
return {"acc": (pred, doc)}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": acc_all}
class ReCoRD(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "record"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
# In ReCoRD, each doc manifests multiple "examples" in the context of few shot example packing.
# Each doc consists of multiple answer candidates, each of which is scored yes/no.
if self._training_docs is None:
self._training_docs = []
for doc in self.dataset["train"]:
self._training_docs.append(self._process_doc(doc))
return self._training_docs
def validation_docs(self):
# See: training_docs
for doc in self.dataset["validation"]:
yield self._process_doc(doc)
@classmethod
def _process_doc(cls, doc):
return {
"passage": doc["passage"],
"query": doc["query"],
"entities": sorted(list(set(doc["entities"]))),
"answers": sorted(list(set(doc["answers"]))),
}
def doc_to_text(self, doc):
initial_text, *highlights = doc["passage"].strip().split("\n@highlight\n")
text = initial_text + "\n\n"
for highlight in highlights:
text += f" - {highlight}.\n"
return text
@classmethod
def format_answer(cls, query, entity):
return f" - {query}".replace("@placeholder", entity)
def doc_to_target(self, doc):
# We only output the first correct entity in a doc
return self.format_answer(query=doc["query"], entity=doc["answers"][0])
def construct_requests(self, doc, ctx):
requests = [
rf.loglikelihood(ctx, self.format_answer(query=doc["query"], entity=entity))
for entity in doc["entities"]
]
return requests
def process_results(self, doc, results):
# ReCoRD's evaluation is actually deceptively simple:
# - Pick the maximum likelihood prediction entity
# - Evaluate the accuracy and token F1 PER EXAMPLE
# - Average over all examples
max_idx = np.argmax(np.array([result[0] for result in results]))
prediction = doc["entities"][max_idx]
gold_label_set = doc["answers"]
f1 = metric_max_over_ground_truths(
squad_metrics.compute_f1, prediction, gold_label_set
)
em = metric_max_over_ground_truths(
squad_metrics.compute_exact, prediction, gold_label_set
)
return {
"f1": f1,
"em": em,
}
def higher_is_better(self):
return {
"f1": True,
"em": True,
}
def aggregation(self):
return {
"f1": mean,
"em": mean,
}
class WordsInContext(Task):
VERSION = 0
DATASET_PATH = "super_glue"
DATASET_NAME = "wic"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
return (
"Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the"
" two sentences above?\nAnswer:".format(
doc["sentence1"],
doc["sentence2"],
doc["sentence1"][doc["start1"] : doc["end1"]],
)
)
def doc_to_target(self, doc):
return " {}".format({0: "no", 1: "yes"}[doc["label"]])
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
class SGWinogradSchemaChallenge(Task):
VERSION = 0
# Note: This implementation differs from Fig G.32 because this is the SuperGLUE,
# binary version of the task.
DATASET_PATH = "super_glue"
DATASET_NAME = "wsc"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self.has_training_docs():
if self._training_docs is None:
# GPT-3 Paper's format only uses positive examples for fewshot "training"
self._training_docs = [
doc for doc in self.dataset["train"] if doc["label"]
]
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def doc_to_text(self, doc):
raw_passage = doc["text"]
# NOTE: HuggingFace span indices are word-based not character-based.
pre = " ".join(raw_passage.split()[: doc["span2_index"]])
post = raw_passage[len(pre) + len(doc["span2_text"]) + 1 :]
passage = general_detokenize(pre + " *{}*".format(doc["span2_text"]) + post)
noun = doc["span1_text"]
pronoun = doc["span2_text"]
text = (
f"Passage: {passage}\n"
+ f'Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?\n'
+ "Answer:"
)
return text
def doc_to_target(self, doc):
return " " + yesno(doc["label"])
def construct_requests(self, doc, ctx):
ll_yes, _ = rf.loglikelihood(ctx, " yes")
ll_no, _ = rf.loglikelihood(ctx, " no")
return ll_yes, ll_no
def process_results(self, doc, results):
ll_yes, ll_no = results
gold = doc["label"]
acc = 1.0 if (ll_yes > ll_no) == gold else 0.0
return {"acc": acc}
def higher_is_better(self):
return {"acc": True}
def aggregation(self):
return {"acc": mean}
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/superglue.py |
import abc
import math
import datasets
import inspect
import functools
import numpy as np
from abc import abstractmethod
import re
import sklearn.metrics
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""Compute max metric between prediction and each ground truth."""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def perplexity(items):
return math.exp(-mean(items))
def matthews_corrcoef(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
return sklearn.metrics.matthews_corrcoef(golds, preds)
def f1_score(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
fscore = sklearn.metrics.f1_score(golds, preds)
return np.max(fscore)
def yesno(x):
if x:
return "yes"
else:
return "no"
def acc_all(items):
# Only count as correct if all answers are labeled correctly for each question
question_scoring_dict = {}
preds = list(zip(*items))[0]
docs = list(zip(*items))[1]
for doc, pred in zip(docs, preds):
paragraph_id = doc["idx"]["paragraph"]
question_id = doc["idx"]["question"]
if (paragraph_id, question_id) not in question_scoring_dict:
question_scoring_dict[(paragraph_id, question_id)] = []
gold_label = doc["label"] == 1
question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred)
acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
return acc
def positional_deprecated(fn):
"""
A decorator to nudge users into passing only keyword args (`kwargs`) to the
wrapped function, `fn`.
"""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
        if len(args) != (1 if inspect.ismethod(fn) else 0):
print(
f"WARNING: using {fn.__name__} with positional arguments is "
"deprecated and will be disallowed in a future version of "
"lm-evaluation-harness!"
)
return fn(*args, **kwargs)
return _wrapper
def mean(arr):
return sum(arr) / len(arr)
def weighted_mean(items):
a, b = zip(*items)
return sum(a) / sum(b)
def weighted_perplexity(items):
return math.exp(-weighted_mean(items))
def bits_per_byte(items):
return -weighted_mean(items) / math.log(2)
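# Worked example: three tokens with loglikelihood -0.5 each give
# perplexity([-0.5, -0.5, -0.5]) == exp(0.5) ~= 1.65; a document with total
# loglikelihood -1386.3 over 1000 bytes gives
# bits_per_byte([(-1386.3, 1000)]) == 1.3863 / ln(2) ~= 2.0 bits per byte.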
class Task(abc.ABC):
"""A task represents an entire benchmark including its dataset, problems,
answers, and evaluation methods. See BoolQ for a simple example implementation
A `doc` can be any python object which represents one instance of evaluation.
This is usually a dictionary e.g.
{"question": ..., "answer": ...} or
{"question": ..., question, answer)
"""
# The name of the `Task` benchmark as denoted in the HuggingFace datasets Hub
# or a path to a custom `datasets` loading script.
DATASET_PATH: str = None
# The name of a subset within `DATASET_PATH`.
DATASET_NAME: str = None
def __init__(self, data_dir=None, cache_dir=None, download_mode=None):
"""
:param data_dir: str
Stores the path to a local folder containing the `Task`'s data files.
Use this to specify the path to manually downloaded data (usually when
the dataset is not publicly accessible).
:param cache_dir: str
The directory to read/write the `Task` dataset. This follows the
HuggingFace `datasets` API with the default cache directory located at:
`~/.cache/huggingface/datasets`
NOTE: You can change the cache location globally for a given process
by setting the shell environment variable, `HF_DATASETS_CACHE`,
to another directory:
`export HF_DATASETS_CACHE="/path/to/another/directory"`
:param download_mode: datasets.DownloadMode
How to treat pre-existing `Task` downloads and data.
- `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
Reuse download and reuse dataset.
- `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
Reuse download with fresh dataset.
- `datasets.DownloadMode.FORCE_REDOWNLOAD`
Fresh download and fresh dataset.
"""
self.download(data_dir, cache_dir, download_mode)
self._training_docs = None
self._fewshot_docs = None
def download(self, data_dir=None, cache_dir=None, download_mode=None):
"""Downloads and returns the task dataset.
Override this method to download the dataset from a custom API.
:param data_dir: str
Stores the path to a local folder containing the `Task`'s data files.
Use this to specify the path to manually downloaded data (usually when
the dataset is not publicly accessible).
:param cache_dir: str
The directory to read/write the `Task` dataset. This follows the
HuggingFace `datasets` API with the default cache directory located at:
`~/.cache/huggingface/datasets`
NOTE: You can change the cache location globally for a given process
by setting the shell environment variable, `HF_DATASETS_CACHE`,
to another directory:
`export HF_DATASETS_CACHE="/path/to/another/directory"`
:param download_mode: datasets.DownloadMode
How to treat pre-existing `Task` downloads and data.
- `datasets.DownloadMode.REUSE_DATASET_IF_EXISTS`
Reuse download and reuse dataset.
- `datasets.DownloadMode.REUSE_CACHE_IF_EXISTS`
Reuse download with fresh dataset.
- `datasets.DownloadMode.FORCE_REDOWNLOAD`
Fresh download and fresh dataset.
"""
self.dataset = datasets.load_dataset(
path=self.DATASET_PATH,
name=self.DATASET_NAME,
data_dir=data_dir,
cache_dir=cache_dir,
download_mode=download_mode,
)
def should_decontaminate(self):
"""Whether this task supports decontamination against model training set."""
return False
@abstractmethod
def has_training_docs(self):
"""Whether the task has a training set"""
pass
@abstractmethod
def has_validation_docs(self):
"""Whether the task has a validation set"""
pass
@abstractmethod
def has_test_docs(self):
"""Whether the task has a test set"""
pass
def training_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def validation_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def test_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def _process_doc(self, doc):
"""
Override this to process (detokenize, strip, replace, etc.) individual
documents. This can be used in a map over documents of a data split.
E.g. `map(self._process_doc, self.dataset["validation"])`
:return: dict
The processed version of the specified `doc`.
"""
return doc
def fewshot_examples(self, k, rnd):
if self._training_docs is None:
self._training_docs = list(self.training_docs())
return rnd.sample(self._training_docs, k)
def doc_to_decontamination_query(self, doc):
print(
"Override doc_to_decontamination_query with document specific decontamination query."
)
assert False
@abstractmethod
def doc_to_text(self, doc):
pass
@abstractmethod
def doc_to_target(self, doc):
pass
@abstractmethod
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
pass
@abstractmethod
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
pass
@abstractmethod
def aggregation(self):
"""
:returns: {str: [metric_score] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metric scores
"""
pass
@abstractmethod
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
pass
def fewshot_description(self):
import warnings
warnings.warn(
"`fewshot_description` will be removed in futures versions. Pass "
"any custom descriptions to the `evaluate` function instead.",
DeprecationWarning,
)
return ""
@positional_deprecated
def fewshot_context(
self, doc, num_fewshot, provide_description=None, rnd=None, description=None
):
"""Returns a fewshot context string that is made up of a prepended description
(if provided), the `num_fewshot` number of examples, and an appended prompt example.
:param doc: str
The document as returned from training_docs, validation_docs, or test_docs.
:param num_fewshot: int
The number of fewshot examples to provide in the returned context string.
:param provide_description: bool
Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method
:param rnd: random.Random
The pseudo-random number generator used to randomly sample examples.
WARNING: This is currently a required arg although it's optionalized with a default `None`.
:param description: str
The task's description that will be prepended to the fewshot examples.
:returns: str
The fewshot context.
"""
assert (
rnd is not None
), "A `random.Random` generator argument must be provided to `rnd`"
assert not provide_description, (
"The `provide_description` arg will be removed in future versions. To prepend "
"a custom description to the context, supply the corresponding string via the "
"`description` arg."
)
if provide_description is not None:
# nudge people to not specify it at all
print(
"WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict"
)
description = description + "\n\n" if description else ""
if num_fewshot == 0:
labeled_examples = ""
else:
# for sets with no training docs, draw from other set *but ensure no overlap with current doc*
if self.has_training_docs():
fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd)
else:
if self._fewshot_docs is None:
self._fewshot_docs = list(
self.validation_docs()
if self.has_validation_docs()
else self.test_docs()
)
fewshotex = rnd.sample(self._fewshot_docs, num_fewshot + 1)
# get rid of the doc that's the one we're evaluating, if it's in the fewshot
fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
labeled_examples = (
"\n\n".join(
[
self.doc_to_text(doc) + self.doc_to_target(doc)
for doc in fewshotex
]
)
+ "\n\n"
)
example = self.doc_to_text(doc)
return description + labeled_examples + example
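        # The returned context therefore looks like (schematically):
        #   "<description>\n\n<text_1><target_1>\n\n...\n\n<text_k><target_k>\n\n<text_of_doc>"
        # with the labeled-example block empty when num_fewshot == 0.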
class MultipleChoiceTask(Task):
def doc_to_target(self, doc):
return " " + doc["choices"][doc["gold"]]
def construct_requests(self, doc, ctx):
lls = [
rf.loglikelihood(ctx, " {}".format(choice))[0] for choice in doc["choices"]
]
return lls
def process_results(self, doc, results):
gold = doc["gold"]
acc = 1.0 if np.argmax(results) == gold else 0.0
completion_len = np.array([float(len(i)) for i in doc["choices"]])
acc_norm = 1.0 if np.argmax(results / completion_len) == gold else 0.0
return {
"acc": acc,
"acc_norm": acc_norm,
}
def higher_is_better(self):
return {
"acc": True,
"acc_norm": True,
}
def aggregation(self):
return {
"acc": mean,
"acc_norm": mean,
}
class PerplexityTask(Task, abc.ABC):
def should_decontaminate(self):
"""Whether this task supports decontamination against model training set."""
return True
def has_training_docs(self):
return False
def fewshot_examples(self, k, rnd):
assert k == 0
return []
def fewshot_context(
self, doc, num_fewshot, provide_description=None, rnd=None, description=None
):
assert (
num_fewshot == 0
), "The number of fewshot examples must be 0 for perplexity tasks."
assert (
rnd is not None
), "A `random.Random` generator argument must be provided to `rnd`."
assert not provide_description, (
"The `provide_description` arg will be removed in future versions. To prepend "
"a custom description to the context, supply the corresponding string via the "
"`description` arg."
)
if provide_description is not None:
# nudge people to not specify it at all
print(
"WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict"
)
return ""
def higher_is_better(self):
return {
"word_perplexity": False,
"byte_perplexity": False,
"bits_per_byte": False,
}
def doc_to_decontamination_query(self, doc):
return doc
def doc_to_text(self, doc):
return ""
def doc_to_target(self, doc):
return doc
def construct_requests(self, doc, ctx):
assert not ctx
req = rf.loglikelihood_rolling(self.doc_to_target(doc))
return req
def process_results(self, doc, results):
(loglikelihood,) = results
words = self.count_words(doc)
bytes_ = self.count_bytes(doc)
return {
"word_perplexity": (loglikelihood, words),
"byte_perplexity": (loglikelihood, bytes_),
"bits_per_byte": (loglikelihood, bytes_),
}
def aggregation(self):
return {
"word_perplexity": weighted_perplexity,
"byte_perplexity": weighted_perplexity,
"bits_per_byte": bits_per_byte,
}
@classmethod
def count_bytes(cls, doc):
return len(doc.encode("utf-8"))
@classmethod
def count_words(cls, doc):
"""Downstream tasks with custom word boundaries should override this!"""
return len(re.split(r"\s+", doc))
class RequestFactory:
def __getattr__(self, attr):
def fn(*args):
return Request(attr, args)
return fn
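# Usage sketch: rf.loglikelihood(ctx, " yes") builds Request("loglikelihood", (ctx, " yes")).
# Tasks return such Requests from construct_requests(); the evaluation loop later
# resolves each one into its (logprob, is_greedy) result.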
REQUEST_RETURN_LENGTHS = {
"loglikelihood": 2,
"greedy_until": None,
"loglikelihood_rolling": None,
}
class Request:
def __init__(self, request_type, args, index=None):
if request_type not in REQUEST_RETURN_LENGTHS.keys():
raise NotImplementedError(
"The request type {} is not implemented!".format(request_type)
)
self.request_type = request_type
self.args = args
self.index = index
def __iter__(self):
if REQUEST_RETURN_LENGTHS[self.request_type] is None:
raise IndexError("This request type does not return multiple arguments!")
for i in range(REQUEST_RETURN_LENGTHS[self.request_type]):
yield Request(self.request_type, self.args, i)
def __getitem__(self, i):
if REQUEST_RETURN_LENGTHS[self.request_type] is None:
raise IndexError("This request type does not return multiple arguments!")
return Request(self.request_type, self.args, i)
def __eq__(self, other):
return (
self.request_type == other.request_type
and self.args == other.args
and self.index == other.index
)
def __repr__(self):
return f"Req_{self.request_type}{self.args}[{self.index}]\n"
rf = RequestFactory() | EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/tasks_utils.py |
"""
Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
https://arxiv.org/pdf/1803.05457.pdf
The ARC dataset consists of 7,787 science exam questions drawn from a variety
of sources, including science questions provided under license by a research
partner affiliated with AI2. These are text-only, English language exam questions
that span several grade levels as indicated in the files. Each question has a
multiple choice structure (typically 4 answer options). The questions are sorted
into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and
a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.
Homepage: https://allenai.org/data/arc
"""
from .tasks_utils import MultipleChoiceTask
_CITATION = """
@article{Clark2018ThinkYH,
title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
journal={ArXiv},
year={2018},
volume={abs/1803.05457}
}
"""
class ARCEasy(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "ai2_arc"
DATASET_NAME = "ARC-Easy"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
# NOTE: Some `doc["answerKey"]`s are in numeric string format being one
# of {'1', '2', '3', '4', '5'}. We map them back to letters.
num_to_letter = {"1": "A", "2": "B", "3": "C", "4": "D", "5": "E"}
doc["answerKey"] = num_to_letter.get(doc["answerKey"], doc["answerKey"])
out_doc = {
"id": doc["id"],
"query": "Question: " + doc["question"] + "\nAnswer:",
"choices": doc["choices"]["text"],
"gold": ["A", "B", "C", "D", "E"].index(doc["answerKey"]),
}
return out_doc
def doc_to_text(self, doc):
return doc["query"]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["query"]
class ARCChallenge(ARCEasy):
DATASET_PATH = "ai2_arc"
DATASET_NAME = "ARC-Challenge"
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/arc.py |
"""
PIQA: Reasoning about Physical Commonsense in Natural Language
https://arxiv.org/pdf/1911.11641.pdf
Physical Interaction: Question Answering (PIQA) is a physical commonsense
reasoning and a corresponding benchmark dataset. PIQA was designed to investigate
the physical knowledge of existing models. To what extent are current approaches
actually learning about the world?
Homepage: https://yonatanbisk.com/piqa/
"""
from .tasks_utils import MultipleChoiceTask
_CITATION = """
@inproceedings{Bisk2020,
author = {Yonatan Bisk and Rowan Zellers and
Ronan Le Bras and Jianfeng Gao
and Yejin Choi},
title = {PIQA: Reasoning about Physical Commonsense in
Natural Language},
booktitle = {Thirty-Fourth AAAI Conference on
Artificial Intelligence},
year = {2020},
}
"""
class PiQA(MultipleChoiceTask):
VERSION = 0
DATASET_PATH = "piqa"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def _process_doc(self, doc):
out_doc = {
"goal": doc["goal"],
"choices": [doc["sol1"], doc["sol2"]],
"gold": doc["label"],
}
return out_doc
def doc_to_text(self, doc):
return "Question: " + doc["goal"] + "\nAnswer:"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["goal"]
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/piqa.py |
from .lambada import lambada | EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/local_datasets/__init__.py |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LAMBADA dataset."""
import json
import datasets
_CITATION = """\
@misc{
author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
title={The LAMBADA dataset},
DOI={10.5281/zenodo.2630551},
publisher={Zenodo},
year={2016},
month={Aug}
}
"""
_DESCRIPTION = """\
LAMBADA is a dataset to evaluate the capabilities of computational models for text
understanding by means of a word prediction task. LAMBADA is a collection of narrative
texts sharing the characteristic that human subjects are able to guess their last
word if they are exposed to the whole text, but not if they only see the last
sentence preceding the target word. To succeed on LAMBADA, computational models
cannot simply rely on local context, but must be able to keep track of information
in the broader discourse.
"""
_HOMEPAGE = "https://zenodo.org/record/2630551#.X4Xzn5NKjUI"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLS = {
"original": "http://eaidata.bmk.sh/data/lambada_test.jsonl",
"en": "http://eaidata.bmk.sh/data/lambada_test_en.jsonl",
"fr": "http://eaidata.bmk.sh/data/lambada_test_fr.jsonl",
"de": "http://eaidata.bmk.sh/data/lambada_test_de.jsonl",
"it": "http://eaidata.bmk.sh/data/lambada_test_it.jsonl",
"es": "http://eaidata.bmk.sh/data/lambada_test_es.jsonl",
}
class Lambada(datasets.GeneratorBasedBuilder):
"""LAMBADA is a dataset to evaluate the capabilities of computational models for text understanding by means of a word prediction task."""
VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="original", version=VERSION, description="The LAMBADA dataset"
),
datasets.BuilderConfig(
name="en",
version=VERSION,
description="The English translated LAMBADA dataset",
),
datasets.BuilderConfig(
name="fr",
version=VERSION,
description="The French translated LAMBADA dataset",
),
datasets.BuilderConfig(
name="de",
version=VERSION,
description="The German translated LAMBADA dataset",
),
datasets.BuilderConfig(
name="it",
version=VERSION,
description="The Italian translated LAMBADA dataset",
),
datasets.BuilderConfig(
name="es",
version=VERSION,
description="The Spanish translated LAMBADA dataset",
),
]
DEFAULT_CONFIG_NAME = "original"
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=f"{_DESCRIPTION}\n{self.config.description}",
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir,
"split": "validation",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
yield key, {"text": data["text"]}
| EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/local_datasets/lambada/lambada.py |
from .lambada import Lambada | EXA-1-master | exa/modular_components/gptq/zeroShot/tasks/local_datasets/lambada/__init__.py |
from . import opt
from . import bloom
MODEL_REGISTRY = {
'opt': opt.OPT,
'bloom': bloom.BLOOM
}
def get_model(model_name):
if 'opt' in model_name:
return MODEL_REGISTRY['opt']
elif 'bloom' in model_name:
return MODEL_REGISTRY['bloom']
return MODEL_REGISTRY[model_name]
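# Example: get_model('facebook/opt-125m') -> opt.OPT and get_model('bigscience/bloom-560m') -> bloom.BLOOM,
# since dispatch is by substring match; any other name is looked up directly in MODEL_REGISTRY.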
| EXA-1-master | exa/modular_components/gptq/zeroShot/models/__init__.py |
import transformers
import torch
from .models_utils import BaseLM, find_layers
from transformers import OPTForCausalLM, AutoTokenizer
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from .quant import *
from .gptq import GPTQ
class OPTClass(BaseLM):
def __init__(self, args):
super().__init__()
self.args = args
self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model_name = args.model
self.batch_size_per_gpu = args.batch_size
self.model = OPTForCausalLM.from_pretrained(self.model_name, torch_dtype='auto')
self.seqlen = self.model.config.max_position_embeddings
self.model.eval()
        # load the tokenizer that matches the checkpoint; use the slow implementation for OPT
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, use_fast=False)
self.vocab_size = self.tokenizer.vocab_size
print('OPT vocab size: ', self.vocab_size)
@property
def eot_token_id(self):
# we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
return self.tokenizer.eos_token_id
@property
def max_length(self):
        # OPT exposes its context window via max_position_embeddings
        return self.model.config.max_position_embeddings
@property
def max_gen_toks(self):
print('max_gen_toks fn')
return 256
@property
def batch_size(self):
# TODO: fix multi-gpu
return self.batch_size_per_gpu # * gpus
@property
def device(self):
# TODO: fix multi-gpu
return self._device
def tok_encode(self, string: str):
return self.tokenizer.encode(string, add_special_tokens=False)
def tok_decode(self, tokens):
return self.tokenizer.decode(tokens)
def _model_call(self, inps):
"""
inps: a torch tensor of shape [batch, sequence]
the size of sequence may vary from call to call
returns: a torch tensor of shape [batch, sequence, vocab] with the
logits returned from the model
"""
with torch.no_grad():
return self.model(inps)[0][:, :, :50272]
@torch.no_grad()
def _model_logits_on_dataset(self, dataset_inps):
print('Evaluating ...')
nsamples = len(dataset_inps)
model = self.model
dev = self.device
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.decoder.layers
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(dev)
model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(dev)
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = []
outs = []
for batch_idx, batch in enumerate(dataset_inps):
inps.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
outs.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
cache = {'i': 0, 'attention_masks': []}
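        # Catcher hijacks the first decoder layer: it records each batch's input
        # hidden states and attention mask, then raises ValueError so the rest of
        # the forward pass is skipped while the inputs are being collected.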
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_masks'].append(kwargs['attention_mask'].detach().cpu())
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = dataset_inps[i].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.cpu()
model.model.decoder.embed_positions = model.model.decoder.embed_positions.cpu()
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.cpu()
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.cpu()
torch.cuda.empty_cache()
attention_masks = cache['attention_masks']
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
if self.args.nearest:
subset = find_layers(layer)
for name in subset:
quantizer = Quantizer()
quantizer.configure(
self.args.wbits, perchannel=True, sym=False, mse=False
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nsamples):
outs[j] = layer(inps[j].to(self.device), attention_mask=attention_masks[j].to(self.device))[0].detach().cpu()
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
if model.model.decoder.final_layer_norm is not None:
model.model.decoder.final_layer_norm = model.model.decoder.final_layer_norm.to(dev)
if model.model.decoder.project_out is not None:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
model.lm_head = model.lm_head.to(dev)
if self.model.model.decoder.final_layer_norm is not None:
self.model.model.decoder.final_layer_norm = self.model.model.decoder.final_layer_norm.to(self.device)
if self.model.model.decoder.project_out is not None:
self.model.model.decoder.project_out = self.model.model.decoder.project_out.to(self.device)
self.model.lm_head = self.model.lm_head.to(self.device)
dataset_logits = []
for i in tqdm(range(nsamples), desc='Last Layer'):
hidden_states = inps[i].unsqueeze(0).to(self.device)
if self.model.model.decoder.final_layer_norm is not None:
hidden_states = self.model.model.decoder.final_layer_norm(hidden_states)
if self.model.model.decoder.project_out is not None:
hidden_states = self.model.model.decoder.project_out(hidden_states)
batch_logits = F.log_softmax(self.model.lm_head(hidden_states)[0][:, :, :50272], dim=-1).cpu()
dataset_logits.append(batch_logits)
model.config.use_cache = use_cache
return dataset_logits
def model_batched_set(self, inps):
dataset_logits = []
for batch in inps:
multi_logits = F.log_softmax(
self._model_call(batch), dim=-1
).cpu() # [batch, padding_length, vocab]
dataset_logits.append(multi_logits)
return dataset_logits
def _model_generate(self, context, max_length, eos_token_id):
return self.model.generate(
context, max_length=max_length, eos_token_id=eos_token_id, do_sample=False
)
@torch.no_grad()
def opt_sequential(self, dataloader):
print('Starting ...')
model = self.model
dev = self.device
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.decoder.layers
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.to(dev)
model.model.decoder.embed_positions = model.model.decoder.embed_positions.to(dev)
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.to(dev)
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(self.args.nsamples, self.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.model.decoder.embed_tokens = model.model.decoder.embed_tokens.cpu()
model.model.decoder.embed_positions = model.model.decoder.embed_positions.cpu()
if hasattr(model.model.decoder, 'project_out') and model.model.decoder.project_out:
model.model.decoder.project_out = model.model.decoder.project_out.cpu()
if hasattr(model.model.decoder, 'project_in') and model.model.decoder.project_in:
model.model.decoder.project_in = model.model.decoder.project_in.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
print('Ready.')
quantizers = {}
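        # Quantize one decoder layer at a time: hook every linear submodule to
        # accumulate GPTQ Hessian statistics over the calibration batches, run
        # fasterquant, then feed the quantized layer's outputs forward as inputs
        # to the next layer.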
for i in range(len(layers)):
layer = layers[i].to(dev)
subset = find_layers(layer)
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer = Quantizer()
gptq[name].quantizer.configure(
self.args.wbits, perchannel=True, sym=False, mse=False
)
def add_batch(name):
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(self.args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
for h in handles:
h.remove()
for name in subset:
print(i, name)
print('Quantizing ...')
gptq[name].fasterquant(percdamp=self.args.percdamp, groupsize=self.args.groupsize)
quantizers['model.decoder.layers.%d.%s' % (i, name)] = gptq[name].quantizer
gptq[name].free()
for j in range(self.args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]
layers[i] = layer.cpu()
del layer
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
return quantizers
# for backwards compatibility
OPT = OPTClass | EXA-1-master | exa/modular_components/gptq/zeroShot/models/opt.py |
import math
import time
import torch
import torch.nn as nn
import transformers
from .quant import *
DEBUG = False
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
class GPTQ:
def __init__(self, layer):
self.layer = layer
self.dev = self.layer.weight.device
W = layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
self.rows = W.shape[0]
self.columns = W.shape[1]
self.H = torch.zeros((self.columns, self.columns), device=self.dev)
self.nsamples = 0
def add_batch(self, inp, out):
if DEBUG:
self.inp1 = inp
self.out1 = out
if len(inp.shape) == 2:
inp = inp.unsqueeze(0)
tmp = inp.shape[0]
if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D):
if len(inp.shape) == 3:
inp = inp.reshape((-1, inp.shape[-1]))
inp = inp.t()
if isinstance(self.layer, nn.Conv2d):
unfold = nn.Unfold(
self.layer.kernel_size,
dilation=self.layer.dilation,
padding=self.layer.padding,
stride=self.layer.stride
)
inp = unfold(inp)
inp = inp.permute([1, 0, 2])
inp = inp.flatten(1)
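        # Keep H as a running estimate of 2/nsamples * sum_i x_i x_i^T over all
        # calibration inputs seen so far; the old estimate is rescaled and the
        # 2/nsamples factor is folded into the sqrt(2/nsamples) scaling of inp.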
self.H *= self.nsamples / (self.nsamples + tmp)
self.nsamples += tmp
# inp = inp.float()
inp = math.sqrt(2 / self.nsamples) * inp.float()
# self.H += 2 / self.nsamples * inp.matmul(inp.t())
self.H += inp.matmul(inp.t())
def fasterquant(
self, blocksize=128, percdamp=.01, groupsize=-1
):
W = self.layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
W = W.float()
tick = time.time()
if not self.quantizer.ready():
self.quantizer.find_params(W, weight=True)
H = self.H
del self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
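        # Dampen the Hessian diagonal by percdamp * mean(diag(H)), then form the
        # upper Cholesky factor of H^{-1}; the column-wise updates below only
        # ever read this factor.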
damp = percdamp * torch.mean(torch.diag(H))
diag = torch.arange(self.columns, device=self.dev)
H[diag, diag] += damp
H = torch.linalg.cholesky(H)
H = torch.cholesky_inverse(H)
H = torch.linalg.cholesky(H, upper=True)
Hinv = H
for i1 in range(0, self.columns, blocksize):
i2 = min(i1 + blocksize, self.columns)
count = i2 - i1
W1 = W[:, i1:i2].clone()
Q1 = torch.zeros_like(W1)
Err1 = torch.zeros_like(W1)
Losses1 = torch.zeros_like(W1)
Hinv1 = Hinv[i1:i2, i1:i2]
for i in range(count):
w = W1[:, i]
d = Hinv1[i, i]
if groupsize != -1:
if (i1 + i) % groupsize == 0:
self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True)
q = quantize(
w.unsqueeze(1), self.quantizer.scale, self.quantizer.zero, self.quantizer.maxq
).flatten()
Q1[:, i] = q
Losses1[:, i] = (w - q) ** 2 / d ** 2
err1 = (w - q) / d
W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
Err1[:, i] = err1
Q[:, i1:i2] = Q1
Losses[:, i1:i2] = Losses1 / 2
W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
if DEBUG:
self.layer.weight.data[:, :i2] = Q[:, :i2]
self.layer.weight.data[:, i2:] = W[:, i2:]
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
print(torch.sum(Losses))
torch.cuda.synchronize()
print('time %.2f' % (time.time() - tick))
print('error', torch.sum(Losses).item())
if isinstance(self.layer, transformers.Conv1D):
Q = Q.t()
self.layer.weight.data = Q.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)
if DEBUG:
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
def free(self):
if DEBUG:
self.inp1 = None
self.out1 = None
self.H = None
self.Losses = None
self.Trace = None
torch.cuda.empty_cache()
| EXA-1-master | exa/modular_components/gptq/zeroShot/models/gptq.py |
import torch
import torch.nn as nn
try:
import quant_cuda
except:
print('CUDA extension not installed.')
def quantize(x, scale, zero, maxq):
q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
return scale * (q - zero)
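# Example: with scale=0.5, zero=8, maxq=15 (asymmetric 4-bit), an input of 1.3 is
# mapped to round(1.3 / 0.5) + 8 = 11, clamped to [0, 15], and dequantized back
# to 0.5 * (11 - 8) = 1.5.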
class Quantizer(nn.Module):
def __init__(self, shape=1):
super(Quantizer, self).__init__()
self.register_buffer('maxq', torch.tensor(0))
self.register_buffer('scale', torch.zeros(shape))
self.register_buffer('zero', torch.zeros(shape))
def configure(
self,
bits, perchannel=False, sym=True,
mse=False, norm=2.4, grid=100, maxshrink=.8
):
self.maxq = torch.tensor(2 ** bits - 1)
self.perchannel = perchannel
self.sym = sym
self.mse = mse
self.norm = norm
self.grid = grid
self.maxshrink = maxshrink
def find_params(self, x, weight=False):
dev = x.device
self.maxq = self.maxq.to(dev)
shape = x.shape
if self.perchannel:
if weight:
x = x.flatten(1)
else:
if len(shape) == 4:
x = x.permute([1, 0, 2, 3])
x = x.flatten(1)
if len(shape) == 3:
x = x.reshape((-1, shape[-1])).t()
if len(shape) == 2:
x = x.t()
else:
x = x.flatten().unsqueeze(0)
tmp = torch.zeros(x.shape[0], device=dev)
xmin = torch.minimum(x.min(1)[0], tmp)
xmax = torch.maximum(x.max(1)[0], tmp)
if self.sym:
xmax = torch.maximum(torch.abs(xmin), xmax)
tmp = xmin < 0
if torch.any(tmp):
xmin[tmp] = -xmax[tmp]
tmp = (xmin == 0) & (xmax == 0)
xmin[tmp] = -1
xmax[tmp] = +1
self.scale = (xmax - xmin) / self.maxq
if self.sym:
self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)
else:
self.zero = torch.round(-xmin / self.scale)
if self.mse:
best = torch.full([x.shape[0]], float('inf'), device=dev)
for i in range(int(self.maxshrink * self.grid)):
p = 1 - i / self.grid
xmin1 = p * xmin
xmax1 = p * xmax
scale1 = (xmax1 - xmin1) / self.maxq
zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero
q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq)
q -= x
q.abs_()
q.pow_(self.norm)
err = torch.sum(q, 1)
tmp = err < best
if torch.any(tmp):
best[tmp] = err[tmp]
self.scale[tmp] = scale1[tmp]
self.zero[tmp] = zero1[tmp]
if not self.perchannel:
if weight:
tmp = shape[0]
else:
tmp = shape[1] if len(shape) != 3 else shape[2]
self.scale = self.scale.repeat(tmp)
self.zero = self.zero.repeat(tmp)
if weight:
shape = [-1] + [1] * (len(shape) - 1)
# self.scale = self.scale.unsqueeze(1)
# self.zero = self.zero.unsqueeze(1)
self.scale = self.scale.reshape(shape)
self.zero = self.zero.reshape(shape)
return
if len(shape) == 4:
self.scale = self.scale.reshape((1, -1, 1, 1))
self.zero = self.zero.reshape((1, -1, 1, 1))
if len(shape) == 3:
self.scale = self.scale.reshape((1, 1, -1))
self.zero = self.zero.reshape((1, 1, -1))
if len(shape) == 2:
self.scale = self.scale.unsqueeze(0)
self.zero = self.zero.unsqueeze(0)
def quantize(self, x):
if self.ready():
return quantize(x, self.scale, self.zero, self.maxq)
return x
def enabled(self):
return self.maxq > 0
def ready(self):
return torch.all(self.scale != 0)
class ActQuantWrapper(nn.Module):
def __init__(self, module):
super(ActQuantWrapper, self).__init__()
self.module = module
shape = [1] * len(self.module.weight.shape)
if len(shape) == 4:
shape[1] = self.module.weight.shape[1]
if len(shape) == 3:
shape[2] = self.module.weight.shape[2]
if len(shape) == 2:
shape[1] = self.module.weight.shape[1]
self.quantizer = Quantizer(shape=shape)
def forward(self, x):
return self.module(self.quantizer.quantize(x))
def add_actquant(module, name='', layers=[nn.Conv2d, nn.Linear]):
if isinstance(module, ActQuantWrapper):
return
for attr in dir(module):
tmp = getattr(module, attr)
if type(tmp) in layers:
setattr(module, attr, ActQuantWrapper(tmp))
if type(tmp) == nn.Sequential:
replaced = []
for i, child in enumerate(tmp.children()):
if type(child) in layers:
replaced.append(ActQuantWrapper(child))
else:
replaced.append(child)
setattr(module, attr, nn.Sequential(*replaced))
if type(tmp) == torch.nn.ModuleList:
replaced = []
for i, child in enumerate(tmp.children()):
if type(child) in layers:
replaced.append(ActQuantWrapper(child))
else:
replaced.append(child)
setattr(module, attr, nn.ModuleList(replaced))
for name1, child in module.named_children():
add_actquant(child, name + '.' + name1 if name != '' else name1, layers)
import time
class Quant4Linear(nn.Module):
def __init__(self, linear, scales, zeros):
super().__init__()
self.register_buffer('zeros', zeros.clone() * scales)
self.register_buffer('scales', scales)
self.register_buffer('bias', linear.bias.data)
intweight = torch.round((linear.weight.data + self.zeros) / self.scales).to(torch.int)
intweight = intweight.t().contiguous()
self.register_buffer('qweight', torch.zeros(
(intweight.shape[0] // 8, intweight.shape[1]), dtype=torch.int, device=self.bias.device
))
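        # Pack eight 4-bit weights into each int32 entry: row i of intweight goes
        # into bits [4*(i % 8), 4*(i % 8) + 4) of packed row i // 8.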
for i in range(intweight.shape[0]):
self.qweight[i // 8] |= intweight[i] << (4 * (i % 8))
# self.linear = linear.to(torch.device('cuda:0'))
def forward(self, x):
if x.shape[-1] == x.numel():
outshape = list(x.shape)
y = self.bias.clone()
outshape[-1] = self.bias.numel()
quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.zeros)
# y = self.linear(x)
return y.reshape(outshape)
print(x.shape)
raise ValueError('Only supports a single token currently.')
def make_quant4(module, quantizers, name=''):
if isinstance(module, Quant4Linear):
return
for attr in dir(module):
tmp = getattr(module, attr)
name1 = name + '.' + attr if name != '' else attr
if name1 in quantizers:
setattr(
module, attr,
Quant4Linear(tmp, quantizers[name1].scale, quantizers[name1].zero)
)
for name1, child in module.named_children():
make_quant4(child, quantizers, name + '.' + name1 if name != '' else name1)
| EXA-1-master | exa/modular_components/gptq/zeroShot/models/quant.py |
import abc
import torch
import json
import hashlib
import collections
from tqdm import tqdm
from typing import Iterable
from abc import abstractmethod
from torch import nn
import transformers
def find_layers(module, layers=[nn.Conv2d, nn.Linear, transformers.Conv1D], name=''):
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
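# Example: for an OPT decoder layer this returns something like
# {'self_attn.q_proj': Linear(...), 'self_attn.k_proj': Linear(...), ...,
#  'fc1': Linear(...), 'fc2': Linear(...)} - every quantizable submodule keyed
# by its dotted path inside the layer.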
class CacheHook:
def __init__(self, cachinglm):
if cachinglm is None:
self.dbdict = None
return
self.dbdict = cachinglm.dbdict
def add_partial(self, attr, req, res):
if self.dbdict is None:
return
hsh = hash_args(attr, req)
self.dbdict[hsh] = res
class LM(abc.ABC):
def __init__(self):
self.cache_hook = CacheHook(None)
@abstractmethod
def loglikelihood(self, requests):
"""Compute log-likelihood of generating a continuation from a context.
Downstream tasks should attempt to use loglikelihood instead of other
LM calls whenever possible.
:param requests: list
A list of pairs (context, continuation)
context: str
Context string. Implementations of LM must be able to handle an
empty context string.
continuation: str
The continuation over which log likelihood will be calculated. If
there is a word boundary, the space should be in the continuation.
For example, context="hello" continuation=" world" is correct.
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
@abstractmethod
def loglikelihood_rolling(self, requests):
"""Compute full log-likelihood of a string, with no truncation, for perplexity computation
- We will use the full max context length of the model.
- For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
the max context length.
- IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
which may simply concatenate multiple documents together.
- IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
            multiple chunks, the last input will still have a full-sized context.
Example:
Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
Prefix: EOT
Max context length: 4
Resulting input/prediction pairs:
INPUT: EOT 0 1 2
PRED: 0 1 2 3
INPUT: 3 4 5 6
PRED: 4 5 6 7
INPUT: 5 6 7 8
PRED: 8 9
Observe that:
1. Each token is predicted exactly once
2. For the last pair, we provide the full context, but only score the last two tokens
:param requests: list
A list of strings
string: str
                String for which we are computing per-token loglikelihood
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
# TODO: Add an optional max length
@abstractmethod
def greedy_until(self, requests):
"""Generate greedily until a stopping sequence
:param requests: list
A list of pairs (context, until)
context: str
Context string
until: [str]
The string sequences to generate until. These string sequences
may each span across multiple tokens, or may be part of one token.
:return: list
A list of strings continuation
continuation: str
The generated continuation.
"""
pass
@classmethod
def create_from_arg_string(cls, additional_config=None):
additional_config = {} if additional_config is None else additional_config
args = {k: v for k, v in additional_config.items() if v is not None}
return cls(**args)
def set_cache_hook(self, cache_hook):
self.cache_hook = cache_hook
class BaseLM(LM):
@property
@abstractmethod
def eot_token_id(self):
pass
@property
@abstractmethod
def max_length(self):
pass
@property
@abstractmethod
def max_gen_toks(self):
pass
@property
@abstractmethod
def batch_size(self):
pass
@property
@abstractmethod
def device(self):
pass
@abstractmethod
def tok_encode(self, string: str):
pass
@abstractmethod
def tok_decode(self, tokens: Iterable[int]):
pass
@abstractmethod
def _model_generate(self, context, max_length, eos_token_id):
pass
@abstractmethod
def _model_call(self, inps):
"""
inps: a torch tensor of shape [batch, sequence]
the size of sequence may vary from call to call
returns: a torch tensor of shape [batch, sequence, vocab] with the
logits returned from the model
"""
pass
# subclass must implement properties vocab_size, eot_token_id, max_gen_toks, batch_size, device, max_length.
# TODO: enforce this somehow
def loglikelihood(self, requests):
new_reqs = []
for context, continuation in requests:
if context == "":
# end of text as context
context_enc = [self.eot_token_id]
else:
context_enc = self.tok_encode(context)
continuation_enc = self.tok_encode(continuation)
new_reqs.append(((context, continuation), context_enc, continuation_enc))
return self._loglikelihood_tokens(new_reqs)
def loglikelihood_rolling(self, requests):
# TODO: Implement caching once we've confirmed the perplexity implementation
# TODO: automatic batch size detection for vectorization
loglikelihoods = []
for (string,) in tqdm(requests):
rolling_token_windows = list(
map(
make_disjoint_window,
get_rolling_token_windows(
token_list=self.tok_encode(string),
prefix_token=self.eot_token_id,
max_seq_len=self.max_length,
context_len=1,
),
)
)
rolling_token_windows = [(None,) + x for x in rolling_token_windows]
# TODO: extract out this call so it only gets called once and also somehow figure out partial caching for
# that
string_nll = self._loglikelihood_tokens(
rolling_token_windows, disable_tqdm=True
)
# discard is_greedy
string_nll = [x[0] for x in string_nll]
string_nll = sum(string_nll)
loglikelihoods.append(string_nll)
return loglikelihoods
def _loglikelihood_tokens(self, requests, disable_tqdm=False):
# TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
res = []
dataset_inps = []
def _collate(x):
# the negative sign on len(toks) sorts descending - this has a few advantages:
# - time estimates will always be over not underestimates, which is more useful for planning
# - to know the size of a batch when going through the list, you know the first one is always the batch
# padded context length. this is useful to simplify the batching logic and more importantly to make
# automatic adaptive batches much much easier to implement
# - any OOMs will happen right away rather than near the end
toks = x[1] + x[2]
return -len(toks), tuple(toks)
# TODO: automatic (variable) batch size detection for vectorization
re_ord = Reorderer(requests, _collate)
for chunk in chunks(
tqdm(re_ord.get_reordered(), disable=disable_tqdm), self.batch_size
):
inps = []
cont_toks_list = []
inplens = []
padding_length = None
# because vectorizing is annoying, we first convert each (context, continuation) pair to padded
# tensors, then we pack them together into a batch, call the model, and then pick it all apart
# again because vectorizing is annoying
for _, context_enc, continuation_enc in chunk:
# sanity check
assert len(context_enc) > 0
assert len(continuation_enc) > 0
assert len(continuation_enc) <= self.max_length
# how this all works:
# CTX CONT
# inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
# gpt2 \ \
# logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
# cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
# when too long to fit in context, truncate from the left
inp = torch.tensor(
(context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
dtype=torch.long,
).to(self.device)
(inplen,) = inp.shape
cont = continuation_enc
# since in _collate we make sure length is descending, the longest is always the first one.
padding_length = (
padding_length if padding_length is not None else inplen
)
# pad length from seq to padding_length
inp = torch.cat(
[
inp, # [seq]
torch.zeros(padding_length - inplen, dtype=torch.long).to(
inp.device
), # [padding_length - seq]
],
dim=0,
)
inps.append(inp.unsqueeze(0)) # [1, padding_length]
cont_toks_list.append(cont)
inplens.append(inplen)
# import pdb; pdb.set_trace()
batched_inps = torch.cat(inps, dim=0) # [batch, padding_length]
dataset_inps.append(batched_inps)
dataset_logits = self._model_logits_on_dataset(dataset_inps)
iter = 0
for chunk in chunks(
tqdm(re_ord.get_reordered(), disable=disable_tqdm), self.batch_size
):
multi_logits = dataset_logits[iter]
iter+=1
inps = []
cont_toks_list = []
inplens = []
padding_length = None
# because vectorizing is annoying, we first convert each (context, continuation) pair to padded
# tensors, then we pack them together into a batch, call the model, and then pick it all apart
# again because vectorizing is annoying
# TODO: check if we really need the following loop
for _, context_enc, continuation_enc in chunk:
# sanity check
assert len(context_enc) > 0
assert len(continuation_enc) > 0
assert len(continuation_enc) <= self.max_length
# how this all works:
# CTX CONT
# inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
# gpt2 \ \
# logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
# cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
# when too long to fit in context, truncate from the left
inp = torch.tensor(
(context_enc + continuation_enc)[-(self.max_length + 1):][:-1],
dtype=torch.long,
).to(self.device)
(inplen,) = inp.shape
cont = continuation_enc
# since in _collate we make sure length is descending, the longest is always the first one.
padding_length = (
padding_length if padding_length is not None else inplen
)
# pad length from seq to padding_length
inp = torch.cat(
[
inp, # [seq]
torch.zeros(padding_length - inplen, dtype=torch.long).to(
inp.device
), # [padding_length - seq]
],
dim=0,
)
inps.append(inp.unsqueeze(0)) # [1, padding_length]
cont_toks_list.append(cont)
inplens.append(inplen)
for (cache_key, _, _), logits, inp, inplen, cont_toks in zip(
chunk, multi_logits, inps, inplens, cont_toks_list
):
# Slice to original seq length
contlen = len(cont_toks)
logits = logits[inplen - contlen: inplen].unsqueeze(
0
) # [1, seq, vocab]
# Check if per-token argmax is exactly equal to continuation
greedy_tokens = logits.argmax(dim=-1)
cont_toks = torch.tensor(cont_toks, dtype=torch.long).unsqueeze(
0
) # [1, seq]
# import pdb; pdb.set_trace()
max_equal = (greedy_tokens == cont_toks).all()
# Obtain log-probs at the corresponding continuation token indices
# last_token_slice = logits[:, -1, :].squeeze(0).tolist()
logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
-1
) # [1, seq]
# Answer: (log prob, is-exact-match)
answer = (float(logits.sum()), bool(max_equal))
# partial caching
if cache_key is not None:
self.cache_hook.add_partial("loglikelihood", cache_key, answer)
res.append(answer)
return re_ord.get_original(res)
def greedy_until(self, requests):
print('greedy_until in base...')
# TODO: implement fully general `until` that handles until that are
# multiple tokens or that span multiple tokens correctly
# TODO: extract to TokenizedLM?
res = []
def _collate(x):
toks = self.tok_encode(x[0])
return len(toks), x[0]
re_ord = Reorderer(requests, _collate)
for context, until in tqdm(re_ord.get_reordered()):
if isinstance(until, str):
until = [until]
(primary_until,) = self.tok_encode(until[0])
context_enc = torch.tensor(
[self.tok_encode(context)[self.max_gen_toks - self.max_length :]]
).to(self.device)
cont = self._model_generate(
context_enc, context_enc.shape[1] + self.max_gen_toks, primary_until
)
s = self.tok_decode(cont[0].tolist()[context_enc.shape[1] :])
for term in until:
s = s.split(term)[0]
# partial caching
self.cache_hook.add_partial("greedy_until", (context, until), s)
res.append(s)
return re_ord.get_original(res)
def make_disjoint_window(pair):
"""Takes output from get_rolling_token_windows and makes the context not overlap with the continuation"""
a, b = pair
return a[: len(a) - (len(b) - 1)], b
def hash_args(attr, args):
dat = json.dumps([attr] + list(args))
return hashlib.sha256(dat.encode("utf-8")).hexdigest()
def simple_parse_args_string(args_string):
"""
Parses something like
args1=val1,arg2=val2
Into a dictionary
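e.g. "model=gpt2,batch_size=4" -> {"model": "gpt2", "batch_size": "4"} (values are kept as strings)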
"""
args_string = args_string.strip()
if not args_string:
return {}
arg_list = args_string.split(",")
args_dict = {}
for arg in arg_list:
k, v = arg.split("=")
args_dict[k] = v
return args_dict
def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
"""
- context_len allows for a rolling window context, allowing each prediction window to potentially
condition on some context
:param token_list: list
List of tokens to be PREDICTED
:param max_seq_len: int
max_seq_len of model (or max_seq_len we want to use)
:param context_len: int
Amount of desired token context for prediction. Needs to be at least 1.
:param prefix_token: token
Dummy token like <eos> so the first token has something to condition on
:return: generator
Generator of tuples
(input_tokens, pred_tokens)
Note: Score only the last len(pred_tokens) logits of the LM
"""
assert 1 <= context_len <= max_seq_len
if not token_list:
return
# +1 offset, going from input->preds
pred_len = max_seq_len - context_len + 1
predicted = 0
# Special handling for first window: predict all tokens
first_seq_len = min(max_seq_len, len(token_list))
yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len])
predicted += first_seq_len
while predicted < len(token_list):
window_pred_len = min(len(token_list) - predicted, pred_len)
window_end = predicted + window_pred_len
yield (
token_list[window_end - max_seq_len - 1 : window_end - 1],
token_list[window_end - window_pred_len : window_end],
)
predicted += window_pred_len
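# Reorderer sorts requests by the given key function (grouping duplicate keys so each unique request is run once) and get_original restores results to the caller's original order.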
class Reorderer:
def __init__(self, arr, fn):
self.size = len(arr)
arr = list(enumerate(arr))
arr = group(arr, lambda x: fn(x[1]))
arr = [([y[0] for y in x], x[0][1]) for x in arr]
arr.sort(key=lambda x: fn(x[1]))
self.arr = arr
def get_reordered(self):
return [x[1] for x in self.arr]
def get_original(self, newarr):
res = [None] * self.size
cov = [False] * self.size
for (inds, _), v in zip(self.arr, newarr):
for ind in inds:
res[ind] = v
cov[ind] = True
assert all(cov)
return res
def join_iters(iters):
for iter in iters:
yield from iter
def chunks(iter, n):
arr = []
for x in iter:
arr.append(x)
if len(arr) == n:
yield arr
arr = []
if arr:
yield arr
def group(arr, fn):
res = collections.defaultdict(list)
for ob in arr:
res[fn(ob)].append(ob)
return list(res.values())
| EXA-1-master | exa/modular_components/gptq/zeroShot/models/models_utils.py |
import math
import time
import torch
import torch.nn as nn
import transformers
from quant import *
DEBUG = False
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
class TrueOBS:
def __init__(self, layer):
self.layer = layer
self.dev = self.layer.weight.device
W = layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
self.rows = W.shape[0]
self.columns = W.shape[1]
self.H = torch.zeros((self.columns, self.columns), device=self.dev)
self.nsamples = 0
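# H accumulates a running estimate of the layer Hessian, roughly 2/nsamples * sum(x x^T) over the calibration inputs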
def add_batch(self, inp, out):
if DEBUG:
self.inp1 = inp
self.out1 = out
if len(inp.shape) == 2: # TODO: may not work for convnets
inp = inp.unsqueeze(0)
tmp = inp.shape[0]
if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D):
if len(inp.shape) == 3:
inp = inp.reshape((-1, inp.shape[-1]))
inp = inp.t()
if isinstance(self.layer, nn.Conv2d):
unfold = nn.Unfold(
self.layer.kernel_size,
dilation=self.layer.dilation,
padding=self.layer.padding,
stride=self.layer.stride
)
inp = unfold(inp)
inp = inp.permute([1, 0, 2])
inp = inp.flatten(1)
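# running-average Hessian update: downscale the old accumulator, then add sqrt(2/nsamples)-scaled inputs so H stays ~ 2/nsamples * sum(x x^T)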
self.H *= self.nsamples / (self.nsamples + tmp)
self.nsamples += tmp
# inp = inp.float()
inp = math.sqrt(2 / self.nsamples) * inp.float()
# self.H += 2 / self.nsamples * inp.matmul(inp.t())
self.H += inp.matmul(inp.t())
def fasterquant(
self, blocksize=128, percdamp=.01, sparseout=False, nearest=False
):
W = self.layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
W = W.float()
tick = time.time()
if not self.quantizer.ready():
self.quantizer.find_params(W, weight=True)
if False:
H = self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
damp = percdamp * torch.mean(torch.diag(H))
# diag = torch.arange(self.columns, device=self.dev)
# H[diag, diag] += damp
H += damp * torch.eye(self.columns, device=self.dev)
Hinv = torch.cholesky_inverse(torch.linalg.cholesky(H))
Hinv = torch.linalg.cholesky(Hinv, upper=True)
else:
H = self.H
del self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
damp = percdamp * torch.mean(torch.diag(H))
diag = torch.arange(self.columns, device=self.dev)
H[diag, diag] += damp
H = torch.linalg.cholesky(H)
H = torch.cholesky_inverse(H)
H = torch.linalg.cholesky(H, upper=True)
Hinv = H
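# Hinv now holds the upper Cholesky factor of the dampened inverse Hessian, which drives the column-wise error propagation below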
outlier = .25 * (self.quantizer.scale ** 2).flatten()
tot = 0
for i1 in range(0, self.columns, blocksize):
i2 = min(i1 + blocksize, self.columns)
count = i2 - i1
W1 = W[:, i1:i2].clone()
Q1 = torch.zeros_like(W1)
Err1 = torch.zeros_like(W1)
Losses1 = torch.zeros_like(W1)
Hinv1 = Hinv[i1:i2, i1:i2]
for i in range(count):
w = W1[:, i]
d = Hinv1[i, i]
# if (i1 + i) % 512 == 0:
# self.quantizer.find_params(W[:, (i1 + i):(i1 + i + 512)], weight=True)
q = quantize(
w.unsqueeze(1), self.quantizer.scale, self.quantizer.zero, self.quantizer.maxq
).flatten()
Q1[:, i] = q
Losses1[:, i] = (w - q) ** 2 / d ** 2
if sparseout:
sel = (w - q) ** 2 > outlier
Losses1[sel, i] = 0
q[sel] = w[sel]
Q1[sel, i] = q[sel]
tot += torch.sum(sel.int()).item()
err1 = (w - q) / d
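# compensate the not-yet-quantized columns for the quantization error of column i, weighted by the inverse-Hessian (Cholesky) row, as in OBS/GPTQ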
if not nearest:
W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
Err1[:, i] = err1
Q[:, i1:i2] = Q1
Losses[:, i1:i2] = Losses1 / 2
if not nearest:
W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
if DEBUG:
self.layer.weight.data[:, :i2] = Q[:, :i2]
self.layer.weight.data[:, i2:] = W[:, i2:]
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
print(torch.sum(Losses))
torch.cuda.synchronize()
print(tot / W.numel())
print('time %.2f' % (time.time() - tick))
print('error', torch.sum(Losses).item())
if isinstance(self.layer, transformers.Conv1D):
Q = Q.t()
self.layer.weight.data = Q.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)
if DEBUG:
print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))
def free(self):
if DEBUG:
self.inp1 = None
self.out1 = None
self.H = None
self.Losses = None
self.Trace = None
torch.cuda.empty_cache()
def print_mem():
t = torch.cuda.get_device_properties(0).total_memory / 1024 ** 3
r = torch.cuda.memory_reserved(0) / 1024 ** 3
a = torch.cuda.memory_allocated(0) / 1024 ** 3
print(t, r, a)
| EXA-1-master | exa/modular_components/gptq/zeroShot/models/fast_trueobs.py |
import transformers
import torch
from .models_utils import BaseLM, find_layers
from transformers import BloomForCausalLM, AutoTokenizer
import torch.nn.functional as F
from torch import nn
import torch
from tqdm import tqdm
from .quant import *
from .gptq import GPTQ
class BLOOMClass(BaseLM):
def __init__(self, args):
super().__init__()
self.args = args
self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model_name = args.model
self.batch_size_per_gpu = args.batch_size
self.model = BloomForCausalLM.from_pretrained(self.model_name, torch_dtype='auto')
self.model.eval()
self.seqlen = 2048
# pretrained tokenizer for neo is broken for now so just hard-coding this to gpt2
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, use_fast=False)
self.vocab_size = self.tokenizer.vocab_size
print('BLOOM vocab size: ', self.vocab_size)
@property
def eot_token_id(self):
# we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
return self.tokenizer.eos_token_id
@property
def max_length(self):
return 2048
@property
def max_gen_toks(self):
print('max_gen_toks fn')
return 256
@property
def batch_size(self):
# TODO: fix multi-gpu
return self.batch_size_per_gpu # * gpus
@property
def device(self):
# TODO: fix multi-gpu
return self._device
def tok_encode(self, string: str):
return self.tokenizer.encode(string, add_special_tokens=False)
def tok_decode(self, tokens):
return self.tokenizer.decode(tokens)
def _model_call(self, inps):
"""
inps: a torch tensor of shape [batch, sequence]
the size of sequence may vary from call to call
returns: a torch tensor of shape [batch, sequence, vocab] with the
logits returned from the model
"""
with torch.no_grad():
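# keep only the first 250,680 logits (the BLOOM tokenizer vocabulary); the model's output projection may be padded beyond it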
return self.model(inps)[0][:, :, :250680]
@torch.no_grad()
def _model_logits_on_dataset(self, dataset_inps):
dataset_logits = []
nsamples = len(dataset_inps)
dev = self.device
model = self.model
print('Evaluation...')
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.transformer.h
model.transformer.word_embeddings = model.transformer.word_embeddings.to(dev)
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = []
outs = []
for batch_idx, batch in enumerate(dataset_inps):
inps.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
outs.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
cache = {'i': 0, 'attention_masks': [], 'alibis': []}
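# Catcher intercepts the first transformer block: it records the hidden states, attention masks and alibi tensors for each batch, then raises ValueError to abort the forward pass so only the embedding layers actually run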
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_masks'].append(kwargs['attention_mask'].detach().cpu())
cache['alibis'].append(kwargs['alibi'].detach().cpu())
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nsamples):
batch = dataset_inps[i].to(dev)
try:
model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.transformer.word_embeddings = model.transformer.word_embeddings.cpu()
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.cpu()
torch.cuda.empty_cache()
attention_masks = cache['attention_masks']
alibis = cache['alibis']
for i in range(len(layers)):
print(i)
layer = layers[i].to(dev)
if self.args.nearest:
subset = find_layers(layer)
for name in subset:
quantizer = Quantizer()
quantizer.configure(
self.args.wbits, perchannel=True, sym=False, mse=False
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nsamples):
outs[j] = layer(inps[j].to(self.device),
attention_mask=attention_masks[j].to(self.device),
alibi=alibis[j].to(self.device))[0].detach().cpu()
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
model.transformer.ln_f = model.transformer.ln_f.to(dev)
model.lm_head = model.lm_head.to(dev)
for i in tqdm(range(nsamples), desc='Last Layer'):
hidden_states = inps[i].unsqueeze(0).to(self.device)
hidden_states = self.model.transformer.ln_f(hidden_states)
batch_logits = F.log_softmax(self.model.lm_head(hidden_states)[0][:, :, :250680], dim=-1).cpu()
dataset_logits.append(batch_logits)
model.config.use_cache = use_cache
return dataset_logits
@torch.no_grad()
def _model_logits_on_dataset2(self, dataset_inps):
dataset_logits = []
nbatches = len(dataset_inps)
use_cache = self.model.config.use_cache
self.model.config.use_cache = False
layers = self.model.transformer.h
self.model.transformer.word_embeddings = self.model.transformer.word_embeddings.to(self.device)
self.model.transformer.word_embeddings_layernorm = self.model.transformer.word_embeddings_layernorm.to(
self.device)
layers[0] = layers[0].to(self.device)
dtype = next(iter(self.model.parameters())).dtype
inps = []
outs = []
for batch_idx, batch in enumerate(dataset_inps):
inps.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
outs.append(torch.zeros(
(batch.shape[1], self.model.config.hidden_size), dtype=dtype,
))
cache = {'i': 0, 'attention_masks': [], 'alibi': []}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp.cpu()
cache['i'] += 1
cache['attention_masks'].append(kwargs['attention_mask'].detach().cpu())
cache['alibi'].append(kwargs['alibi'].detach().cpu())
raise ValueError
layers[0] = Catcher(layers[0])
for i in range(nbatches):
batch = dataset_inps[i].to(self.device)
try:
self.model(batch)
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
self.model.transformer.word_embeddings = self.model.transformer.word_embeddings.cpu()
self.model.transformer.word_embeddings_layernorm = self.model.transformer.word_embeddings_layernorm.cpu()
torch.cuda.empty_cache() # TODO: maybe we don't need this?
attention_masks = cache['attention_masks']
alibis = cache['alibi']
for i in range(len(layers)):
print('layer: ', i)
layer = layers[i].to(self.device)
if self.args.wbits < 32 and self.args.nearest:
subset = find_layers(layer)
for name in subset:
if 'lm_head' in name:
continue
quantizer = Quantizer()
quantizer.configure(
self.args.wbits,
perchannel=True, sym=False, mse=False, norm=2.4
)
W = subset[name].weight.data
quantizer.find_params(W, weight=True)
subset[name].weight.data = quantize(
W, quantizer.scale, quantizer.zero, quantizer.maxq
).to(next(iter(layer.parameters())).dtype)
for j in range(nbatches):
outs[j] = layer(inps[j].to(self.device),
attention_mask=attention_masks[j].to(self.device),
alibi=alibis[j].to(self.device))[0].detach().cpu()
layers[i] = layer.cpu()
del layer
torch.cuda.empty_cache()
inps, outs = outs, inps
self.model.transformer.ln_f = self.model.transformer.ln_f.to(self.device)
self.model.lm_head = self.model.lm_head.to(self.device)
for i in tqdm(range(nbatches), desc='Last Layer'):
hidden_states = inps[i].unsqueeze(0).to(self.device)
hidden_states = self.model.transformer.ln_f(hidden_states)
batch_logits = F.log_softmax(self.model.lm_head(hidden_states)[0][:, :, :250680], dim=-1).cpu()
dataset_logits.append(batch_logits)
return dataset_logits
def _model_logits_on_dataset_2(self, inps):
# import pdb;pdb.set_trace()
self.model = self.model.to(self.device)
dataset_logits = []
for batch in inps:
multi_logits = F.log_softmax(
self._model_call(batch), dim=-1
).cpu() # [batch, padding_length, vocab]
dataset_logits.append(multi_logits)
return dataset_logits
def _model_generate(self, context, max_length, eos_token_id):
return self.model.generate(
context, max_length=max_length, eos_token_id=eos_token_id, do_sample=False
)
@torch.no_grad()
def bloom_sequential(self, dataloader):
print('Starting ...')
model = self.model
dev = self.device
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.transformer.h
model.transformer.word_embeddings = model.transformer.word_embeddings.to(dev)
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.to(dev)
layers[0] = layers[0].to(dev)
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(self.args.nsamples, self.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {'i': 0, 'attention_mask': None, 'alibi': None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['alibi'] = kwargs['alibi']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(dev))
except ValueError:
pass
layers[0] = layers[0].module
layers[0] = layers[0].cpu()
model.transformer.word_embeddings = model.transformer.word_embeddings.cpu()
model.transformer.word_embeddings_layernorm = model.transformer.word_embeddings_layernorm.cpu()
torch.cuda.empty_cache()
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
alibi = cache['alibi']
print('Ready.')
for i in range(len(layers)):
layer = layers[i].to(dev)
subset = find_layers(layer)
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer = Quantizer()
gptq[name].quantizer.configure(
self.args.wbits, perchannel=True, sym=False, mse=False
)
def add_batch(name):
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(self.args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, alibi=alibi)[0]
for h in handles:
h.remove()
for name in subset:
print(i, name)
print('Quantizing ...')
gptq[name].fasterquant(percdamp=self.args.percdamp, groupsize=self.args.groupsize)
for j in range(self.args.nsamples):
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, alibi=alibi)[0]
layers[i] = layer.cpu()
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
model.config.use_cache = use_cache
# for backwards compatibility
BLOOM = BLOOMClass | EXA-1-master | exa/modular_components/gptq/zeroShot/models/bloom.py |
from fts import FineTuner
model_id="google/flan-t5-xxl"
dataset_name="samsum"
finetune = FineTuner(
model_id=model_id,
dataset_name="samsum",
max_length=150,
lora_r=16,
lora_alpha=32,
quantize=True
)
# train() requires an output directory and epoch count; the values below are illustrative placeholders
finetune.train(output_dir="outputs", num_train_epochs=3) | Finetuning-Suite-master | example.py |
from fts import Inference
model = Inference(
model_id="georgesung/llama2_7b_chat_uncensored",
quantized=True
)
model.run("What is your name") | Finetuning-Suite-master | inference.py |
from datasets import load_dataset
from transformers import AutoTokenizer
from fts.finetuner import FineTuner
tokenizer = AutoTokenizer.from_pretrained("Phind/Phind-CodeLlama-34B-v1")
def data_preprocessing(dataset="Abirate/english_quotes"):
data = load_dataset(dataset)
data = data.map(
lambda samples: tokenizer(samples["quote"]), batched=True
)
return data
def trainer(model):
import transformers
# needed for gpt-neo-x tokenizer
tokenizer.pad_token = tokenizer.eos_token
trainer = transformers.Trainer(
model=model,
train_dataset=data_preprocessing()["train"],
args=transformers.TrainingArguments(
per_device_train_batch_size=1,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=10,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir="outputs",
optim="paged_adamw_8bit"
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False # silence the warnings. Please re-enable for inference!
trainer.train()
FineTuner(
model_id="Phind/Phind-CodeLlama-34B-v1",
dataset_name="Abirate/english_quotes",
preprocessor=data_preprocessing,
trainer_config=trainer
)
| Finetuning-Suite-master | playground/llama2_english.py |
from fts.finetuner import FineTuner
from fts.inference.hf_model import Inference
from fts.processing.base import Preprocessor, DefaultPreprocessor
from fts.trainer.base import TrainerConfiguration, DefaultTrainerConfig
from fts.processing.build_dataset import BuildDataset
| Finetuning-Suite-master | fts/__init__.py |
import logging
import torch
from datasets import load_dataset
from peft import TaskType
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
Seq2SeqTrainer,
)
from fts.inference.base import DefaultInferenceHandler
from fts.processing.base import DefaultPreprocessor
from fts.trainer.base import DefaultTrainerConfig
class FineTuner:
def __init__(self,
model_id: str,
device: str = None,
dataset_name=None,
lora_r=16,
lora_alpha=32,
lora_target_modules=["q", "v"],
lora_bias="none",
preprocessor=None,
lora_task_type=TaskType.SEQ_2_SEQ_LM,
max_length=1000,
quantize: bool = False,
quantization_config: dict = None,
trainer_config=None,
inference_handler=None
):
self.logger = logging.getLogger(__name__)
self.device = device if device else ('cuda' if torch.cuda.is_available() else 'cpu')
self.model_id = model_id
self.max_length = max_length
self.dataset_name = dataset_name
# DefaultPreprocessor expects a tokenizer instance, and the trainer config must be instantiated before .configure() can be called on it
self.preprocessor = preprocessor if preprocessor else DefaultPreprocessor(AutoTokenizer.from_pretrained(self.model_id))
self.trainer_config = trainer_config if trainer_config else DefaultTrainerConfig()
self.inference_handler = inference_handler if inference_handler else DefaultInferenceHandler()
self.lora_r = lora_r
self.lora_alpha = lora_alpha
self.lora_target_modules = lora_target_modules
self.lora_bias = lora_bias
self.lora_task_type = lora_task_type
self.dataset = load_dataset(dataset_name)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
bnb_config = None
if quantize:
if not quantization_config:
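# default to QLoRA-style 4-bit NF4 quantization (double quantization, bfloat16 compute) when no config is supplied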
quantization_config = {
'load_in_4bit': True,
'bnb_4bit_use_double_quant': True,
'bnb_4bit_quant_type': "nf4",
'bnb_4bit_compute_dtype': torch.bfloat16
}
bnb_config = BitsAndBytesConfig(**quantization_config)
try:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=bnb_config)
self.model.to(self.device)
except Exception as e:
self.logger.error(f"Failed to load the model or the tokenizer: {e}")
raise
def __call__(self, prompt_text: str, max_length: int = None):
max_length = max_length if max_length else self.max_length
try:
inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(self.device)
with torch.no_grad():
outputs = self.model.generate(inputs, max_length=max_length, do_sample=True)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
self.logger.error(f"Failed to generate the text: {e}")
raise
def preprocess_data(self):
tokenized_dataset = self.dataset.map(self.preprocessor.preprocess_function, batched=True, remove_columns=["dialogue", "summary", "id"])
return tokenized_dataset
def train(self, output_dir, num_train_epochs):
self.model, data_collator, training_args = self.trainer_config.configure(self.model, self.tokenizer, output_dir, num_train_epochs)
tokenized_dataset = self.preprocess_data()
trainer = Seq2SeqTrainer(model=self.model, args=training_args, data_collator=data_collator, train_dataset=tokenized_dataset["train"])
trainer.train()
def generate(self, prompt_text: str, max_length: int = None):
try:
return self.inference_handler.run(prompt_text, self.model, self.tokenizer, self.device, max_length)
except Exception as error:
error_msg = f"Failed to generate text for input: {prompt_text} because of Error {error} try modifying the inference function"
self.logger.error(error_msg)
raise ValueError(error_msg) from error
| Finetuning-Suite-master | fts/finetuner.py |
Finetuning-Suite-master | fts/processing/__init__.py |
|
import argparse
import multiprocessing
from itertools import chain
from datasets import load_dataset
from kosmosx.model import KosmosTokenizer
class BuildDataset:
def __init__(self, seed=42, seq_len=8192, hf_account="YOUR HUGGINGFACE API KEY", dataset_name="HuggingFaceM4/VQAv2"):
self.SEED = seed
self.SEQ_LEN = seq_len
self.NUM_CPU = multiprocessing.cpu_count()
self.HF_ACCOUNT_REPO = hf_account
self.DATASET_NAME = dataset_name
self.tokenizer = KosmosTokenizer.tokenize
def tokenize_function(self, example):
return self.tokenizer([t + self.tokenizer.eos_token for t in example["text"]])
def group_texts(self, examples):
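# concatenate all tokenized examples, then re-split them into fixed SEQ_LEN blocks, dropping the trailing remainder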
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
if total_length >= self.SEQ_LEN:
total_length = (total_length // self.SEQ_LEN) * self.SEQ_LEN
result = {
k: [t[i : i + self.SEQ_LEN] for i in range(0, total_length, self.SEQ_LEN)]
for k, t in concatenated_examples.items()
}
return result
def build(self):
train_dataset = load_dataset(self.DATASET_NAME, split="train", streaming=True)
tokenized_dataset = train_dataset.map(
self.tokenize_function,
batched=True,
num_proc=self.NUM_CPU,
remove_columns=["text"],
)
train_tokenized_dataset = tokenized_dataset.map(
self.group_texts,
batched=True,
num_proc=self.NUM_CPU,
)
train_tokenized_dataset.push_to_hub(self.HF_ACCOUNT_REPO)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process and push dataset to Hugging Face Hub")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument("--seq_len", type=int, default=8192, help="Sequence length for processing")
parser.add_argument("--hf_account", type=str, default="YOUR HUGGINGFACE API KEY", help="Hugging Face account name and repo")
parser.add_argument("--dataset_name", type=str, default="uggingFaceM4/VQAv2", help="Name of the dataset to process")
args = parser.parse_args()
dataset_builder = BuildDataset(seed=args.seed, seq_len=args.seq_len, hf_account=args.hf_account, dataset_name=args.dataset_name)
dataset_builder.build() | Finetuning-Suite-master | fts/processing/build_dataset.py |
from abc import ABC, abstractmethod
class Preprocessor(ABC):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
@abstractmethod
def preprocess_function(self, sample, padding="max_length"):
pass
# Step 2: Default Preprocessor
class DefaultPreprocessor(Preprocessor):
def preprocess_function(
self,
sample,
padding="max_length",
max_source_length=None,
max_target_length=None
):
inputs = ["prompt" + item for item in sample["act"]]
model_inputs = self.tokenizer(inputs, max_length=max_source_length, padding=padding, truncation=True)
labels = self.tokenizer(text_target=sample["prompt"], max_length=max_target_length, padding=padding, truncation=True)
if padding == "max_length":
labels["input_ids"] = [
[(l if l != self.tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs | Finetuning-Suite-master | fts/processing/base.py |
Finetuning-Suite-master | fts/utils/__init__.py |
|
def print_trainable_parameters(model):
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"Trainable params: {trainable_params} || all params {all_param} || trainable: {100 * trainable_params / all_param}"
) | Finetuning-Suite-master | fts/utils/main.py |
import torch
import logging
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
class Inference:
def __init__(
self,
model_id: str,
device: str = None,
max_length: int = 20,
quantize: bool = False,
quantization_config: dict = None
):
super().__init__()
self.logger = logging.getLogger(__name__)
self.device = device if device else ('cuda' if torch.cuda.is_available() else 'cpu')
self.model_id = model_id
self.max_length = max_length
bnb_config = None
if quantize:
if not quantization_config:
quantization_config = {
'load_in_4bit': True,
'bnb_4bit_use_double_quant': True,
'bnb_4bit_quant_type': "nf4",
'bnb_4bit_compute_dtype': torch.bfloat16
}
bnb_config = BitsAndBytesConfig(**quantization_config)
try:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=bnb_config)
self.model.to(self.device)
except Exception as e:
self.logger.error(f"Failed to load the model or the tokenizer: {e}")
raise
def __call__(self, prompt_text: str, max_length: int = None):
max_length = max_length if max_length else self.max_length
try:
inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(self.device)
with torch.no_grad():
outputs = self.model.generate(inputs, max_length=max_length, do_sample=True)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
self.logger.error(f"Failed to generate the text: {e}")
raise
def run(self, prompt_text: str, max_length: int = None):
max_length = max_length if max_length else self.max_length
try:
inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(self.device)
with torch.no_grad():
outputs = self.model.generate(inputs, max_length=max_length, do_sample=True)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
except Exception as e:
self.logger.error(f"Failed to generate the text: {e}")
raise
| Finetuning-Suite-master | fts/inference/hf_model.py |
Finetuning-Suite-master | fts/inference/__init__.py |
|
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
class GPTQInference:
def __init__(
self,
model_id,
quantization_config_bits: int = 4,
quantization_config_dataset: str = None,
max_length: int = 500
):
self.model_id = model_id
self.quantization_config_bits = quantization_config_bits
self.quantization_config_dataset = quantization_config_dataset
self.max_length = max_length
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
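# GPTQConfig performs post-training weight quantization to `bits` bits at load time, calibrated on the given dataset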
self.quantization_config = GPTQConfig(
bits=self.quantization_config_bits,
dataset=quantization_config_dataset,
tokenizer=self.tokenizer
)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id,
device_map="auto",
quantization_config=self.quantization_config
)
def run(
self,
prompt: str,
# max_length: int = None
):
# max_length = max_length if max_length else self.max_length
try:
inputs = self.tokenizer.encode(
prompt,
return_tensors="pt"
).to(self.model.device)  # the class defines no self.device; use the model's device (it was loaded with device_map="auto")
with torch.no_grad():
outputs = self.model.generate(
inputs,
max_length=self.max_length,
do_sample=True
)
return self.tokenizer.decode(
outputs[0],
skip_special_tokens=True
)
except Exception as error:
print(f"Error: {error} in inference mode, please change the inference logic or try again")
raise | Finetuning-Suite-master | fts/inference/gptq.py |
from abc import ABC, abstractmethod
class InferenceHandler(ABC):
@abstractmethod
def run(
self,
prompt_text=None,
model=None,
tokenizer=None,
device=None,
max_length = None
):
pass
class DefaultInferenceHandler(InferenceHandler):
def run(
self,
prompt_text,
model,
tokenizer,
device,
max_length
):
inputs = tokenizer.encode(prompt_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_length=max_length, do_sample=True)
return tokenizer.decode(outputs[0], skip_special_tokens=True)
| Finetuning-Suite-master | fts/inference/base.py |
Finetuning-Suite-master | fts/trainer/__init__.py |
|
from abc import ABC, abstractmethod
from peft import LoraConfig, TaskType, get_peft_model
from transformers import (
DataCollatorForSeq2Seq,
Seq2SeqTrainingArguments,
)
class TrainerConfiguration(ABC):
@abstractmethod
def configure(self, model, tokenizer, output_dir, num_train_epochs, *args, **kwargs):
"""
Configures the model, data collator, and training arguments
Returns:
tuple: (configured model, data_collator, training_args)
"""
pass
class DefaultTrainerConfig(TrainerConfiguration):
def configure(self, model, tokenizer, output_dir, num_train_epochs, *args, **kwargs):
lora_config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q", "v"],
bias="none",
task_type=TaskType.SEQ_2_SEQ_LM,
)
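# wrap the base model with LoRA adapters: only the rank-16 updates on the q/v projections are trained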
model = get_peft_model(model, lora_config)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=-100, pad_to_multiple_of=8 )
training_args = Seq2SeqTrainingArguments(
output_dir=output_dir,
auto_find_batch_size=True,
learning_rate=1e-3,
num_train_epochs=num_train_epochs,
logging_dir=f"{output_dir}/logs",
logging_strategy="steps",
logging_steps=500,
save_strategy="no",
report_to="tensorboard"
)
return model, data_collator, training_args
| Finetuning-Suite-master | fts/trainer/base.py |
from setuptools import setup, find_packages
setup(
name='pali-torch',
packages=find_packages(exclude=[]),
version='0.0.3',
license='MIT',
description='Pali - PyTorch',
author='Kye Gomez',
author_email='[email protected]',
long_description_content_type='text/markdown',
url='https://github.com/kyegomez/Pali',
keywords=[
'artificial intelligence',
'deep learning',
'optimizers',
'Prompt Engineering'
],
install_requires=[
'transformers',
'primus',
'torch',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | PALI-main | setup.py |
import torch
from pali import Pali
model = Pali()
img = torch.randn(1, 3, 256, 256)
prompt = torch.randint(0, 256, (1, 1024))
mask = torch.ones(1, 1024).bool()
output_text = torch.randint(0, 256, (1, 1024))
result = model.process(img, prompt, output_text, mask)
print(result)
| PALI-main | example.py |
import torch
from pali.model import Pali
# # Initialize Pali model
# pali = Pali()
# Example 1: Caption an Image
# # Load images
# images = [torch.randn(1, 3, 256, 256) for _ in range(3)]
# for i, img in enumerate(images):
# # Generate a caption for the image
# prompt = torch.randint(0, 256, (1, 1024))
# prompt_mask = torch.ones(1, 1024).bool()
# output_text = torch.randint(0, 256, (1, 1024))
# result = pali.process(img, prompt, output_text, prompt_mask)
# print(f"Caption for image {i+1}: ", result)
# # # Example 2: Generate text based on another piece of text
# # Define prompt texts
# prompt_texts = ["Once upon a time", "In a galaxy far, far away", "It was a dark and stormy night"]
# for i, prompt_text in enumerate(prompt_texts):
# prompt = torch.tensor([ord(c) for c in prompt_text]).unsqueeze(0)
# # Generate text based on the prompt
# output_text = torch.randint(0, 256, (1, 1024))
# result = pali.process(None, prompt, output_text, None)
# print(f"Generated text for prompt {i+1}: ", result)
pali = Pali()
prompt_text = "say hi to Kye"
model_name = "t5-small" # specify the model name or path
generated_text = pali.generate(prompt_text, model_name=model_name)
print(generated_text) | PALI-main | inference.py |
from math import ceil
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
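# next-token objective: the input drops the last token and the target drops the first, so position t predicts token t+1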
inp, target = x[:, :-1], x[:, 1:]
inp = torch.where(inp == ignore_index, self.pad_value, inp)
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
return loss | PALI-main | pali/autoregressive_wrapper.py |
from pali.model import VitModel, Transformer, Pali | PALI-main | pali/__init__.py |
import torch
from pali.transformer import ViTransformerWrapper, Encoder, XTransformer
from transformers import AutoTokenizer
class VitModel:
def __init__(self,
image_size=256,
patch_size=32,
dim=512,
depth=6,
heads=8,
*args, **kwargs):
self.image_size = image_size
self.patch_size = patch_size
self.dim = dim
self.depth = depth
self.heads = heads
self.vit = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=dim,
depth=depth,
heads=heads
)
)
def process(self, img):
if img is None:
raise ValueError('Input image cannot be None')
if img.shape[1:] != (3, self.image_size, self.image_size):
raise ValueError('Input image must have the shape [*, 3, {}, {}]'.format(self.image_size, self.image_size))
return self.vit(img, return_embeddings=True)
class Pali:
def __init__(
self,
model_name = None,
image_size=256,
patch_size=32,
dim=512,
depth=6,
heads=8,
enc_num_tokens=256,
enc_max_seq_len=1024,
dec_num_tokens=256,
dec_max_seq_len=1024,
enc_depth=6,
enc_heads=8,
dec_depth=6,
dec_heads=8,
):
self.model_name = model_name
self.tokenizer = None
# self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.vit_model = VitModel(
image_size=image_size,
patch_size=patch_size,
dim=dim,
depth=depth,
heads=heads
)
self.pali_model = XTransformer(
dim=dim,
enc_num_tokens=enc_num_tokens,
enc_depth=enc_depth,
enc_heads=enc_heads,
enc_max_seq_len=enc_max_seq_len,
dec_num_tokens=dec_num_tokens,
dec_depth=dec_depth,
dec_heads=dec_heads,
dec_max_seq_len=dec_max_seq_len
)
def process(
self,
img,
prompt,
output,
mask
):
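# encode the image with the ViT, then prepend the image embeddings to the text prompt as input to the encoder-decoder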
img_embeds = self.vit_model.process(img)
result = self.pali_model(prompt, output, mask=mask, src_prepend_embeds=img_embeds)
return result
def generate(
self,
text,
seq_len=1024,
mask=None,
attn_mask=None,
model_name=None
):
if model_name:
self.model_name = model_name
if not self.model_name:
raise ValueError("model_name must be specidfied either in the class constructor or in the generate method")
if not self.tokenizer:
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
inputs = self.tokenizer.encode(text, return_tensors="pt")
seq_out_start = torch.zeros(1, 1).long()
result = self.pali_model.generate(inputs, seq_out_start, seq_len, mask, attn_mask)
result_text = self.tokenizer.decode(result[0], skip_special_tokens=True)
return result_text | PALI-main | pali/model.py |
from functools import partial
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Tensor = None
pre_softmax_attn: Tensor = None
post_softmax_attn: Tensor = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
pre_softmax_attn = dots.clone()
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates | PALI-main | pali/attend.py |
import math
from random import random
import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F
from functools import partial, wraps
from inspect import isfunction
from dataclasses import dataclass
from typing import List
from einops import rearrange, repeat
from pali.attend import Attend, Intermediates, CascadingHeads
from pali.autoregressive_wrapper import AutoregressiveWrapper
# constants
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: List[Tensor] = None
attn_intermediates: List[Intermediates] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
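# A minimal usage sketch (illustrative, not part of the original file): dropout_seq
# keeps a random subset of positions per batch element, shrinking the sequence from
# n to roughly keep_prob * n while trimming the mask consistently, e.g.
#
#   seq = torch.randn(2, 10, 512)                        # (batch, seq, dim)
#   mask = torch.ones(2, 10, dtype = torch.bool)
#   kept_seq, kept_mask = dropout_seq(seq, mask, dropout = 0.25)
#   # kept_seq.shape == (2, 7, 512), kept_mask.shape == (2, 7)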
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert (dim % 2) == 0
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(nn.Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(nn.Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else nn.Identity(),
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class LearnedAlibiPositionalBias(AlibiPositionalBias):
def __init__(self, heads, total_heads):
super().__init__(heads, total_heads)
log_slopes = torch.log(self.slopes)
self.learned_logslopes = nn.Parameter(log_slopes)
def forward(self, i, j):
h, device = self.heads, self.device
def get_slopes(param):
return pad_at_dim(param.exp(), (0, h - param.shape[0]), dim = -2)
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
bias = self.bias[..., :i, :j]
else:
bias = self.get_bias(i, j, device)
self.register_buffer('bias', bias, persistent = False)
slopes = get_slopes(self.learned_logslopes)
bias = bias * slopes
return bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
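# A minimal usage sketch (illustrative, dims assumed): RotaryEmbedding returns per-position
# frequencies (plus an xpos scale when enabled), and apply_rotary_pos_emb mixes them into
# the leading rotary dimensions of the query / key heads, e.g.
#
#   rotary = RotaryEmbedding(dim = 32)
#   freqs, xpos_scale = rotary(seq_len = 128, device = torch.device('cpu'))
#   q = torch.randn(1, 8, 128, 64)                       # (batch, heads, seq, dim_head)
#   q = torch.cat((apply_rotary_pos_emb(q[..., :32], freqs), q[..., 32:]), dim = -1)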
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
def scale_fn(t):
return t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(self, dim_in, dim_out, activation):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate)
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
) if not glu else GLU(dim, inner_dim, activation)
self.ff = nn.Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
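# A minimal usage sketch (illustrative, dims assumed): with glu = True the feedforward
# becomes a gated linear unit - it projects to twice the inner dimension, gates one half
# with the chosen activation, then applies the output projection, e.g.
#
#   ff = FeedForward(dim = 512, mult = 4, glu = True, swish = True, dropout = 0.1)
#   y = ff(torch.randn(2, 128, 512))                     # -> (2, 128, 512)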
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
value_dim_head = default(value_dim_head, dim_head)
q_dim = k_dim = dim_head * heads
v_dim = out_dim = value_dim_head * heads
self.one_kv_head = one_kv_head
if one_kv_head:
k_dim = dim_head
v_dim = value_dim_head
out_dim = v_dim * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
if cascading_heads:
# cascading heads - wrap the Attend logic
self.attend = CascadingHeads(self.attend)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
if not self.one_kv_head:
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = default(context_mask, mask)
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
alibi_learned = False,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned else AlibiPositionalBias
self.rel_pos = alibi_pos_klass(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
dropout = 0.,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
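# A minimal usage sketch (illustrative, sizes assumed): the wrapper patches the image,
# projects patches to the attention dimension, and either mean-pools + classifies or
# returns the per-patch embeddings, e.g.
#
#   encoder = ViTransformerWrapper(
#       image_size = 256,
#       patch_size = 32,
#       attn_layers = Encoder(dim = 512, depth = 6, heads = 8)
#   )
#   tokens = encoder(torch.randn(1, 3, 256, 256), return_embeddings = True)   # -> (1, 64, 512)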
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1. # GLM-130B and Cogview successfully used this, set at 0.1
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
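# A minimal usage sketch (illustrative, sizes assumed): TransformerWrapper couples token
# and positional embeddings with an AttentionLayers stack and a final logits projection, e.g.
#
#   model = TransformerWrapper(
#       num_tokens = 20000,
#       max_seq_len = 1024,
#       attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
#   )
#   logits = model(torch.randint(0, 20000, (1, 256)))    # -> (1, 256, 20000)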
class ContinuousTransformerWrapper(nn.Module):
def __init__(
self,
*,
max_seq_len,
attn_layers,
dim_in = None,
dim_out = None,
emb_dim = None,
max_mem_len = 0,
post_emb_norm = False,
emb_dropout = 0.,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
self.attn_layers = attn_layers
self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
def forward(
self,
x,
return_embeddings = False,
return_intermediates = False,
return_mems = False,
mask = None,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
**kwargs
):
x = self.project_in(x)
x = x + self.pos_emb(x, pos = pos)
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
_, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
x = self.emb_dropout(x)
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
out = self.project_out(x) if not return_embeddings else x
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), hiddens))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
class XTransformer(nn.Module):
def __init__(
self,
*,
dim,
tie_token_emb = False,
ignore_index = -100,
pad_value = 0,
deepnorm = False,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
if deepnorm:
enc_kwargs['scale_residual'] = True
dec_kwargs['scale_residual'] = True
enc_depth = enc_kwargs['depth']
dec_depth = dec_kwargs['depth']
enc_kwargs['scale_residual_constant'] = 0.81 * ((enc_depth ** 4) * dec_depth) ** .0625
dec_kwargs['scale_residual_constant'] = (3 * dec_depth) ** 0.25
self.encoder = TransformerWrapper(
**enc_transformer_kwargs,
attn_layers = Encoder(dim = dim, **enc_kwargs)
)
self.decoder = TransformerWrapper(
**dec_transformer_kwargs,
attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
)
if deepnorm:
deepnorm_init(self.encoder, 0.87 * ((enc_depth ** 4) * dec_depth) ** -0.0625)
deepnorm_init(self.decoder, (12 * dec_depth) ** -0.25)
if tie_token_emb:
self.decoder.token_emb = self.encoder.token_emb
self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
@torch.no_grad()
def generate(
self,
seq_in,
seq_out_start,
seq_len,
mask = None,
attn_mask = None,
**kwargs
):
encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
if exists(src_prepend_embeds) and exists(mask):
mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
if self.training and self.cross_attn_tokens_dropout > 0:
enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
out = self.decoder(tgt, context = enc, context_mask = mask)
return out | PALI-main | pali/transformer.py |
VIMA-main | example.py |
|
import torch
from torch.nn import Module
from transformers import AutoTokenizer, CLIPProcessor
from vima.transformer import (
Decoder,
Transformer,
ViTransformerWrapper,
Encoder
)
import logging
from vima.autoregressive import AutoregressiveWrapper
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
class VimaTokenizer:
"""
A tokenizer class for the Vima model
Attributes:
        processor (CLIPProcessor): The processor used to tokenize images
        tokenizer (AutoTokenizer): The tokenizer used to tokenize text
        im_idx (int): The index of the "<image>" token.
        im_end_idx (int): The index of the "</image>" token.
"""
def __init__(self):
try:
            self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
additional_special_tokens=["<image>", "</image>", "<audio>", "</audio>", "<video>", "</video>"],
eos_token="<eos>",
pad_token="<pad>",
extra_ids=0,
model_max_length=8192
)
except Exception as e:
logging.error(f"Failed to initialize VimaTokenizer: {e}")
raise
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"])
def tokenize_texts(self, texts: str):
"""
Tokenize given texts.
Args:
            texts (str): The text to be tokenized
Returns:
A tuple containing the tokenized texts and only the text tokens.
"""
try:
texts = self.tokenizer(
texts,
return_tensors="pt",
padding=True,
truncation=True
).input_ids
# Add image tokens to text as "<s> <image> </image> text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
except Exception as e:
logging.error(f"Failed to tokenize texts: {e}")
raise
def tokenize_images(self, images):
"""
Tokenizes given images.
Args:
images: The images to be tokenized
Returns:
The tokenized images.
"""
try:
return self.processor(images=images, return_tensors="pt").pixel_values
except Exception as e:
logging.error(f"Failed to tokenize images: {e}")
raise
def tokenize(self, sample):
"""
Tokenizes given sample.
Args:
            sample: The sample to be tokenized
Returns:
A dictionary containing the tokenized text tokens, images, labels, and attention mask.
"""
try:
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
except Exception as e:
logging.error(f"Failed to tokenize sample: {e}")
raise
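# A minimal usage sketch (illustrative; downloading the pretrained tokenizer requires
# network access): VimaTokenizer wraps the GPT-NeoX tokenizer with extra special tokens
# and splices the <image> / </image> placeholder ids right after the first text token, e.g.
#
#   tokenizer = VimaTokenizer()
#   text_tokens, only_text = tokenizer.tokenize_texts("pick up the red block")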
class Vima(Module):
"""
Vima is a transformer-based model architecture. It initializes with
a Transformer and AutoregressiveWrapper with default or user-specified parameters.
"""
def __init__(
self,
num_tokens=50432,
max_seq_len=8192,
dim=2560,
depth=32,
dim_head=128,
heads=24,
use_abs_pos_emb=False,
alibi_pos_bias=True,
alibi_num_heads=12,
rotary_xpos=True,
attn_flash=True,
attn_kv_heads = 2,
qk_norm=True,
attn_qk_norm=True,
attn_qk_norm_dim_scale=True,
):
"""
Initialize the model with specified or default parameters.
Args:
- num_tokens: Number of tokens in the vocabulary
- max_seq_len: Maximum sequence length
- dim: Dimension of the model
- depth: Depth of the model
- dim_head: Dimension of the model head
- heads: Number of heads
- use_abs_pos_emb: Whether to use absolute position embedding
- alibi_pos_bias: Alibi position bias
- alibi_num_heads: Number of alibi heads
- rotary_xpos: Rotary position
- attn_flash: Attention flash
- deepnorm: Deep normalization
- shift_tokens: Number of tokens to shift
- attn_one_kv_head: Attention one key/value head
- qk_norm: Query-key normalization
- attn_qk_norm: Attention query-key normalization
- attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
- embedding_provider: Embedding provider module
"""
super().__init__()
try:
self.Vima = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=dim,
depth=depth,
dim_head=dim_head,
heads=heads,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
attn_kv_heads=attn_kv_heads,
qk_norm=qk_norm,
attn_qk_norm=attn_qk_norm,
attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
)
)
self.decoder = AutoregressiveWrapper(self.Vima)
except Exception as e:
print("Failed to initialize Vima: ", e)
raise
def forward(self, text_tokens, **kwargs):
"""
Forward pass through the model. It expects the input text_tokens.
Args:
- text_tokens: Input tokens
- kwargs: Other arguments
Returns:
- output from the decoder
"""
try:
            return self.decoder(text_tokens, **kwargs)
except Exception as e:
print("Failed in forward method: ", e)
raise
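# A minimal usage sketch (illustrative; the default hyperparameters build a large model):
# Vima is a decoder-only stack wrapped in AutoregressiveWrapper, so it consumes token ids directly, e.g.
#
#   model = Vima()
#   tokens = torch.randint(0, 50432, (1, 128))
#   output = model(tokens)   # logits (and loss) from the autoregressive wrapper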
class VimaMultiModal(Module):
def __init__(
self,
image_size=256,
patch_size=32,
encoder_dim=512,
encoder_depth=6,
encoder_heads=8,
num_tokens=20000,
max_seq_len=1024,
decoder_dim=512,
decoder_depth=6,
decoder_heads=8,
alibi_num_heads=4,
use_abs_pos_emb=False,
cross_attend=True,
alibi_pos_bias=True,
rotary_xpos=True,
attn_flash=True,
qk_norm=True
):
super(VimaMultiModal, self).__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads
)
)
self.decoder = Transformer(
num_tokens=num_tokens,
max_seq_len=max_seq_len,
use_abs_pos_emb=use_abs_pos_emb,
attn_layers=Decoder(
dim=decoder_dim,
depth=decoder_depth,
heads=decoder_heads,
cross_attend=cross_attend,
alibi_pos_bias=alibi_pos_bias,
alibi_num_heads=alibi_num_heads,
rotary_xpos=rotary_xpos,
attn_flash=attn_flash,
qk_norm=qk_norm,
)
)
def forward(self, img, text):
try:
encoded = self.encoder(img, return_embeddings=True)
return self.decoder(text, context=encoded)
except Exception as error:
print(f"Failed in forward method: {error}")
raise | VIMA-main | vima/vima.py |
from math import ceil
import torch
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# nucleus
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
# topk
def top_k(logits, thres = 0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# top_a
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
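# A minimal illustration (not part of the original file): each filter takes raw logits of
# shape (batch, vocab) and pushes improbable entries to -inf before softmax sampling, e.g.
#
#   logits = torch.randn(2, 1000)
#   filtered = top_k(logits, thres = 0.9)        # keeps ceil(0.1 * 1000) = 100 entries per row
#   probs = F.softmax(filtered / 0.8, dim = -1)  # temperature of 0.8
#   sample = torch.multinomial(probs, 1)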
# autoregressive wrapper class
class AutoregressiveWrapper(nn.Module):
def __init__(
self,
net,
ignore_index = -100,
pad_value = 0,
mask_prob = 0.
):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
# paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
assert mask_prob < 1.
self.mask_prob = mask_prob
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
min_p_pow = 2.0,
min_p_ratio = 0.02,
**kwargs
):
start_tokens, ps = pack([start_tokens], '* n')
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
out, = unpack(out, ps, '* n')
return out
def forward(self, x, return_loss=True, **kwargs):
seq, ignore_index = x.shape[1], self.ignore_index
inp, target = x[:, :-1], x[:, 1:]
if self.mask_prob > 0.:
rand = torch.randn(inp.shape, device = x.device)
rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
num_mask = min(int(seq * self.mask_prob), seq - 1)
indices = rand.topk(num_mask, dim = -1).indices
mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
kwargs.update(self_attn_context_mask = mask)
logits = self.net(inp, **kwargs)
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
target,
ignore_index = ignore_index
)
if return_loss:
return logits, loss
return logits | VIMA-main | vima/autoregressive.py |
from vima.vima import VimaTokenizer, Vima, VimaMultiModal | VIMA-main | vima/__init__.py |
from typing import Callable, Literal
import torch
from torch import nn
from torch.nn import Embedding as _Embedding
#utils
class Embedding(_Embedding):
@property
def output_dim(self):
return self.embedding_dim
def build_mlp(
input_dim,
*,
hidden_dim,
output_dim,
hidden_depth,
num_layers,
activation: str | Callable = "relu",
weight_init: str | Callable = "orthogonal",
bias_init="zeros",
norm_type: Literal["batchnorm", "layernorm"] | None = None,
add_input_activation: bool | str | Callable = False,
add_input_norm: bool = False,
add_output_activation: bool | str | Callable = False,
add_output_norm: bool = False,
) -> nn.Sequential:
"""
Tanh is used with orthogonal init => better than relu
Args:
norm_type: batchnorm or layernorm applied to intermediate layers
add_input_activation: add nonlinearty to the input _before_
procesing a feat from a preceding image encoder => image encoder has a linear layer
at the end
add_input_norm: whether to add a norm layr to the input _before_ the mlp computation
add_output_activation: add nonlinearty to the output _after_ the mlp
add_output_norm: add norm layer => _after_ mlp comp
"""
assert (hidden_depth is None) != (num_layers is None), (
"Either hidden_depth or num_layers must be specified but not both"
"num_layers is defined as hidden_depth+1"
)
if hidden_depth is not None:
assert hidden_depth >= 0
if num_layers is not None:
assert num_layers >= 1
act_layer = get_activation(activation)
weight_init = get_initializer(weight_init, activation)
bias_init = get_initializer(bias_init, activation)
if norm_type is not None:
norm_type = norm_type.lower()
if not norm_type:
norm_type = nn.Identity
elif norm_type == "batchnorm":
        norm_type = nn.BatchNorm1d
elif norm_type == "layernorm":
norm_type = nn.LayerNorm
else:
raise ValueError(f"Unsupposted norm layer: {norm_type}")
hidden_depth = num_layers - 1 if hidden_depth is None else hidden_depth
if hidden_depth == 0:
mods = [nn.Linear(input_dim, output_dim)]
else:
mods = [nn.Linear(input_dim, hidden_dim), norm_type(hidden_dim), act_layer()]
for i in range(hidden_depth - 1):
mods += [
                nn.Linear(hidden_dim, hidden_dim),
norm_type(hidden_dim),
act_layer(),
]
mods.append(nn.Linear(hidden_dim, output_dim))
if add_input_norm:
mods = [norm_type(input_dim)] + mods
if add_input_activation:
if add_input_activation is not True:
act_layer = get_activation(add_input_activation)
mods = [act_layer()] + mods
if add_output_norm:
mods.append(norm_type(output_dim))
if add_output_activation:
if add_output_activation is not True:
act_layer = get_activation(add_output_activation)
mods.append(act_layer())
for mod in mods:
if isinstance(mod, nn.Linear):
weight_init(mod.weight)
bias_init(mod.bias)
return nn.Sequential(*mods)
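# A minimal usage sketch (illustrative, dims assumed): exactly one of hidden_depth or
# num_layers must be provided; weights get orthogonal init and biases are zeroed, e.g.
#
#   mlp = build_mlp(
#       256,
#       hidden_dim = 512,
#       output_dim = 64,
#       hidden_depth = 2,
#       num_layers = None,
#       activation = "relu",
#       norm_type = "layernorm",
#   )
#   out = mlp(torch.randn(8, 256))               # -> (8, 64)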
def get_activation(
activation: str | Callable | None
) -> Callable:
if not activation:
return nn.Identity
elif callable(activation):
return activation
ACT_LAYER = {
"tanh": nn.Tanh,
"relu": lambda: nn.ReLU(inplace=True),
"leaky_relu": lambda: nn.LeakyReLU(inplace=True),
"swish": lambda: nn.SiLU(inplace=True),
"sigmoid": nn.Sigmoid,
"elu": lambda: nn.ELU(inplace=True),
"gelu": nn.GELU,
}
activation = activation.lower()
assert activation in ACT_LAYER, f"Supported activations: {ACT_LAYER.keys()}"
return ACT_LAYER[activation]
def get_initializer(
method: str | Callable,
activation: str
) -> Callable:
if isinstance(method, str):
assert hasattr(
nn.init, f"{method}_"
), f"Initalizer nn.init.{method}_ does not exist"
if method == "orthogonal":
try:
gain = nn.init.calculate_gain(activation)
except ValueError:
gain = 1.0
return lambda x: nn.init.orthogonal_(x, gain=gain)
else:
return getattr(nn.init, f"{method}_")
else:
assert callable(method)
return method
class Categorical(torch.distributions.Categorical):
def mode(self):
return self.logits.argmax(dim=-1)
class MultiCategorical(torch.distributions.Distribution):
def __init__(
self,
logits,
action_dims: list[int]
):
assert logits.dim() >= 2, logits.shape
        super().__init__(batch_shape=logits.shape[:-1], validate_args=False)
self._action_dims = tuple(action_dims)
assert logits.size(-1) == sum(
self._action_dims
), f"sum of action dims {self._action_dims} != {logits.size(-1)}"
self._dist = [
Categorical(logits=split)
for split in torch.split(logits, action_dims, dim=-1)
]
def mode(self):
return torch.stack(
[torch.argmax(dist.probs, dim=-1) for dist in self._dist], dim=-1
)
class ActionDecoder(nn.Module):
def __init__(
self,
input_dim: int,
*,
action_dims: dict[str, int | list[int]],
hidden_dim: int,
hidden_depth: int,
activation: str | Callable = "relu",
norm_type: Literal["batchnorm", "layernorm"] | None = None,
last_layer_gain: float | None = 0.01
):
super().__init__()
self._decoders = nn.ModuleDict()
        for k, v in action_dims.items():
if isinstance(v, int):
self._decoders[k] = CategoricalNet(
input_dim,
action_dim=v,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
activation=activation,
norm_type=norm_type,
last_layer_gain=last_layer_gain
)
elif isinstance(v, list):
self._decoders[k] = MultiCategoricalNet(
input_dim,
action_dims=v,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
activation=activation,
norm_type=norm_type,
last_layer_gain=last_layer_gain
)
else:
raise ValueError(f"Invalid action_dims value: {v}")
def forward(self, x: torch.Tensor):
return {k: v(x) for k, v in self._decoders.items()}
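# Hedged usage sketch (illustrative sizes): mixing a single categorical head and a
# multi-categorical head keyed by action name.
# decoder = ActionDecoder(
#     256,
#     action_dims={"gripper": 2, "pose": [11, 11, 11]},
#     hidden_dim=128,
#     hidden_depth=1,
# )
# dists = decoder(torch.randn(4, 256))  # dict mapping names to distributions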
def build_mlp_distribution_net(
input_dim,
*,
output_dim,
hidden_dim,
hidden_depth,
activation: str | Callable = "relu",
norm_type: Literal["batchnorm", "layernorm"] | None = None,
last_layer_gain: float | None = 0.01,
):
"""
Use orthogonal init to initialize the MLP policy.
Args:
last_layer_gain: orthogonal init gain for the last fc layer.
You may want to set it to a small value to have the gaussian centered around 0.0 in the beginning.
"""
mlp = build_mlp(
input_dim=input_dim,
output_dim=output_dim,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
activation=activation,
weight_init="orthogonal",
bias_init="zeros",
norm_type=norm_type
)
if last_layer_gain:
assert last_layer_gain > 0
nn.init.orthogonal_(mlp[-1].weight, gain=last_layer_gain)
return mlp
class CategoricalNet(nn.Module):
def __init__(
self,
input_dim,
*,
action_dim,
hidden_dim,
hidden_depth,
activation: str | Callable = "relu",
norm_type: Literal["batchnorm", "layernorm"] | None = None,
last_layer_gain: float | None = 0.01,
):
"""
Use orthogonal initialization to init the MLP policy.
Args:
last_layer_gain: orthogonal init gain for the last fc layer.
You may want to set it to a small value to make the categorical close to uniform random at the beginning.
"""
super().__init__()
self.mlp = build_mlp_distribution_net(
input_dim=input_dim,
output_dim=action_dim,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
activation=activation,
norm_type=norm_type,
last_layer_gain=last_layer_gain,
)
self.head = CategoricalHead()
def forward(self, x):
return self.head(self.mlp(x))
class MultiCategoricalNet(nn.Module):
def __init__(
self,
input_dim,
*,
action_dims,
hidden_dim,
hidden_depth,
activation: str | Callable = "relu",
norm_type: Literal["batchnorm", "layernorm"] | None = None,
last_layer_gain: float | None = 0.01,
):
"""
Orthogonal init to init the MLP policy.
Split heads do not share network weights.
Args:
last_layer_gain: orthogonal init gain for the last fc layer.
You may set it to a small value to make the categorical close to uniform random at the beginning.
Set to None to use the default gain.
"""
super().__init__()
self.mlps = nn.ModuleList()
for action in action_dims:
net = build_mlp_distribution_net(
input_dim=input_dim,
output_dim=action,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
activation=activation,
norm_type=norm_type,
last_layer_gain=last_layer_gain
)
self.mlps.append(net)
self.head = MultiCategoricalHead(action_dims)
def forward(self, x):
return self.head(torch.cat([mlp(x) for mlp in self.mlps], dim=-1))
class CategoricalHead(nn.Module):
def forward(self, x: torch.Tensor) -> Categorical:
return Categorical(logits=x)
class MultiCategoricalHead(nn.Module):
def __init__(
self,
action_dims: list[int]
):
super().__init__()
self._action_dims = tuple(action_dims)
def forward(
self,
x: torch.Tensor
) -> MultiCategorical:
return MultiCategorical(logits=x, action_dims=self._action_dims)
class ActionEmbedding(nn.Module):
def __init__(
self,
output_dim: int,
*,
embed_dict: dict[str, nn.Module],
):
super().__init__()
self.embed_dict = nn.ModuleDict(embed_dict)
embed_dict_output_dim = sum(
embed_dict[k].output_dim for k in sorted(embed_dict.keys())
)
self._post_layer = (
nn.Identity()
if output_dim == embed_dict_output_dim
else nn.Linear(embed_dict_output_dim, output_dim)
)
self._output_dim = output_dim
self._input_fields_checked = False
@property
def output_dim(self):
return self._output_dim
def forward(
self,
x_dict: dict[str, torch.Tensor]
):
if not self._input_fields_checked:
assert set(x_dict.keys()) == set(self.embed_dict.keys())
self._input_fields_checked = True
return self._post_layer(
torch.cat(
[self.embed_dict[k](x_dict[k]) for k in sorted(x_dict.keys())], dim=-1
)
)
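# Hedged usage sketch (illustrative): each sub-embedder must expose an `output_dim`
# attribute, e.g. the ContinuousActionEmbedding defined just below.
# embedder = ActionEmbedding(
#     output_dim=256,
#     embed_dict={
#         "pose": ContinuousActionEmbedding(output_dim=128, input_dim=7, hidden_dim=64, hidden_depth=1),
#         "gripper": ContinuousActionEmbedding(output_dim=128, input_dim=1, hidden_dim=64, hidden_depth=1),
#     },
# )
# out = embedder({"pose": torch.randn(4, 7), "gripper": torch.randn(4, 1)})  # (4, 256)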
class ContinuousActionEmbedding(nn.Module):
def __init__(
self,
output_dim: int,
*,
input_dim,
hidden_dim,
hidden_depth,
):
super().__init__()
self.layer = build_mlp(
input_dim=input_dim,
hidden_dim=hidden_dim,
output_dim=output_dim,
hidden_depth=hidden_depth,
)
self.output_dim = output_dim
def forward(self, x: torch.Tensor):
return self.layer(x)
| VIMA-main | vima/model.py |
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from typing import Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from packaging import version
from torch import Tensor, einsum, nn
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
@dataclass
class Intermediates:
qk_similarities: Optional[Tensor] = None
pre_softmax_attn: Optional[Tensor] = None
post_softmax_attn: Optional[Tensor] = None
def to_tuple(self):
return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def compact(arr):
return [*filter(exists, arr)]
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# functions for creating causal mask
# need a special one for onnx cpu (no support for .triu)
def create_causal_mask(i, j, device):
return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
def onnx_create_causal_mask(i, j, device):
r = torch.arange(i, device = device)
causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
return causal_mask
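# Hedged illustration: both helpers return a boolean mask that is True at positions
# to be masked out (keys strictly in the future relative to each query).
# create_causal_mask(3, 3, torch.device('cpu'))
# # tensor([[False,  True,  True],
# #         [False, False,  True],
# #         [False, False, False]])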
# main class
class Attend(nn.Module):
def __init__(
self,
*,
dropout = 0.,
causal = False,
heads = None,
talking_heads = False,
sparse_topk = None,
scale = None,
qk_norm = False,
flash = False,
add_zero_kv = False,
onnxable = False
):
super().__init__()
self.scale = scale
self.qk_norm = qk_norm
self.causal = causal
self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
# talking heads
assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
# sparse topk
assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
self.sparse_topk = sparse_topk
# add a key / value token composed of zeros
# in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
self.add_zero_kv = add_zero_kv
# flash attention
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def flash_attn(
self,
q, k, v,
mask = None,
attn_bias = None
):
batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
if v.ndim == 3:
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
if self.qk_norm:
default_scale = q.shape[-1] ** -0.5
q = q * (default_scale / self.scale)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
causal = self.causal
if exists(mask):
assert mask.ndim == 4
mask = mask.expand(batch, heads, q_len, k_len)
# manually handle causal mask, if another mask was given
if causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
mask = mask & ~causal_mask
causal = False
# handle alibi positional bias
# convert from bool to float
if exists(attn_bias):
attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
# if mask given, the mask would already contain the causal mask from above logic
# otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
mask_value = -torch.finfo(q.dtype).max
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
elif causal:
causal_mask = self.create_causal_mask(q_len, k_len, device = device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
causal = False
# scaled_dot_product_attention handles attn_mask either as bool or additive bias
# make it an additive bias here
mask = attn_bias
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out, Intermediates()
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
scale = default(self.scale, q.shape[-1] ** -0.5)
# handle grouped multi-query attention
if kv_heads == 1:
k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
elif kv_heads < heads:
k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
# handle zero kv, as means for allowing network to attend to nothing
if self.add_zero_kv:
k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
if exists(attn_bias):
attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
if self.flash:
assert not exists(prev_attn), 'residual attention not compatible with flash attention'
return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
if exists(prev_attn):
dots = dots + prev_attn
qk_similarities = dots.clone()
if self.talking_heads:
dots = self.pre_softmax_talking_heads(dots)
if exists(attn_bias):
dots = dots + attn_bias
i, j, dtype = *dots.shape[-2:], dots.dtype
mask_value = -torch.finfo(dots.dtype).max
if exists(self.sparse_topk) and self.sparse_topk < j:
top_values, _ = dots.topk(self.sparse_topk, dim = -1)
sparse_topk_mask = dots < top_values[..., -1:]
mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
if exists(mask):
dots = dots.masked_fill(~mask, mask_value)
if self.causal:
causal_mask = self.create_causal_mask(i, j, device = device)
dots = dots.masked_fill(causal_mask, mask_value)
pre_softmax_attn = dots.clone()
attn = self.attn_fn(dots, dim = -1)
attn = attn.type(dtype)
post_softmax_attn = attn.clone()
attn = self.attn_dropout(attn)
if self.talking_heads:
attn = self.post_softmax_talking_heads(attn)
out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
intermediates = Intermediates(
qk_similarities = qk_similarities,
pre_softmax_attn = pre_softmax_attn,
post_softmax_attn = post_softmax_attn
)
return out, intermediates
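# Hedged usage sketch (illustrative shapes): queries / keys / values laid out as
# (batch, heads, seq, dim_head); returns the attention output plus Intermediates.
# attend = Attend(causal = True, dropout = 0.1)
# q, k, v = (torch.randn(2, 8, 16, 64) for _ in range(3))
# out, intermediates = attend(q, k, v)  # out: (2, 8, 16, 64)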
# cascading heads logic
def to_single_heads(t, dim = 1):
heads = t.unbind(dim = dim)
return tuple(head.unsqueeze(dim) for head in heads)
class CascadingHeads(nn.Module):
def __init__(self, attend: Attend):
super().__init__()
self.attend = attend
def forward(
self,
q, k, v,
mask = None,
attn_bias = None,
prev_attn = None
):
assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same'
# split inputs into per-head inputs
heads = q.shape[1]
queries = to_single_heads(q)
keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads)
values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads)
mask = (mask,) * heads
attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads)
prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads)
# now loop through each head, without output of previous head summed with the next head
# thus cascading
all_outs = []
all_intermediates = []
prev_head_out = None
for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn):
if exists(prev_head_out):
h_q = h_q + prev_head_out
out, intermediates = self.attend(
h_q, h_k, h_v,
mask = h_mask,
attn_bias = h_attn_bias,
prev_attn = h_prev_attn
)
prev_head_out = out
all_outs.append(out)
all_intermediates.append(intermediates)
# cat all output heads
all_outs = torch.cat(all_outs, dim = 1)
# cat all intermediates, if they exist
qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates))
qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn))
aggregated_intermediates = Intermediates(
qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None,
pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None,
post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None
)
return all_outs, aggregated_intermediates | VIMA-main | vima/attend.py |
from vima.transformer import ViTransformerWrapper, Encoder
class VisionEncoder:
# # Usage:
# image_encoder = VisionEncoder()
# img_embeddings = image_encoder.embed(img) # You'd provide your batch of image tensors here.
def __init__(
self,
image_size: int = 256,
patch_size: int = 32,
encoder_dim: int = 512,
encoder_depth: int = 6,
encoder_heads: int = 8,
):
super().__init__()
self.encoder = ViTransformerWrapper(
image_size=image_size,
patch_size=patch_size,
attn_layers=Encoder(
dim=encoder_dim,
depth=encoder_depth,
heads=encoder_heads,
)
)
def embed(self, img):
encoded = self.encoder(img, return_embeddings=True)
return encoded
| VIMA-main | vima/vit.py |
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
from random import random
from typing import Callable, List, Optional
import torch
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from torch import Tensor, einsum, nn
from vima.attend import Attend, Intermediates
DEFAULT_DIM_HEAD = 64
@dataclass
class LayerIntermediates:
hiddens: Optional[List[Tensor]] = None
attn_intermediates: Optional[List[Intermediates]] = None
layer_hiddens: Optional[List[Tensor]] = None
attn_z_loss: Optional[Tensor] = None
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def divisible_by(num, den):
return (num % den) == 0
def maybe(fn):
@wraps(fn)
def inner(x, *args, **kwargs):
if not exists(x):
return x
return fn(x, *args, **kwargs)
return inner
class always():
def __init__(self, val):
self.val = val
def __call__(self, *args, **kwargs):
return self.val
class not_equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x != self.val
class equals():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return x == self.val
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor helpers
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def l2norm(t, groups = 1):
t = rearrange(t, '... (g d) -> ... g d', g = groups)
t = F.normalize(t, p = 2, dim = -1)
return rearrange(t, '... g d -> ... (g d)')
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
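# Hedged example: pad two zeros on the left of the last dimension.
# pad_at_dim(torch.ones(2, 3), (2, 0))  # shape (2, 5), first two columns are 0.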
def or_reduce(masks):
head, *body = masks
for rest in body:
head = head | rest
return head
# auxiliary loss helpers
def calc_z_loss(
pre_softmax_attns: List[Tensor],
mask = None,
weight = 1.
):
# the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
# in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
# also used in PaLM as one of the measures
lse = 0.
for attn in pre_softmax_attns:
lse = lse + attn.logsumexp(dim = -1)
loss = torch.square(lse)
loss = reduce(loss, 'b h n -> b n', 'sum')
if not exists(mask):
return loss.mean() * weight
loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
return loss * weight
# init helpers
def init_zero_(layer):
nn.init.constant_(layer.weight, 0.)
if exists(layer.bias):
nn.init.constant_(layer.bias, 0.)
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# initializations
def deepnorm_init(
transformer,
beta,
module_name_match_list = ['.ff.', '.to_v', '.to_out']
):
for name, module in transformer.named_modules():
if type(module) != nn.Linear:
continue
needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list))
gain = beta if needs_beta_gain else 1
nn.init.xavier_normal_(module.weight.data, gain = gain)
if exists(module.bias):
nn.init.constant_(module.bias.data, 0)
# structured dropout, more effective than traditional attention dropouts
def dropout_seq(seq, mask, dropout):
b, n, *_, device = *seq.shape, seq.device
logits = torch.randn(b, n, device = device)
if exists(mask):
mask_value = max_neg_value(logits)
logits = logits.masked_fill(~mask, mask_value)
keep_prob = 1. - dropout
num_keep = max(1, int(keep_prob * n))
keep_indices = logits.topk(num_keep, dim = 1).indices
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
seq = seq[batch_indices, keep_indices]
if exists(mask):
seq_counts = mask.sum(dim = -1)
seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
mask = mask[batch_indices, keep_indices] & keep_mask
return seq, mask
# activations
class ReluSquared(nn.Module):
def forward(self, x):
return F.relu(x) ** 2
# embedding
class TokenEmbedding(nn.Module):
def __init__(self, dim, num_tokens, l2norm_embed = False):
super().__init__()
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(num_tokens, dim)
def forward(self, x):
token_emb = self.emb(x)
return l2norm(token_emb) if self.l2norm_embed else token_emb
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len, l2norm_embed = False):
super().__init__()
self.scale = dim ** -0.5 if not l2norm_embed else 1.
self.max_seq_len = max_seq_len
self.l2norm_embed = l2norm_embed
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
if not exists(pos):
pos = torch.arange(seq_len, device = device)
pos_emb = self.emb(pos)
pos_emb = pos_emb * self.scale
return l2norm(pos_emb) if self.l2norm_embed else pos_emb
class ScaledSinusoidalEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
assert divisible_by(dim, 2)
self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
half_dim = dim // 2
freq_seq = torch.arange(half_dim).float() / half_dim
inv_freq = theta ** -freq_seq
self.register_buffer('inv_freq', inv_freq, persistent = False)
def forward(self, x, pos = None):
seq_len, device = x.shape[1], x.device
if not exists(pos):
pos = torch.arange(seq_len, device = device)
emb = einsum('i, j -> i j', pos, self.inv_freq)
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb * self.scale
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
device = self.device
q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
class DynamicPositionBias(nn.Module):
def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
super().__init__()
assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
self.log_distance = log_distance
self.mlp = nn.ModuleList([])
self.mlp.append(Sequential(
nn.Linear(1, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
for _ in range(depth - 1):
self.mlp.append(Sequential(
nn.Linear(dim, dim),
nn.LayerNorm(dim) if norm else None,
nn.SiLU()
))
self.mlp.append(nn.Linear(dim, heads))
@property
def device(self):
return next(self.parameters()).device
def forward(self, i, j):
assert i == j
n, device = j, self.device
# get the (n x n) matrix of distances
seq_arange = torch.arange(n, device = device)
context_arange = torch.arange(n, device = device)
indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
indices += (n - 1)
# input to continuous positions MLP
pos = torch.arange(-n + 1, n, device = device).float()
pos = rearrange(pos, '... -> ... 1')
if self.log_distance:
pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
for layer in self.mlp:
pos = layer(pos)
# get position biases
bias = pos[indices]
bias = rearrange(bias, 'i j h -> h i j')
return bias
class AlibiPositionalBias(nn.Module):
def __init__(self, heads, total_heads, **kwargs):
super().__init__()
self.heads = heads
self.total_heads = total_heads
slopes = Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.register_buffer('slopes', slopes, persistent = False)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(j - i, j, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
@property
def device(self):
return next(self.buffers()).device
def forward(self, i, j):
h, device = self.total_heads, self.device
if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
self.register_buffer('bias', bias, persistent = False)
return self.bias
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
use_xpos = False,
scale_base = 512,
interpolation_factor = 1.,
base = 10000,
base_rescale_factor = 1.
):
super().__init__()
# proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
# has some connection to NTK literature
# https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
base *= base_rescale_factor ** (dim / (dim - 2))
inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
assert interpolation_factor >= 1.
self.interpolation_factor = interpolation_factor
if not use_xpos:
self.register_buffer('scale', None)
return
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.scale_base = scale_base
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
t = t / self.interpolation_factor
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not exists(self.scale):
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(t, freqs, scale = 1):
seq_len = t.shape[-2]
freqs = freqs[-seq_len:, :]
return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
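# Hedged usage sketch (illustrative): apply rotary frequencies to a
# (batch, heads, seq, dim_head) tensor; scale stays 1. when use_xpos = False.
# rotary = RotaryEmbedding(dim = 32)
# freqs, xpos_scale = rotary(16, torch.device('cpu'))
# q = torch.randn(2, 8, 16, 32)
# q = apply_rotary_pos_emb(q, freqs)  # same shape, positions encoded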
# norms
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
def scale_fn(t):
return t * self.value
if not isinstance(out, tuple):
return scale_fn(out)
return (scale_fn(out[0]), *out[1:])
class ScaleNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True)
return x / norm.clamp(min = self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.g
class SimpleRMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale
# residual and residual gates
class Residual(nn.Module):
def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
super().__init__()
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
self.scale_residual_constant = scale_residual_constant
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
if self.scale_residual_constant != 1:
residual = residual * self.scale_residual_constant
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim, scale_residual = False, **kwargs):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
def forward(self, x, residual):
if exists(self.residual_scale):
residual = residual * self.residual_scale
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# token shifting
def shift(t, amount, mask = None):
if amount == 0:
return t
else:
amount = min(amount, t.shape[1])
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
class ShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# feedforward
class GLU(nn.Module):
def __init__(
self,
dim_in,
dim_out,
activation: Callable,
mult_bias = False
):
super().__init__()
self.act = activation
self.proj = nn.Linear(dim_in, dim_out * 2)
self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim = -1)
return x * self.act(gate) * self.mult_bias
class FeedForward(nn.Module):
def __init__(
self,
dim,
dim_out = None,
mult = 4,
glu = False,
glu_mult_bias = False,
swish = False,
relu_squared = False,
post_act_ln = False,
dropout = 0.,
no_bias = False,
zero_init_output = False
):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
if relu_squared:
activation = ReluSquared()
elif swish:
activation = nn.SiLU()
else:
activation = nn.GELU()
if glu:
project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
else:
project_in = nn.Sequential(
nn.Linear(dim, inner_dim, bias = not no_bias),
activation
)
self.ff = Sequential(
project_in,
nn.LayerNorm(inner_dim) if post_act_ln else None,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out, bias = not no_bias)
)
# init last linear layer to 0
if zero_init_output:
init_zero_(self.ff[-1])
def forward(self, x):
return self.ff(x)
# attention. it is all we need
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = DEFAULT_DIM_HEAD,
heads = 8,
causal = False,
flash = False,
talking_heads = False,
head_scale = False,
sparse_topk = None,
num_mem_kv = 0,
dropout = 0.,
on_attn = False,
gate_values = False,
zero_init_output = False,
max_attend_past = None,
qk_norm = False,
qk_norm_groups = 1,
qk_norm_scale = 10,
qk_norm_dim_scale = False,
one_kv_head = False,
kv_heads = None,
shared_kv = False,
value_dim_head = None,
tensor_product = False, # https://arxiv.org/abs/2208.06061
cascading_heads = False,
add_zero_kv = False, # same as add_zero_attn in pytorch
onnxable = False
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.max_attend_past = max_attend_past
assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both'
value_dim_head = default(value_dim_head, dim_head)
kv_heads = default(kv_heads, heads)
kv_heads = 1 if one_kv_head else kv_heads
assert divisible_by(heads, kv_heads)
self.kv_heads = kv_heads
q_dim = dim_head * heads
k_dim = dim_head * kv_heads
v_dim = value_dim_head * kv_heads
out_dim = value_dim_head * heads
self.to_q = nn.Linear(dim, q_dim, bias = False)
self.to_k = nn.Linear(dim, k_dim, bias = False)
# shared key / values, for further memory savings during inference
assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
# relations projection from tp-attention
self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
# add GLU gating for aggregated values, from alphafold2
self.to_v_gate = None
if gate_values:
self.to_v_gate = nn.Linear(dim, out_dim)
nn.init.constant_(self.to_v_gate.weight, 0)
nn.init.constant_(self.to_v_gate.bias, 1)
# cosine sim attention
self.qk_norm = qk_norm
self.qk_norm_groups = qk_norm_groups
self.qk_norm_scale = qk_norm_scale
# whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
self.qk_norm_dim_scale = qk_norm_dim_scale
self.qk_norm_q_scale = self.qk_norm_k_scale = 1
if qk_norm and qk_norm_dim_scale:
self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head))
self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head))
assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm groups'
assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
# attend class - includes core attention algorithm + talking heads
self.attend = Attend(
heads = heads,
causal = causal,
talking_heads = talking_heads,
dropout = dropout,
sparse_topk = sparse_topk,
qk_norm = qk_norm,
scale = qk_norm_scale if qk_norm else self.scale,
add_zero_kv = add_zero_kv,
flash = flash,
onnxable = onnxable
)
# head scaling
self.head_scale = head_scale
if head_scale:
self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
# init output projection 0
if zero_init_output:
init_zero_(self.to_out)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
rel_pos = None,
rotary_pos_emb = None,
prev_attn = None,
mem = None
):
b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context)
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
r_input = x
if exists(mem):
k_input = torch.cat((mem, k_input), dim = -2)
v_input = torch.cat((mem, v_input), dim = -2)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input) if exists(self.to_v) else k
r = self.to_r(r_input) if exists(self.to_r) else None
q = rearrange(q, 'b n (h d) -> b h n d', h = h)
k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r))
if self.qk_norm:
qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
q, k = map(qk_l2norm, (q, k))
q = q * self.qk_norm_q_scale
k = k * self.qk_norm_k_scale
if exists(rotary_pos_emb) and not has_context:
freqs, xpos_scale = rotary_pos_emb
l = freqs.shape[-1]
q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v))
ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale)))
q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr)))
input_mask = context_mask if has_context else mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
if self.qk_norm:
mem_k = l2norm(mem_k)
mem_k = mem_k * self.qk_norm_k_scale
k = torch.cat((mem_k, k), dim = -2)
v = torch.cat((mem_v, v), dim = -2)
if exists(input_mask):
input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
i, j = map(lambda t: t.shape[-2], (q, k))
# determine masking
max_neg_value(q)
masks = []
final_attn_mask = None
if exists(input_mask):
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
if exists(attn_mask):
assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
if attn_mask.ndim == 2:
attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
elif attn_mask.ndim == 3:
attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
masks.append(~attn_mask)
if exists(self.max_attend_past):
range_q = torch.arange(j - i, j, device = device)
range_k = torch.arange(j, device = device)
dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
max_attend_past_mask = dist > self.max_attend_past
masks.append(max_attend_past_mask)
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
# prepare relative positional bias, if needed
attn_bias = None
if exists(rel_pos):
attn_bias = rel_pos(i, j)
# attention is all we need
out, intermediates = self.attend(
q, k, v,
mask = final_attn_mask,
attn_bias = attn_bias,
prev_attn = prev_attn
)
# https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
if exists(r):
out = out * r + out
# normformer scaling of heads
if head_scale:
out = out * self.head_scale_params
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# alphafold2 styled gating of the values
if exists(self.to_v_gate):
gates = self.to_v_gate(x)
out = out * gates.sigmoid()
# combine the heads
out = self.to_out(out)
if exists(mask):
mask = rearrange(mask, 'b n -> b n 1')
out = out.masked_fill(~mask, 0.)
return out, intermediates
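# Hedged usage sketch (illustrative): causal self-attention over a (batch, seq, dim)
# sequence; returns the projected output and attention Intermediates.
# attn = Attention(dim = 512, heads = 8, causal = True)
# out, intermediates = attn(torch.randn(2, 16, 512))  # out: (2, 16, 512)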
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads = 8,
causal = False,
cross_attend = False,
only_cross = False,
use_scalenorm = False,
use_rmsnorm = False,
use_simple_rmsnorm = False,
alibi_pos_bias = False,
alibi_num_heads = None,
rel_pos_bias = False,
rel_pos_num_buckets = 32,
rel_pos_max_distance = 128,
dynamic_pos_bias = False,
dynamic_pos_bias_log_distance = False,
dynamic_pos_bias_mlp_depth = 2,
dynamic_pos_bias_norm = False,
rotary_pos_emb = False,
rotary_emb_dim = None,
rotary_xpos = False,
rotary_interpolation_factor = 1.,
rotary_xpos_scale_base = 512,
rotary_base_rescale_factor = 1.,
custom_layers = None,
sandwich_coef = None,
par_ratio = None,
residual_attn = False,
cross_residual_attn = False,
macaron = False,
pre_norm = True,
pre_norm_has_final_norm = True,
gate_residual = False,
scale_residual = False,
scale_residual_constant = 1.,
deepnorm = False,
shift_tokens = 0,
sandwich_norm = False,
resi_dual = False,
resi_dual_scale = 1.,
zero_init_branch_output = False,
layer_dropout = 0.,
cross_attn_tokens_dropout = 0.,
**kwargs
):
super().__init__()
rotary_pos_emb = rotary_pos_emb or rotary_xpos
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = rel_pos_bias or rotary_pos_emb
rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
# relative positional bias
flash_attn = attn_kwargs.get('flash', False)
assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
self.rel_pos = None
if rel_pos_bias:
assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
elif dynamic_pos_bias:
assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
elif alibi_pos_bias:
alibi_num_heads = default(alibi_num_heads, heads)
assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
# determine deepnorm and residual scale
if deepnorm:
assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings'
pre_norm = sandwich_norm = resi_dual = False
scale_residual = True
scale_residual_constant = (2 * depth) ** 0.25
assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
if resi_dual:
pre_norm = False
self.pre_norm = pre_norm
self.sandwich_norm = sandwich_norm
self.resi_dual = resi_dual
assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
self.resi_dual_scale = resi_dual_scale
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
self.cross_attend = cross_attend
assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
if use_scalenorm:
norm_class = ScaleNorm
elif use_rmsnorm:
norm_class = RMSNorm
elif use_simple_rmsnorm:
norm_class = SimpleRMSNorm
else:
norm_class = nn.LayerNorm
norm_fn = partial(norm_class, dim)
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
# zero init
if zero_init_branch_output:
attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
# calculate layer block order
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
# stochastic depth
self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
# structured dropout for cross attending
self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
# calculate token shifting
shift_tokens = cast_tuple(shift_tokens, len(layer_types))
# whether it has post norm
self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
# iterate and construct layers
for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
ind == (len(self.layer_types) - 1)
if layer_type == 'a':
layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads = heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if layer_shift_tokens > 0:
shift_range_upper = layer_shift_tokens + 1
shift_range_lower = -layer_shift_tokens if not causal else 0
layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
residual_fn = GRUGating if gate_residual else Residual
residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
pre_branch_norm = norm_fn() if pre_norm else None
post_branch_norm = norm_fn() if sandwich_norm else None
post_main_norm = norm_fn() if not pre_norm else None
norms = nn.ModuleList([
pre_branch_norm,
post_branch_norm,
post_main_norm
])
self.layers.append(nn.ModuleList([
norms,
layer,
residual
]))
if deepnorm:
init_gain = (8 * depth) ** -0.25
deepnorm_init(self, init_gain)
def forward(
self,
x,
context = None,
mask = None,
context_mask = None,
attn_mask = None,
self_attn_context_mask = None,
mems = None,
return_hiddens = False
):
assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True'
hiddens = []
layer_hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device)
outer_residual = x * self.resi_dual_scale
for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)):
ind == (len(self.layers) - 1)
if self.training and layer_dropout > 0. and random() < layer_dropout:
continue
if layer_type == 'a':
if return_hiddens:
hiddens.append(x)
layer_mem = mems.pop(0) if mems else None
if layer_type == 'c':
if self.training and self.cross_attn_tokens_dropout > 0.:
context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
inner_residual = x
if return_hiddens:
layer_hiddens.append(x)
pre_norm, post_branch_norm, post_main_norm = norm
if exists(pre_norm):
x = pre_norm(x)
if layer_type == 'a':
out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem)
elif layer_type == 'c':
out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn)
elif layer_type == 'f':
out = block(x)
if self.resi_dual:
outer_residual = outer_residual + out * self.resi_dual_scale
if exists(post_branch_norm):
out = post_branch_norm(out)
x = residual_fn(out, inner_residual)
if layer_type in ('a', 'c') and return_hiddens:
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if exists(post_main_norm):
x = post_main_norm(x)
if return_hiddens:
layer_hiddens.append(x)
if self.resi_dual:
x = x + self.final_norm(outer_residual)
else:
x = self.final_norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens = hiddens,
attn_intermediates = intermediates,
layer_hiddens = layer_hiddens
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal = False, **kwargs)
class Decoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on decoder'
super().__init__(causal = True, **kwargs)
class CrossAttender(AttentionLayers):
def __init__(self, **kwargs):
super().__init__(cross_attend = True, only_cross = True, **kwargs)
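# Hedged usage sketch (illustrative sizes): these attention stacks are meant to be
# wrapped, e.g. by the Transformer class defined later in this file.
# model = Transformer(
#     num_tokens = 20000,
#     max_seq_len = 1024,
#     attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
# )
# logits = model(torch.randint(0, 20000, (1, 1024)))  # (1, 1024, 20000)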
class ViTransformerWrapper(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
attn_layers,
channels = 3,
num_classes = None,
post_emb_norm = False,
emb_dropout = 0.
):
super().__init__()
assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size'
dim = attn_layers.dim
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.patch_to_embedding = nn.Sequential(
nn.LayerNorm(patch_dim),
nn.Linear(patch_dim, dim),
nn.LayerNorm(dim)
)
self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
self.dropout = nn.Dropout(emb_dropout)
self.attn_layers = attn_layers
self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
def forward(
self,
img,
return_embeddings = False
):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
n = x.shape[1]
x = x + self.pos_embedding[:, :n]
x = self.post_emb_norm(x)
x = self.dropout(x)
x = self.attn_layers(x)
if not exists(self.mlp_head) or return_embeddings:
return x
x = x.mean(dim = -2)
return self.mlp_head(x)
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim = None,
max_mem_len = 0,
shift_mem_down = 0,
emb_dropout = 0.,
post_emb_norm = False,
num_memory_tokens = None,
tie_embedding = False,
logits_dim = None,
use_abs_pos_emb = True,
scaled_sinu_pos_emb = False,
l2norm_embed = False,
emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
attn_z_loss_weight = 1e-4
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.emb_dim = emb_dim
self.num_tokens = num_tokens
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.shift_mem_down = shift_mem_down
self.l2norm_embed = l2norm_embed
self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
self.pos_emb = always(0)
elif scaled_sinu_pos_emb:
self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.init_()
logits_dim = default(logits_dim, num_tokens)
self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
def init_(self):
if self.l2norm_embed:
nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
if not isinstance(self.pos_emb, always):
nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
return
nn.init.kaiming_normal_(self.token_emb.emb.weight)
def forward(
self,
x,
return_embeddings = False,
return_logits_and_embeddings = False,
return_intermediates = False,
mask = None,
return_mems = False,
return_attn = False,
mems = None,
pos = None,
prepend_embeds = None,
sum_embeds = None,
return_attn_z_loss = False,
attn_z_loss_weight = 1e-4,
**kwargs
):
b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient
return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
# absolute positional embedding
external_pos_emb = exists(pos) and pos.dtype != torch.long
pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos
x = self.token_emb(x) + pos_emb
# for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
if exists(sum_embeds):
x = x + sum_embeds
# post embedding norm, purportedly leads to greater stabilization
x = self.post_emb_norm(x)
# whether to append embeds, as in PaLI, for image embeddings
if exists(prepend_embeds):
prepend_seq, prepend_dim = prepend_embeds.shape[1:]
assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
x = torch.cat((prepend_embeds, x), dim = -2)
# whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
if emb_frac_gradient < 1:
assert emb_frac_gradient > 0
x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
# embedding dropout
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b = b)
x = torch.cat((mem, x), dim = 1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True)
if self.shift_mem_down and exists(mems):
mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
mems = [*mems_r, *mems_l]
if return_hiddens:
x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
else:
x = self.attn_layers(x, mask = mask, mems = mems, **kwargs)
mem, x = x[:, :num_mem], x[:, num_mem:]
if return_logits_and_embeddings:
out = (self.to_logits(x), x)
elif return_embeddings:
out = x
else:
out = self.to_logits(x)
if return_attn_z_loss:
pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
return_intermediates = True
if return_intermediates:
return out, intermediates
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out | VIMA-main | vima/transformer.py |
from falcon.main import Falcon
falcon = Falcon(
temperature=0.5,
top_p=0.9,
max_new_tokens=500,
quantized=True,
system_prompt=""
)
prompt = "What is the meaning of the collapse of the wave function?"
result = falcon.run(prompt=prompt)
print(result) | Falcon-main | example.py |
from falcon.main import Falcon | Falcon-main | falcon/__init__.py |
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
class Falcon:
def __init__(
self,
*,
model_id: str = "tiiuae/falcon-180B",
temperature: float = 0.5,
top_p: float = 0.9,
max_new_tokens: int = 400,
quantized: bool = False,
system_prompt: str = None
):
super().__init__()
self.model_id = model_id
self.temperature = temperature
self.top_p = top_p
self.max_new_tokens = max_new_tokens
self.quantized = quantized
self.system_prompt = system_prompt
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
if self.quantized:
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id,
torch_dtype=torch.bfloat16,
load_in_8bit=True,
device_map="auto",
)
else:
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id,
torch_dtype=torch.bfloat16,
device_map="auto",
)
def run(self, prompt):
inputs = self.tokenizer(prompt, return_tensors="pt").to("cuda")
output = self.model.generate(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
do_sample=True,
temperature=self.temperature,
top_p=self.top_p,
max_new_tokens=self.max_new_tokens
)
output = output[0]
return self.tokenizer.decode(output)
def chat(self, message, history, system_prompt):
prompt = ""
system_prompt = system_prompt or self.system_prompt
if system_prompt:
prompt += f"System: {system_prompt}\n"
for user_prompt, bot_response in history:
prompt += f"User: {user_prompt}\n"
prompt += f"Falcon: {bot_response}\n"
prompt += f"User: {message}\nFalcon:"
inputs = self.tokenizer(
prompt,
return_tensors="pt"
).to("cuda")
output = self.model.generate(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
do_sample=True,
temperature=self.temperature,
top_p=self.top_p,
max_new_tokens=self.max_new_tokens
)
output = output[0].to("cuda")
print(self.tokenizer.decode(output))
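# Hedged usage sketch (not part of the original file): assuming a CUDA-capable
# machine and that the model above loaded successfully, chat() takes the running
# (user, bot) history as a list of pairs. The prompt text and history below are
# illustrative placeholders only.
#
# falcon = Falcon(quantized=True, system_prompt="You are a helpful assistant.")
# history = [("Hello", "Hi! How can I help?")]
# falcon.chat("Summarize wave function collapse.", history, system_prompt=None)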
| Falcon-main | falcon/main.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
preferences.py -- Preferences system.
@author: Bruce
@version: $Id: preferences.py 13965 2008-08-14 20:09:41Z derrickdb1 $
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
Module classification: [bruce 071215]
At least foundation, due to integral use of "from changes import UsageTracker".
But also could be construed to have lots of app-specific knowledge,
due to "from prefs_constants import prefs_table". But for now, experiment
with pretending that's not app-specific, which we can get away with since
it's pure data... and this might even make sense, if different apps
share code which references the same prefs_keys from that table,
as long as we make sure they can use different (per-app) prefs files.
(For the same reason, we'll also classify prefs_constants as foundation
or lower. In fact, it'll be utilities or constants for now, as explained
in its docstring.)
A desirable refactoring might be to classify prefs_constants higher
(model or a specific app) and pass it to a prefs singleton as an argument.
Then it'd be more clearly ok to call this module "foundation", but let
prefs_constants be higher. OTOH, the reason explained above may make it
perfectly ok for prefs_constants to be very low.
==
Prototype for Alpha.
See lower-down docstrings for usage.
==
History:
bruce 050106 or so: created it.
[some minor changes since then]
bruce 050804: added prefs usage/change tracking.
==
Should be used with bsddb,
but works without it too, after printing a warning.
The module bsddb is present in our standard installations
of windows and linux python, but not yet Mac python;
but we can add it, since it's easily available from
http://undefined.org/python/pimp/darwin-7.0.0-Power_Macintosh.html
(package bsddb3 4.1.6)
BUT WE SHOULD LOOK INTO THE LICENSE TO MAKE SURE IT'S OK!
(It probably is, and [050804] I think Huaicai investigated this
and confirmed that it is.)
"""
import os
import time
import sys
import NE1_Build_Constants
_tmpary = NE1_Build_Constants.NE1_RELEASE_VERSION.split(".")
if len(_tmpary) >= 3:
DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s-%s.txt" % \
(_tmpary[0], _tmpary[1], _tmpary[2])
else:
DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s.txt" % \
(_tmpary[0], _tmpary[1])
#Derrick 080703
# note: this name is still hardcoded into
# packaging/Pref_Mod/pref_modifier.py
_tmpFilePath = None
def find_or_make_Nanorex_directory():
"""
Find or make the directory ~/Nanorex, in which we will store
important subdirectories such as Preferences, temporary files, etc.
If it doesn't exist and can't be made, try using /tmp.
[#e Future: for Windows that backup dir should be something other than /tmp.
And for all OSes, we should use a more conventional place to store prefs
if there is one (certainly there is on Mac).]
"""
global _tmpFilePath
if _tmpFilePath:
return _tmpFilePath # already chosen, always return the same one
_tmpFilePath = _find_or_make_nanorex_dir_0()
assert _tmpFilePath
return _tmpFilePath
def _find_or_make_nanorex_dir_0():
"""
private helper function for find_or_make_Nanorex_directory
"""
#Create the temporary file directory if not exist [by huaicai ~041201]
# bruce 041202 comments about future changes to this code:
# - we'll probably rename this, sometime before Alpha goes out,
# since its purpose will become more user-visible and general.
# - it might be good to create a README file in the directory
# when we create it. And maybe to tell the user we created it,
# in a dialog.
# - If creating it fails, we might want to create it in /tmp
# (or wherever some python function says is a good temp dir)
# rather than leaving an unusable path in tmpFilePath. This
# could affect someone giving a demo on a strange machine!
# - If it exists already, we might want to test that it's a
# directory and is writable. If we someday routinely create
# a new file in it for each session, that will be a good-
# enough test.
tmpFilePath = os.path.normpath(os.path.expanduser("~/Nanorex/"))
if not os.path.exists(tmpFilePath):
try:
os.makedirs(tmpFilePath)
except:
sys.exit(1)
#bruce 041202 fixed minor bug in next line; removed return statement
#print_compact_traceback("exception in creating temporary directory: \"%s\"" % tmpFilePath)
#bruce 050104 new feature [needs to be made portable so it works on Windows ###@@@]
#os_tempdir = "/tmp"
#print "warning: using \"%s\" for temporary directory, since \"%s\" didn't work" % (os_tempdir, tmpFilePath)
#tmpFilePath = os_tempdir
#e now we should create or update a README file in there [bruce 050104]
return tmpFilePath
def mkdirs_in_filename(filename):
"""
Make all directories needed for the directory part of this filename,
if nothing exists there. Never make the filename itself (even if it's
intended to be a directory, which we have no way of knowing anyway).
If something other than a directory exists at one of the dirs we might
otherwise make, we don't change it, which will probably lead to errors
in this function or in the caller, which is fine.
"""
dir, file = os.path.split(filename)
if not os.path.exists(dir):
mkdirs_in_filename(dir)
os.mkdir(dir)
if not os.path.exists(dir):
print u"Directory not created: ", dir.encode("utf_8")
return
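# Illustrative example (hypothetical path): mkdirs_in_filename("/tmp/Nanorex/Preferences/bsddb-shelf")
# would create /tmp/Nanorex and /tmp/Nanorex/Preferences if they are missing,
# but never the final "bsddb-shelf" component itself.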
# some imports remain lower down, for now: bsddb and shelve
"""
Some internal & client-code documentation, as of 050106:
We store prefs in a shelf. Restrictions imposed by the shelve module:
Keys must be strings, values can be any pickleable python exprs,
and neither can be extremely long (exact limits are not made clear).
When these restrictions become a problem, we will make our intermediating
layer handle them (for example, by translating long keys to short ones).
==
Concurrent access:
We usually keep the shelf closed, in case other processes want to access or modify it too.
This only works if we assume that these processes only open it briefly when processing
some user event (typed command or clicked button), and this doesn't happen in two processes
at once since the user can only give events to one process at a time. For this reason,
it's important to only open it briefly during a user event (and only at the beginning
if the processing takes a long time), and never any other time!
Also, if you (a process) start another process which might access the prefs when it starts,
you should only access them yourself just before it starts (and during subsequent user events,
assuming that subprocess follows the same rule).
We rely on the client code to follow these rules; we don't try to enforce them.
Breaking them might conceivably trash the entire prefs database, or perhaps more likely,
cause an error in the process trying to access it while another process is doing so.
(This depends on the db module, and I don't know what bsddb does in this case.)
We make no attempt yet to handle these errors or back up the prefs database.
==
Internal shelf key usage:
Current internal shelf key usage (this might change at any time,
without the client-code keys changing):
Keys starting "k " are translated versions of client-code keys;
see internal _attr2key method (which will be renamed).
Keys starting '_' or with a digit are reserved for use by this code.
In fact, all other keys are reserved. Presently used: see the code.
The most important one is _format_version.
==
High-level keys and values:
Keys supplied by client code (translated through _attr2key into shelf keys)
are presently just strings, using conventions still mostly to be invented,
but in the future will be able to be more kinds of objects.
Values supplied by client code will in the future be translated, and have
metainfo added, but this is not yet done. Values must be pickleable, and
also should not include instances of classes until we decide which of
those are ok. (But Numeric arrays are ok.)
For now, all modules use the same global namespace of high-level keys,
but this might change. To permit this, the module defining the key
needs to be detectable by this code... basically this means any given key
should be passed into this module from the same external module.
Details to be documented when they are implemented and become relevant.
==
Usage by client code (for now -- this might change!):
from foundation.preferences import prefs_context
prefs = prefs_context()
key = "some string" # naming conventions to be introduced later
prefs[key] = value
value = prefs[key] # raises KeyError if not there
# these dict-like operations might or might not work
# (not yet tested; someday we will probably support them
# and make them more efficient than individual operations
# when several prefs are changed at once)
prefs.get(key, defaultvalue)
prefs.update(dict1)
dict1.update(prefs)
"""
# ===
# This module wants bsddb, just to make sure the shelf is stored in a format
# that (we hope) all platforms can open. (It also might be more reliable,
# be better at concurrent access, and/or permit longer keys and (especially)
# values than other db packages.)
# But, we'll run without it if necessary, but when we do, we'll use a different
# shelf name, in case the binary formats are incompatible. (Not a perfect solution,
# since there's no guarantee the db format without bsddb is always the same...
# but I don't know a good-enough way to find out which db module shelve is actually using.)
try:
import bsddb3 as _junk
_junk # try to tell pylint we need this import [bruce 071023]
except:
print "Error: import bsddb failed"
sys.exit(1)
else:
dbname = "bsddb"
# And this module requires shelve. We assume without checking that if bsddb is available,
# shelve will use it. (I don't know any straightforward way to check this. But the
# docs for shelve say it will use it, I think. #k check this ###@@@)
from bsddb3 import dbshelve as shelve
# (For the actual filename of the prefs file, see the code of _make_prefs_shelf()
# below, which specifies the basename only; the db module decides what extension
# to add. This is one reason we store the prefs in a subdirectory.)
# ===
_shelfname = _shelf = _cache = None
_defaults = _trackers = None #bruce 050804 new features
def _make_prefs_shelf():
"""
[private function]
call this once per session,
to create or find the shelf (whose name depends only on the dbm format we'll use for it),
and create the cache of its contents,
and store a comment there about this process,
and close the shelf again in case a concurrent process is sharing the same shelf with us.
"""
global _shelfname, _shelf, _cache, _defaults, _trackers
nanorex = find_or_make_Nanorex_directory()
global dbname
_shelfname = os.path.join( nanorex, "Preferences", "%s-shelf" % dbname )
# This name should differ when db format differs.
# Note: the actual filename used might have an extension added
# by the db module (in theory, it might even create two files
# with different extensions from the given basename).
# By experiment, on the Mac, with bsddb there is no extension added,
# and without it there is '.db' added. [bruce 050105]
mkdirs_in_filename(_shelfname)
_shelf = shelve.open(_shelfname.encode("utf_8"))
_cache = {}
_cache.update(_shelf) # will this work?
was_just_made = (not _cache) #bruce 080505
if was_just_made:
print u"made prefs db, basename", _shelfname.encode("utf_8")
else:
print u"prefs db already existed, basename", _shelfname.encode("utf_8")
_defaults = {}
_trackers = {}
# zap obsolete contents
obskeys = []
for key in _cache.keys():
if key.isdigit() or key in ['_session_counter']:
obskeys.append(key)
for key in obskeys:
del _shelf[key]
del _cache[key]
###@@@ following should be revised to handle junk contents gracefully,
# and to notice the existing format version and handle older formats appropriately
# or reject them gracefully.
_store_while_open('_format_version', 'preferences.py/v050106')
# storing this blindly is only ok since the only prior version is one
# we can transparently convert to this one by the "zap obskeys" above.
# store a comment about the last process to start using this shelf
# (nothing yet looks at this comment)
proc_info = "process: pid = %d, starttime = %r" % (os.getpid(), time.asctime())
_store_while_open( '_fyi/last_proc', proc_info ) # (nothing yet looks at this)
_close()
if was_just_made:
# use DEFAULT_PREFS_BASENAME [bruce 080505 new feature];
# file format must correspond with that written by
# packaging/Pref_Mod/pref_modifier.py
default_prefs_values = {}
# read the values from DEFAULT_PREFS_BASENAME
# (while shelf is closed, in case this takes time)
try:
filename = os.path.join( nanorex, "Preferences", DEFAULT_PREFS_BASENAME )
if not os.path.exists(filename):
lines = []
print u"didn't find", filename.encode("utf_8")
else:
file = open( filename, "rU")
lines = file.readlines()
file.close()
print u"reading from", filename.encode("utf_8")
for line in lines:
line0 = line
try:
# try/except so corrupted lines don't break good ones added later
# assume line has the correct format: key = val\n
while line[-1] in ('\r', '\n'):
# 'while' is to handle Windows newlines
# (probably not needed due to 'rU')
line = line[:-1]
key, val = line.split(" = ")
# don't strip key or val -- they might end with spaces
def decode(string1):
words = string1.split(r'\\')
for i in range(len(words)):
word = words[i]
word = word.replace(r'\=', '=')
word = word.replace(r'\n', '\n')
word = word.replace(r'\r', '\r')
words[i] = word
continue
return '\\'.join(words)
key = decode(key)
val = decode(val)
if val == 'True':
val = True
elif val == 'False':
val = False
default_prefs_values[key] = val
# print "read key, val = (%r, %r)" % (key, val)
pass
except:
print "ignoring exception in this line: %r" % (line0,)
pass
continue
pass
except:
print "ignoring exception reading from", DEFAULT_PREFS_BASENAME
default_prefs_values = {}
pass
items = default_prefs_values.items()
items.sort() # just to make the following console prints look nicer
# now open, store the values, and close
_shelf = shelve.open(_shelfname.encode("utf_8"))
for key, val in items:
pkey = _PREFS_KEY_TO_SHELF_KEY(key)
_store_while_open( pkey, val)
print "stored key, val = (%r, %r)" % (key, val)
_close()
pass
return
def _close():
global _shelf
_shelf.close()
_shelf = None
return
def _reopen():
_ensure_shelf_exists()
global _shelf
assert _shelf is None
_shelf = shelve.open(_shelfname.encode("utf_8"))
# don't bother to re-update our _cache! This would be too slow to do every time.
return
def _store_new_while_open(key, val): # [not used as of 050804]
assert not _shelf.has_key(key) # checks _shelf, not merely _cache
assert not _cache.has_key(key)
_cache[key] = val
_shelf[key] = val
return
def _store_while_open(key, val): # [used only when initializing the shelf, as of 050804]
# don't assert _cache and _shelf are the same at this key -- it's not an error if they are not,
# or if shelf has a value and cache does not, since a concurrent process is allowed to write
# a prefs value on its own.
_cache[key] = val
_shelf[key] = val
return
def _ensure_shelf_exists():
if not _shelfname:
_make_prefs_shelf()
return
#bruce 050804/050805 new features:
def _track_change(pkey):
_tracker_for_pkey( pkey).track_change()
def _track_use(pkey):
_tracker_for_pkey( pkey).track_use()
def _tracker_for_pkey(pkey):
try:
return _trackers[pkey]
except KeyError:
sys.exit(1)
#tracker = _trackers[pkey] = UsageTracker()
#return tracker
pass
def _get_pkey_key(pkey, key): #bruce 050804 split this out of __getitem__ so I can also use it in get (both methods)
"""
[#doc better; note: pkey and key args are redundant;
they're both provided just for this implem's convenience]
"""
_track_use(pkey) # note, this is done even if we raise KeyError below (which is good)
try:
return _cache[pkey]
except KeyError:
raise KeyError, key # note: exception detail is key, not pkey as it would be if we just said "raise"
pass
def _get_pkey_faster(pkey): # optimization of _get_pkey_key(pkey, key) when the KeyError exception detail doesn't matter
_track_use(pkey)
return _cache[pkey]
def _record_default( pkey, dflt):
"""
Record this default value (if none is yet known for pkey),
so other code can find out what the default value is,
for use in "restore defaults" buttons in prefs UI.
In debug version, also ensure this is the same as any previously recorded default value.
Note, dflt can be anything, even None, though some callers have a special case
which avoids calling this when dflt is None.
"""
_defaults.setdefault( pkey, dflt) # only affects it the first time, for a given pkey
if debug_flags.atom_debug:
# also check consistency each time
if dflt != _defaults[pkey]:
print "atom_debug: bug: ignoring inconsistent default %r for pref %r; retaining %r" % \
( dflt, pkey, _defaults[pkey] ) #e also print key if in future the key/pkey relation gets more complex
return
def _restore_default_while_open( pkey): #bruce 050805
"""
Remove the pref for pkey from the prefs db (but no error if it's not present there).
As for the internal value of the pref (in _cache, and for track_change, and for subscriptions to its value):
If a default value has been recorded, change the cached value to that value
(as it would be if this pref had originally been missing from the db, and a default value was then recorded).
If not, remove it from _cache as well, and use the internal value of None.
Either way, if the new internal value differs from the one before this function was called,
track the change and fulfill any subscriptions to it.
If possible, don't track a use of the prefs value.
"""
priorval = _cache.get(pkey) # might be None
if _shelf.has_key(pkey):
del _shelf[pkey]
try:
dflt = _defaults[pkey]
except KeyError:
if debug_flags.atom_debug:
print "atom_debug: fyi: restore defaults finds no default yet recorded for %r; using None" % pkey
_cache[pkey] = dflt = None
del _cache[pkey]
else:
_cache[pkey] = dflt
if dflt != priorval:
_track_change(pkey)
#e fulfill any subscriptions to this value (if this is ever done by something other than track_change itself)
return
def keys_list( keys): #bruce 050805
"""
Given a key or a list of keys (or a nested list), return an equivalent list of keys.
Note: tuples of keys are not allowed (someday they might be a new kind of primitive key).
"""
res = []
if type(keys) == type([]):
for sub in keys:
res.extend( keys_list( sub) )
#e could be optimized (trivially, if we disallowed nested lists)
else:
assert type(keys) == type("a")
res.append(keys)
return res
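# Illustrative examples: keys_list('a') == ['a'] and
# keys_list(['a', ['b', 'c']]) == ['a', 'b', 'c']; nested lists are flattened,
# while a tuple would fail the assert in keys_list above.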
# ==
# Now make a prefs function, which returns a prefs object [someday] customized for the calling module,
# in which prefs can be accessed or stored using attributes, whose names are interpreted in a context
# which might differ for each module.
_NOT_PASSED = [] # private object for use as keyword arg default [bruce 070110, part of fixing bug of None as Choice value]
# (note, the same global name is used for different objects in preferences.py and debug_prefs.py)
def _PREFS_KEY_TO_SHELF_KEY(prefs_key):
"""
Translate a prefs_key string (used in external code)
to a shelf database key string (called "pkey" in some local variables).
"""
#bruce 080505 split this out of _prefs_context._attr2key
return "k " + prefs_key
class _prefs_context:
"""
Represents a symbol context for prefs names, possibly [someday] customized for one module.
"""
def __init__(self, modname):
# modname is not presently used
_ensure_shelf_exists() # needed before __getattr__ and __getitem__ are called
self.trackers = {}
def _attr2key(self, attr): # note: method and its arg are misnamed.
return _PREFS_KEY_TO_SHELF_KEY(attr)
#e Someday we will support more complex keys,
# which are like exprs whose heads (at all levels) are in our context.
# For now, just support arbitrary strings as items.
def __setitem__(self, key, val):
assert type(key) == type("a") # not unicode, numbers, lists, ... for now
pkey = self._attr2key(key) # but we might use a more general func for this, at some point
try:
#bruce 050804 new feature: detect "change with no effect" (where new value equals existing value),
# so we can avoid tracking that as an actual change.
# We also avoid tracking this as a use (even though we do use the value for the comparison).
# And, while we're at it, optimize by not changing the prefs db in this case.
# This is not just an optimization, since if the prefs db contains no value for this pref,
# and no value other than the default value (according to the current code) has been stored during this session
# and if this remains true in the present call (i.e. val equals the default value),
# then (due to some of today's changes to other code here, particularly self.get storing dflt in cache), #####IMPLEM
# we won't store anything in the prefs db now.
cached_val = _cache[pkey] # this might be a default value from the present code which is not in the prefs db
except KeyError:
same = False
else:
# If no default value is known, we consider any value to differ from it.
# [##e Would it be better to treat this as if the default value was None (like prefs.get does)??]
same = (val == cached_val)
if same:
if 0 and debug_flags.atom_debug:
print "atom_debug: fyi: returning early from prefs.__setitem__(%r) since val == cached_val, %r == %r" % (key, val, cached_val)
return # see long comment above
if _shelf:
_shelf[pkey] = _cache[pkey] = val
#_track_change(pkey) # do this only after the change happens, for the sake of formulas...
#e (someday we might pass an arg saying the change is done, or the curval is merely invalid,
# and if the latter, whether another track_change will occur when the change is done.)
else:
try:
_reopen()
_shelf[pkey] = _cache[pkey] = val
_track_change(pkey)
finally:
_close()
return
def __getitem__(self, key):
assert type(key) == type("a")
pkey = self._attr2key(key)
return _get_pkey_key( pkey, key)
def get(self, key, dflt = _NOT_PASSED): #bruce 050117; revised 050804, and 070110 to use _NOT_PASSED
assert type(key) == type("a")
pkey = self._attr2key(key)
if dflt is not _NOT_PASSED:
_record_default( pkey, dflt)
#bruce 070110 bugfix: use _NOT_PASSED rather than None.
# Before this fix, passing None explicitly as dflt would fail to record it, which could cause later exceptions
# when client code used env.prefs[key] if the pref had never been saved. This was one of two bugs in
# using a Choice value of None in debug_prefs.py. The other part is fixed in debug_prefs.py dated today.
del dflt # [if dflt was used below and we removed this del, we'd need to replace _NOT_PASSED with None in this localvar]
try:
return _get_pkey_faster( pkey) # optim of self[key]
# note: usage of this pref is tracked in _get_pkey_faster even if it then raises KeyError.
except KeyError:
#bruce 050804 new features (see long comment in __setitem__ for partial explanation):
# if default value must be used, then
# (1) let it be the first one recorded regardless of the one passed to this call, for consistency;
# (2) store it in _cache (so this isn't called again, and for other reasons mentioned in __setitem__)
# but not in the prefs db itself.
try:
dflt = _defaults[pkey] # might be None, if that was explicitly recorded by a direct call to _record_default
except KeyError:
# no default value was yet recorded
dflt = None # but don't save None in _cache in this case
if debug_flags.atom_debug:
print "atom_debug: warning: prefs.get(%r) returning None since no default value was yet recorded" % (key,)
else:
_cache[pkey] = dflt # store in cache but not in prefs-db
return dflt
pass
def update(self, dict1): #bruce 050117
# note: unlike repeated setitem, this only opens and closes once.
if _shelf:
for key, val in dict1.items():
#e (on one KeyError, should we store the rest?)
#e (better, should we check all keys before storing anything?)
self[key] = val #e could optimize, but at least this leaves it open
# that will do _track_use(pkey); if we optimize this, remember to do that here.
else:
try:
_reopen()
self.update(dict1)
finally:
_close()
return
def suspend_saving_changes(self): #bruce 051205 new feature
"""
Let prefs changes after this point be saved in RAM and take full effect
(including notifying subscribers),
but not be saved to disk until the next call to resume_saving_changes
(which should be called within the same user command or mouse drag,
but not for every mouse motion during a drag).
Use this to prevent constant updates to disk for every mouse motion
during a drag (e.g. as a prefs slider is adjusted).
Warn if called when changes are already suspended,
but as a special case to mitigate bugs of failing to call resume,
save all accumulated changes whenever called.
"""
if _shelf:
# already suspended -- briefly resume (so they're saved) before suspending (again)
print "bug: suspend_saving_changes when already suspended -- probably means resume was missing; saving them now"
_close()
_reopen()
return
def resume_saving_changes(self, redundant_is_ok = False): #bruce 051205 new feature
"""
Resume saving changes, after a call of suspend_saving_changes.
Optional redundant_is_ok = True prevents a warning about a redundant call;
this is useful for letting callers make sure changes are being saved
when they should be (and probably already are).
"""
if _shelf:
if redundant_is_ok: # this case untested (no immediate use is planned as of 051205)
print "Warning: resume_saving_changes(redundant_is_ok = True) was in fact redundant --"
print " i.e. it may have been necessary to work around a bug and save prefs."
_close()
else:
if not redundant_is_ok:
print "warning: redundant resume_saving_changes ignored"
return
def restore_defaults(self, keys): #bruce 050805
"""
Given a key or a list of keys,
restore the default value of each given preference
(if one has yet been recorded, e.g. if prefs.get has been provided with one),
with all side effects as if the user set it to that value,
but actually remove the value from the prefs db as well
(so if future code has a different default value for the same pref,
that newer value will be used by that future code).
[#e we might decide to make that prefs-db-removal feature optional.]
"""
if _shelf:
for key in keys_list( keys):
pkey = self._attr2key(key)
_restore_default_while_open( pkey)
else:
try:
_reopen()
self.restore_defaults( keys)
finally:
_close()
return
def get_default_values(self, keys): #bruce 080131 UNTESTED @@@@
"""
@param keys: a list of key strings (tuple not allowed; nested list not allowed)
"""
assert type(keys) == type([])
return map( self.get_default_value, keys)
def get_default_value(self, key, _default_return_value = None): #bruce 080131/080201 UNTESTED @@@@
"""
@param key: a key string
"""
# review: should default value of _default_return_value be None (as now), or _NOT_PASSED?
assert type(key) == type("")
pkey = self._attr2key(key)
dflt = _defaults.get(pkey, _default_return_value)
return dflt
def has_default_value(self, key): #bruce 080131/080201 UNTESTED @@@@
"""
@param key: a key string
"""
# This is a ###STUB in a few ways:
# - it ought to compare using same_vals, not != (also in setitem??)
# - the specification doesn't say what to do when no default is yet recorded
# - old version without _NOT_PASSED:
# it might record a default of None if no default is yet recorded (not sure)
# - new version with _NOT_PASSED: correctness not fully reviewed
dflt = self.get_default_value(key, _NOT_PASSED)
current = self.get(key, dflt) # does usage tracking (good)
same = not (dflt != current)
# (note: this is a safer comparison than ==, but not perfect,
# re Numeric arrays)
return same
def have_default_values(self, keys): #bruce 080201 UNTESTED @@@@
"""
Return True if every prefs key in the given list currently has
its default value (i.e. if restore_defaults would not
change their current values).
@param keys: a list of key strings (tuple not allowed; nested list not allowed)
"""
assert type(keys) == type([])
# note: I think this does not access the shelf,
# so we don't need to optimize it to only open the shelf once.
for key in keys:
if not self.has_default_value(key):
return False
return True
pass # end of class _prefs_context
# for now, in this stub code, all modules use one context:
_global_context = _prefs_context("allmodules")
def prefs_context():
###@@@ stub: always use the same context, not customized to the calling module.
return _global_context
# ==
# initialization code [bruce 050805] (includes the set of env.prefs)
def declare_pref( attrname, typecode, prefskey, dflt = None ): # arg format is same as prefs_table record format
assert typecode in ['color','boolean','string','int', 'float'] or type(typecode) == type([]) #e or others as we define them
#e create type object from typecode
#e get dflt from type object if it's None here, otherwise tell this dflt to type object
#e record type object
#e use attrname to set up faster/cleaner access to this pref?
#e etc.
# Record the default value now, before any other code can define it or ask for the pref.
# (This value is used if that pref is not yet in the db;
# it's also used by "reset to default values" buttons in the UI,
# though those will have the side effect of defining that value in the db.)
prefs = prefs_context()
if dflt is not None:
curvaljunk = prefs.get( prefskey, dflt)
return
#def init_prefs_table( prefs_table): # sets env.prefs
# for prefrec in prefs_table:
# try:
# declare_pref(*prefrec)
# except:
# print_compact_traceback( "ignoring prefs_table entry %r with this exception: " % (prefrec,) )
# pass
#
# env.prefs = prefs_context() # this is only ok because all modules use the same prefs context.
#
# if 0 and debug_flags.atom_debug:
# print "atom_debug: done with prefs_table" # remove when works
# return
#init_prefs_table( prefs_table)
# this is guaranteed to be done before any prefs_context object exists, including env.prefs
# (but not necessarily just after this module is imported, though presently, it is;
# similarly, it's not guaranteed that env.prefs exists arbitrarily early,
# though in practice it does after this module is imported, and for now it's ok
# to write code which would fail if that changed, since it'll be easy to fix that code
# (and to detect that we need to) if it ever does change.)
# ==
'''
use prefs_context() like this:
prefs = prefs_context() # once per module which uses it (must then use it in the same module)
... prefs['atom_debug'] = 1
... if prefs['atom_debug']:
...
or make up keys as strings and use indexing, prefs[key],
but try to compute the strings in only one place
and use them from only one module.
We will gradually introduce naming conventions into the keys,
for example, module/subname, type:name. These will be documented
once they are formalized.
[these rules might be revised!]
'''
# == test code (very incomplete) [revised 050804 since it was out of date]
if __name__ == '__main__':
## defaults = dict(hi = 2, lo = 1)
## print "grabbing %r, got %r" % (defaults, grab_some_prefs_from_cache(defaults))
## new = dict(hi = time.asctime())
## print "now will store new values %r" % new
## store_some_prefs(new)
## print "now we grab in same way %r" % grab_some_prefs_from_cache(defaults) # this failed to get new value, but next proc gets it
## print "done with this grossly incomplete test; the shelfname was", _shelfname
# now try this:
testprefs = prefs_context()
testprefs['x'] = 7
print "should be 7:",testprefs['x']
# end
| NanoCAD-master | packaging/Pref_Mod/preferences.py |
# setup.py
from distutils.core import setup
import py2exe
setup(version = "1.0.0", description = "Preferences modifier for databases", name = "pref_modifier", console=["pref_modifier.py"])
| NanoCAD-master | packaging/Pref_Mod/setup_win.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
prefs_constants.py
Constants and utilities related to user preferences,
which need to be defined immediately upon startup.
@author: Mark, Bruce, Ninad
@version: $Id: prefs_constants.py 11951 2008-03-14 04:44:50Z ericmessick $
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
History:
Mark 050629 moved some A6 prefs keys he had earlier defined and organized
in UserPrefs.py, into constants.py.
Bruce 050805 moved those into this new file, and added more.
Module classification:
"utilities" or perhaps "constants" for now, even though it can be
thought of as containing app-specific knowledge; for reasons and caveats
and desirable refactoring, see preferences.py docstring. The reason it
is even lower than foundation is to avoid package import cycles, e.g. if
foundation -> io -> this, or if utilities.GlobalPreferences imports this.
[bruce 071215]
Refactoring needed:
- See preferences.py docstring.
- Has a few functions that ought to be split out, like
getDefaultWorkingDirectory.
"""
import sys, os # for getDefaultWorkingDirectory
# Keys for user preferences
# (the string constants should start with the first released version they'll appear in)
# General prefs
displayCompass_prefs_key = 'A6/Display Compass'
displayCompassLabels_prefs_key = 'A7/Display Compass Label'
compassPosition_prefs_key = 'A6/Compass Position'
displayOriginAxis_prefs_key = 'A6/Display Origin Axis'
displayPOVAxis_prefs_key = 'A6/Display POV Axis'
defaultProjection_prefs_key = 'A7/Default Projection'
animateHighQualityGraphics_prefs_key = 'A7/Animate with High Quality Graphics' #mark 060315. NIY.
animateStandardViews_prefs_key = 'A7/Animate Standard Views'
animateMaximumTime_prefs_key = 'A7/Maximum Animation Time'
workingDirectory_prefs_key = 'WorkingDirectory' # Moved here from startup_funcs.py. Mark 060726.
backgroundColor_prefs_key = 'A9/Background Color' # Mark 060814.
backgroundGradient_prefs_key = 'A9/Background Gradient' # Mark 060814.
##defaultDisplayMode_prefs_key = 'A9/Default Display Mode' # Mark 060815.
# [disabled since assigned differently below -- bruce 080212]
mouseSpeedDuringRotation_prefs_key = 'A9/Mouse Speed During Rotation' #Ninad 060906
displayOriginAsSmallAxis_prefs_key = 'A9/Display Origin As Small Axis' #Ninad 060920
zoomAboutScreenCenter_prefs_key = 'A9/Zoom To Screen Center' #Ninad 060926
displayRulers_prefs_key = 'A10/Display rulers'
displayVertRuler_prefs_key = 'A10/Display vertical ruler'
displayHorzRuler_prefs_key = 'A10/Display horizontal ruler'
rulerPosition_prefs_key = 'A10/Ruler Position'
rulerColor_prefs_key = 'A10/Ruler Color'
rulerOpacity_prefs_key = 'A10/Ruler Opacity'
showRulersInPerspectiveView_prefs_key = 'A10/Show Rulers In Perspective View'
#ToolTip Prefs
dynamicToolTipWakeUpDelay_prefs_key = 'A9/DynamicToolTip Wake Up Delay'
dynamicToolTipAtomDistancePrecision_prefs_key = 'A9/DynamicToolTip Atom Distance Precision'
dynamicToolTipBendAnglePrecision_prefs_key = 'A9/DynamicToolTip Bend Angle Precision'
dynamicToolTipTorsionAnglePrecision_prefs_key = 'A9/DynamicToolTip Torsion Angle Precision'
dynamicToolTipAtomChunkInfo_prefs_key = 'A9/DynamicToolTip Atom Chunk Info'
dynamicToolTipBondChunkInfo_prefs_key = 'A9/DynamicToolTip Bond Chunk Info'
dynamicToolTipAtomPosition_prefs_key = 'A9/DynamicToolTip Atom Position'
dynamicToolTipAtomDistanceDeltas_prefs_key = 'A9/DynamicToolTip Atom Distance Deltas'
dynamicToolTipBondLength_prefs_key = 'A9/DynamicToolTip Bond Length'
dynamicToolTipAtomMass_prefs_key = 'A9/DynamicToolTip Atom Mass'
dynamicToolTipVdwRadiiInAtomDistance_prefs_key = 'A10/tooltip Vdw Radii In Atom Distance'
# Minimize prefs for Adjust All and Adjust Selection (presently on General prefs pane)
# (note, Adjust Atoms does not yet have its own prefs -- its values are derived from these
# but differently than for Adjust All/Sel)
#mark 060627, revised by bruce 060628, 060705 for A8
Adjust_watchRealtimeMinimization_prefs_key = 'A7/Watch Realtime Minimization' # same key as in A7
Adjust_endRMS_prefs_key = 'A8/End RMS Adjust'
Adjust_endMax_prefs_key = 'A8/End Max Adjust'
Adjust_cutoverRMS_prefs_key = 'A8/Cutover RMS Adjust'
Adjust_cutoverMax_prefs_key = 'A8/Cutover Max Adjust'
Adjust_minimizationEngine_prefs_key = 'A10/Adjust Minimization Engine'
#Ninad 20070509 Adjust , Minimize and Simulation(Dynamics) Preferences for DNA
#reduced model (Enable or disable electrostatics)
electrostaticsForDnaDuringAdjust_prefs_key = 'A9/ Electrostatics for Dna During Adjust'
electrostaticsForDnaDuringMinimize_prefs_key = 'A9/ Electrostatics For Dna During Minimize'
electrostaticsForDnaDuringDynamics_prefs_key = 'A9/ Electrostatics For Dna During Simulation'
# Minimize prefs for Minimize Energy dialog (independent settings, different defaults) [bruce 060705]
Minimize_watchRealtimeMinimization_prefs_key = 'A8/Watch Realtime Minimization Minimize'
Minimize_endRMS_prefs_key = 'A8/End RMS Minimize'
Minimize_endMax_prefs_key = 'A8/End Max Minimize'
Minimize_cutoverRMS_prefs_key = 'A8/Cutover RMS Minimize'
Minimize_cutoverMax_prefs_key = 'A8/Cutover Max Minimize'
Minimize_minimizationEngine_prefs_key = 'A10/Minimize Minimization Engine'
# Pref to add potential energy to trace file
Potential_energy_tracefile_prefs_key = 'A8/Potential energy checkbox'
# Atom prefs
atomHighlightColor_prefs_key = 'A6/Atom Highlight Color'
deleteAtomHighlightColor_prefs_key = 'A10/Delete Atom Highlight Color'
bondpointHighlightColor_prefs_key = 'A7/Bondpoint Highlight Color'
bondpointHotspotColor_prefs_key = 'A6/Atom Hotspot Color'
defaultDisplayMode_prefs_key = 'A6/Default Display Mode'
diBALL_AtomRadius_prefs_key = 'A7/CPK Atom Radius Percentage' # this is about diBALL which as of 060307 is called Ball and Stick in UI
#bruce 060607 renamed cpkAtomRadius_prefs_key -> diBALL_AtomRadius_prefs_key ###DOIT
cpkScaleFactor_prefs_key = 'A7/CPK Scale Factor' # this is about diTrueCPK which as of 060307 is called CPK in UI
levelOfDetail_prefs_key = 'A7/Level Of Detail'
keepBondsDuringTransmute_prefs_key = 'A9/Keep Bonds During Transmute'
# Bond prefs
bondHighlightColor_prefs_key = 'A6/Bond Highlight Color'
deleteBondHighlightColor_prefs_key = 'A10/Delete Bond Highlight Color'
bondStretchColor_prefs_key = 'A6/Bond Stretch Color'
bondVaneColor_prefs_key = 'A6/Bond Vane Color'
diBALL_bondcolor_prefs_key = 'A6/Bond CPK Color' # this is about diBALL, not CPK [bruce 060607 comment]
#bruce 060607 renamed bondCPKColor_prefs_key -> diBALL_bondcolor_prefs_key ###DOIT
showBondStretchIndicators_prefs_key = 'A9/ Show Bond Stretch Indicators'
pibondStyle_prefs_key = 'A6/Pi Bond Style'
pibondLetters_prefs_key = 'A6/Pi Bond Letters'
showValenceErrors_prefs_key = 'A6/Show Valence Errors'
#display lines mode line thickness, mark 050831
linesDisplayModeThickness_prefs_key = 'A7/Line Thickness for Lines Display Mode'
#CPK cylinder radius (percentage), mark 051003
diBALL_BondCylinderRadius_prefs_key = 'A7/CPK Cylinder Radius Percentage' # about diBALL, called Ball and Stick as of 060307
#bruce 060607 renamed cpkCylinderRadius_prefs_key -> diBALL_BondCylinderRadius_prefs_key ###DOIT
diDNACYLINDER_BondCylinderRadius_prefs_key = 'A10/DNA Cylinder Bond Radius Percentage'
# DNA prefs
adnaBasesPerTurn_prefs_key = 'A10/A-DNA bases per turn' # Twist computed from this.
adnaRise_prefs_key = 'A10/A-DNA rise step'
bdnaBasesPerTurn_prefs_key = 'A10/B-DNA bases per turn' # Twist computed from this.
bdnaRise_prefs_key = 'A10/B-DNA rise step'
zdnaBasesPerTurn_prefs_key = 'A10/Z-DNA bases per turn' # Twist computed from this.
zdnaRise_prefs_key = 'A10/Z-DNA rise step'
dnaDefaultSegmentColor_prefs_key = 'A10/DNA default segment color'
dnaColorBasesBy_prefs_key = 'A10/DNA color bases by'
dnaStrutScaleFactor_prefs_key = 'A10/DNA strut scale factor'
arrowsOnBackBones_prefs_key = 'A9/ Show arrows on all directional bonds'
arrowsOnThreePrimeEnds_prefs_key = 'A9/ Show three prime ends as out arrow heads'
arrowsOnFivePrimeEnds_prefs_key = 'A9/ Show five prime ends as in arrow heads'
dnaStyleStrandsShape_prefs_key = 'A10/DNA style strands shape' # DNA style prefs piotr 080310
dnaStyleStrandsColor_prefs_key = 'A10/DNA style strands color'
dnaStyleStrandsScale_prefs_key = 'A10/DNA style strands scale'
dnaStyleStrandsArrows_prefs_key = 'A10/DNA style strands arrows'
dnaStyleAxisShape_prefs_key = 'A10/DNA style axis shape'
dnaStyleAxisColor_prefs_key = 'A10/DNA style axis color'
dnaStyleAxisScale_prefs_key = 'A10/DNA style axis scale'
dnaStyleAxisTaper_prefs_key = 'A10/DNA style axis taper'
dnaStyleStrutsShape_prefs_key = 'A10/DNA style struts shape'
dnaStyleStrutsColor_prefs_key = 'A10/DNA style struts color'
dnaStyleStrutsScale_prefs_key = 'A10/DNA style struts scale'
dnaStyleBasesShape_prefs_key = 'A10/DNA style bases shape'
dnaStyleBasesColor_prefs_key = 'A10/DNA style bases color'
dnaStyleBasesScale_prefs_key = 'A10/DNA style bases scale'
# Modes prefs [added by mark 050910]
# The background style and color for each mode is initialized in init_prefs()
# of the superclass basicMode (modes.py).
startupMode_prefs_key = 'A7/Startup Mode'
defaultMode_prefs_key = 'A7/Default Mode'
buildModeAutobondEnabled_prefs_key = 'A7/Build Mode Autobond Enabled' # mark 060203.
buildModeWaterEnabled_prefs_key = 'A7/Build Mode Water Enabled' # mark 060203.
buildModeHighlightingEnabled_prefs_key = 'A7/Build Mode Highlighting Enabled' # mark 060203.
buildModeSelectAtomsOfDepositedObjEnabled_prefs_key = 'A7/Build Mode Select Atoms of Deposited Obj Enabled' # mark 060304.
# Selection Behavior
permit_atom_chunk_coselection_prefs_key = 'A9 devel2/permit_atom_chunk_coselection'
# Lighting prefs [most added by mark 051124 or later]
## old_glpane_lights_prefs_key = "glpane lighting" #bruce 051206 moved this here from GLPane;
# it was hardcoded in two methods in GLPane; maybe dates from before prefs_constants module;
# in the next commit it was abandoned and changed as a fix of bug 1181; see comments near its uses in GLPane.
glpane_lights_prefs_key = 'A7/glpane lighting' #bruce 051206 introduced this key to fix bug 1181
light1Color_prefs_key = 'A7/Light1 Color' #bruce 051206 comment: this looks redundant with elements in GLPane._lights; why?
light2Color_prefs_key = 'A7/Light2 Color'
light3Color_prefs_key = 'A7/Light3 Color'
material_specular_highlights_prefs_key = 'A7/Material Specular Highlights'
material_specular_finish_prefs_key = 'A7/Material Specular Finish'
material_specular_shininess_prefs_key = 'A7/Material Specular Shininess'
material_specular_brightness_prefs_key = 'A7/Material Specular Brightness'
# File management / filename / URL preferences [tentative category, added by bruce 051130, more comments below]
wiki_help_prefix_prefs_key = 'A7/Wiki Help Prefix'
# Plug-ins prefs [added by mark 050918]
qutemol_path_prefs_key = 'A9/QuteMol Path'
qutemol_enabled_prefs_key = 'A9/QuteMol Enabled'
nanohive_path_prefs_key = 'A7/Nano-Hive Executable Path'
nanohive_enabled_prefs_key = 'A7/Nano-Hive Enabled'
povray_path_prefs_key = 'A8/POV-Ray Executable Path'
povray_enabled_prefs_key = 'A8/POV-Ray Enabled'
megapov_path_prefs_key = 'A8/MegaPOV Executable Path'
megapov_enabled_prefs_key = 'A8/MegaPOV Enabled'
povdir_path_prefs_key = 'A8/POV Include Directory' # only in Mac A8, for Windows will be in A8.1 (Linux??) [bruce 060710]
povdir_enabled_prefs_key = 'A8/POV Include Directory Enabled' # ditto, and might not end up being used [bruce 060710]
gmspath_prefs_key = 'A6/GAMESS Path'
gamess_enabled_prefs_key = 'A7/GAMESS Enabled'
gromacs_path_prefs_key = 'A10/GROMACS Path'
gromacs_enabled_prefs_key = 'A10/GROMACS Enabled'
cpp_path_prefs_key = 'A10/cpp Path'
cpp_enabled_prefs_key = 'A10/cpp Enabled'
nv1_path_prefs_key = 'A10/NanoVision-1 Path'
nv1_enabled_prefs_key = 'A10/NanoVision-1 Enabled'
# Undo and History prefs
undoRestoreView_prefs_key = 'A7/Undo Restore View'
undoAutomaticCheckpoints_prefs_key = 'A7/Undo Automatic Checkpoints'
undoStackMemoryLimit_prefs_key = 'A7/Undo Stack Memory Limit'
historyHeight_prefs_key = 'A6/History Height'
historyMsgSerialNumber_prefs_key = 'A6/History Message Serial Number'
historyMsgTimestamp_prefs_key = 'A6/History Message Timestamp'
# Window prefs (used to be called Caption prefs)
rememberWinPosSize_prefs_key = "A7/Remember Window Pos and Size" #mark 060315. NIY.
mainwindow_geometry_prefs_key_prefix = "main window/geometry" #bruce 051218 moved this from debug.py
captionPrefix_prefs_key = 'A6/Caption Prefix'
captionSuffix_prefs_key = 'A6/Caption Suffix'
captionFullPath_prefs_key = 'A6/Caption Full Path'
useSelectedFont_prefs_key = 'A9/Use Selected Font'
displayFont_prefs_key = 'A9/Display Font'
displayFontPointSize_prefs_key = 'A9/Display Font Point Size'
mtColor_prefs_key = 'A9/Model Tree Background Color' # Not yet in Preferences. Mark 2007-06-04
toolbar_state_prefs_key = 'A10/ Toolbar State '
displayReportsWidget_prefs_key = 'A10/Display Reports Widget'
#colorTheme_prefs_key = 'A9/Color Theme'
# Sponsor prefs
sponsor_download_permission_prefs_key = 'A8/Sponsor download permission'
sponsor_permanent_permission_prefs_key = 'A8/Sponsor download permission is permanent'
# The following key is not a user preference, it's a state variable that is used
# to keep track of when the sponsor logos files change. This will go away once
# Sponsors.py is re-written to incorporate a thread-safe main program
# event/command queue that can be utilized to throw up a download-permission
# dialog at the same time new logos files are detected.
#
sponsor_md5_mismatch_flag_key = 'A9/Sponsor md5 file mismatch'
#==
# List of prefs keys (strings, not _prefs_key global variable names)
# which got stored into developers or users prefs dbs (since they were saved in code committed to cvs),
# but are no longer used now.
# This list is not yet used by the code, and when it is, its format might be revised,
# but for now, make sure each line has a comment which gives complete info
# about whether or not a released version ever stored prefs using the given keys
# (and if so, exactly which released versions);
# also, each line should be signed with a name and date of the abandonment of that key.
###@@@ THIS IS NOT COMPLETE since I didn't have time to add the ones I removed from cvs rev 1.62 just before A8.
# I also forgot to remove some recently when I renamed them from A8 devel to A8 devel2. -- bruce 060705
_abandoned_prefs_keys = [
'A7/Specular Highlights', # never released, superseded by 'A7/Material Specular Highlights' [mark 051205]
'A7/Whiteness', # never released, superseded by 'A7/Material Specular Finish' [mark 051205]
'A7/Shininess', # never released, superseded by 'A7/Material Specular Shininess' [mark 051205]
'A7/Material Brightness', # never released, superseded by 'A7/Material Specular Brightness' [mark 051205]
'glpane lighting', # was released in A6 and maybe some prior versions; superseded by 'A7/glpane lighting' [bruce 051206]
'A7/Selection Behavior', # only released in pre-release snapshots of A7. [mark 060304]
'A7/Select Atoms Mode Highlighting Enabled' # only released in pre-release snapshots of A7. [mark 060404]
]
# Do not move getDefaultWorkingDirectory() to platform.py since it might
# create a recursive import problem. [Mark 060730.]
# [However, it probably doesn't belong in this file either.
# Sometime try putting it into a file in a platform-dependent package.
# bruce 071215 comment]
def getDefaultWorkingDirectory():
"""
Get the default Working Directory.
@return: The default working directory, which is platform dependent:
- Windows: $HOME\My Documents
- MacOS and Linux: $HOME
If the default working directory doesn't exist, return ".".
@rtype: string
"""
wd = ""
if sys.platform == 'win32': # Windows
# e.g. "C:\Documents and Settings\Mark\My Documents"
wd = os.path.normpath(os.path.expanduser("~/My Documents"))
# Check <wd> since some Windows OSes (i.e. Win95) may not have "~\My Documents".
if not os.path.isdir(wd):
wd = os.path.normpath(os.path.expanduser("~"))
else: # Linux and MacOS
# e.g. "/usr/mark"
wd = os.path.normpath(os.path.expanduser("~"))
if os.path.isdir(wd):
return wd
else:
print "getDefaultWorkingDirectory(): default working directory [", \
wd , "] does not exist. Setting default working directory to [.]"
return "."
_default_workingDirectory = getDefaultWorkingDirectory()
# the actual table (for doc, see the code that interprets it, in preferences.py)
# end
| NanoCAD-master | packaging/Pref_Mod/prefs_constants.py |
import os
from preferences import prefs_context
import sys
import getopt
import NE1_Build_Constants
prefs = prefs_context()
if os.name=="nt":
capture_console = False
capture_file = ""
# if it's not reporting python as the executable
if not sys.executable.upper().endswith("PYTHON.EXE") and \
not sys.executable.upper().endswith("PYTHON"):
try:
capture_file = u"".join((sys.executable[:-4], "_console.log"))
sys.stdout = open(capture_file, 'w')
sys.stderr = sys.stdout
capture_console = True # already trapped, don't try more.
except:
pass
if not capture_console:
# Haven't captured the console log yet. Find the default user
# path and try to capture there. This happens if we can't write to
# the normal log location, or if python.exe is the executable.
tmpFilePath = os.path.normpath(os.path.expanduser("~/Nanorex/"))
if not os.path.exists(tmpFilePath): #If it doesn't exist
try:
os.mkdir(tmpFilePath) #Try making one
capture_console = True
except:
pass
# we tried, but there's no easy way to capture the console
if capture_console or os.path.isdir(tmpFilePath):
try: # We made the directory or it already existed, try
# creating the log file.
capture_file = os.path.normpath(u"".join((tmpFilePath,\
"/pref_mod_console.log")))
sys.stdout = open(capture_file, 'w')
sys.stderr = sys.stdout
capture_console = True
except:
print >> sys.stderr, \
"Failed to create any console log file."
capture_console = False
from prefs_constants import qutemol_enabled_prefs_key
from prefs_constants import qutemol_path_prefs_key
from prefs_constants import nanohive_enabled_prefs_key
from prefs_constants import nanohive_path_prefs_key
from prefs_constants import povray_enabled_prefs_key
from prefs_constants import povray_path_prefs_key
from prefs_constants import megapov_enabled_prefs_key
from prefs_constants import megapov_path_prefs_key
from prefs_constants import povdir_enabled_prefs_key
from prefs_constants import gamess_enabled_prefs_key
from prefs_constants import gmspath_prefs_key
from prefs_constants import gromacs_enabled_prefs_key
from prefs_constants import gromacs_path_prefs_key
from prefs_constants import cpp_enabled_prefs_key
from prefs_constants import cpp_path_prefs_key
from prefs_constants import nv1_enabled_prefs_key
from prefs_constants import nv1_path_prefs_key
def parseopts(optslist):
global keyset,valueset,exitset
#use of globals is generally bad. If this program gets bigger, this
#should be rewritten
for oneopt in optslist:
if oneopt[0]=="-K" or oneopt[0]=="-k":
keyset=oneopt[1]
if oneopt[0]=="-V" or oneopt[0]=="-v":
valueset=oneopt[1]
if valueset.upper()=="TRUE":
valueset=True
elif valueset.upper()=="FALSE":
valueset=False
if keyset=="" or valueset=="":
exitset=True
#re-define the variables needed into a dictionary to make calling them
#easier from the command line input
prefkeys={}
prefkeys["qutemol_enabled"]=qutemol_enabled_prefs_key
prefkeys["qutemol_path"]=qutemol_path_prefs_key
prefkeys["nanohive_enabled"]=nanohive_enabled_prefs_key
prefkeys["nanohive_path"]=nanohive_path_prefs_key
prefkeys["povray_enabled"]=povray_enabled_prefs_key
prefkeys["povray_path"]=povray_path_prefs_key
prefkeys["megapov_enabled"]=megapov_enabled_prefs_key
prefkeys["megapov_path"]=megapov_path_prefs_key
prefkeys["povdir_enabled"]=povdir_enabled_prefs_key
prefkeys["gamess_enabled"]=gamess_enabled_prefs_key
prefkeys["gamess_path"]=gmspath_prefs_key
prefkeys["gromacs_enabled"]=gromacs_enabled_prefs_key
prefkeys["gromacs_path"]=gromacs_path_prefs_key
prefkeys["cpp_enabled"]=cpp_enabled_prefs_key
prefkeys["cpp_path"]=cpp_path_prefs_key
prefkeys["nv1_enabled"]=nv1_enabled_prefs_key
prefkeys["nv1_path"]=nv1_path_prefs_key
#determining if everything needed from the command line is there.
keyset=valueset=""
exitset=False
#progopts stores the arguments passed to it from the command line
try:
progopts=getopt.getopt(sys.argv[1:],"k:K:v:V:")
except:
exitset=True
#start of actual main program
progopts=progopts[0]
parseopts(progopts)
if exitset:
print >> sys.__stderr__, \
"Usage: pref_modifier -K <key value> -V <value to store>"
sys.exit(0)
key=prefkeys[keyset] #set the key value to that used by the database
print keyset
print valueset
print key
prefstmp={}
prefstmp[key]=valueset #set up the dict for the database update function
# valueset will be a string or a boolean
prefs.update(prefstmp) # modifies bsddb-shelf
# also write the key/value pair to a text file next to the prefs db
# for later use by NE1 [bruce 080505 for v1.0.1]
_tmpary = NE1_Build_Constants.NE1_RELEASE_VERSION.split(".")
if len(_tmpary) >= 3:
DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s-%s.txt" % \
(_tmpary[0], _tmpary[1], _tmpary[2])
else:
DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s.txt" % \
(_tmpary[0], _tmpary[1])
try:
from preferences import find_or_make_Nanorex_directory
nanorex = find_or_make_Nanorex_directory()
filename = os.path.join( nanorex, "Preferences", DEFAULT_PREFS_BASENAME )
# assume this file's directory exists, since the prefs db (in the same
# directory) has been created and written to by the code above
file = open(filename, "a")
def encode(string1):
string1 = string1.replace('\\', r'\\')
string1 = string1.replace('=', r'\=')
string1 = string1.replace('\n', r'\n')
string1 = string1.replace('\r', r'\r')
return string1
file.write("%s = %s\n" % (encode(key), encode(str(valueset))))
print "appended to", filename
except:
print "ignoring exception while appending to", DEFAULT_PREFS_BASENAME
pass
# end
| NanoCAD-master | packaging/Pref_Mod/pref_modifier.py |
#!/usr/bin/env python
"""
setup.py - script for building MyApplication
Usage:
% python setup.py py2app
"""
from distutils.core import setup
import py2app
setup(
app=['pref_modifier.py'],
)
| NanoCAD-master | packaging/Pref_Mod/setup_mac.py |
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['main.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True}
setup(
app=APP,
name='NanoEngineer-1',
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| NanoCAD-master | packaging/MacOSX/setup.py |
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2exe
"""
#from setuptools import setup
from distutils.core import setup
import py2exe
APP = [{'script': 'main.py', 'icon_resources': [(0, '../../packaging/Win32/NE1.ico')]}]
DATA_FILES = []
OPTIONS = {'argv_emulation': True}
setup(
windows=APP
#app=APP,
#name='NanoEngineer-1',
#data_files=DATA_FILES,
#options={'py2app': OPTIONS},
#setup_requires=['py2app'],
)
| NanoCAD-master | packaging/Win32/setup.py |
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
# $Id$
# This is used only for "make deps"
import sys, re
srcdir = sys.argv[1]
objdir = sys.argv[2]
substitution = sys.argv[3]
objs = sys.argv[4:]
# This is a little kludgey. The idea is to allow either C files (with
# ".o:.c") or C++ files (with ".o:.cpp") and ideally as many other
# filetypes as possible. For instance, Java could use ".class:.java".
# But there will likely be reasonable situations for which this won't
# work.
obj_ending, src_ending = substitution.split(':')
# Perform a pattern find-and-replace for all occurrences in a string.
def subAll(pattern, repl, str):
while True:
s = re.sub(pattern, repl, str)
if s == str:
return str
str = s
print '# Automatically generated dependencies from procdeps.py'
for obj in objs:
# Generate the source file for this object file, and put it in SRCDIR.
src = re.sub(obj_ending + '$', src_ending, obj)
src = re.sub(objdir + '/', '$(SRCDIR)/', src)
# Put this object file in OBJDIR.
obj = re.sub(objdir + '/', '$(OBJDIR)/', obj)
# Print a dependency line.
print obj + ': ' + src
print '\t$(CXX) $(CXXFLAGS) -c -o %s %s' % (obj, src)
for L in sys.stdin.readlines():
L = L.rstrip()
# Remove all absolute header files, like /usr/include stuff.
L = subAll(' /[^ ]*\.h', '', L)
if L and not L.startswith('#') and not L.rstrip().endswith(':'):
obj, hfiles = L.split(':')
# Put this object file in OBJDIR.
obj = re.sub(srcdir + '/', '$(OBJDIR)/', obj)
# Put the header files in SRCDIR.
hfiles = subAll(srcdir + '/', '$(SRCDIR)/', hfiles)
# Print the modified dependency line.
print obj + ':' + hfiles
| NanoCAD-master | cad/plugins/CoNTub/procdeps.py |
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
# usage:
#
# python Generate.py adenine > adenine.mmp
import sys
import math
zSpacing = 3180 # 0.1 pm
minorGroveDegrees = 133
baseTwistDegrees = 33.75
sugarRadius = 6760 # pm -- Value from EricD's pdb: 6760
sugarPhosphateDistance = 3640 # Value from EricD's pdb: 3574
baseTwist = math.pi * baseTwistDegrees / 180
if (len(sys.argv) < 2):
print >>sys.stderr, "must specify base name"
sys.exit(1)
baseName = sys.argv[1]
prefix = """mmpformat 050920 required; 060421 preferred
kelvin 300
group (View Data)
info opengroup open = True
csys (HomeView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
csys (LastView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
egroup (View Data)
group (%s)
info opengroup open = True
mol (%s) def""" % (baseName, baseName)
postfix = """egroup (%s)
end1
group (Clipboard)
info opengroup open = False
egroup (Clipboard)
end molecular machine part %s""" % (baseName, baseName)
def printAtom(index, type, position, bondedTo):
print "atom %d (%d) (%d, %d, %d) def" % (index, type, position[0], position[1], position[2])
if (bondedTo):
print "bond1 %d" % bondedTo
def rotate(x, y, theta):
sinTheta = math.sin(theta)
cosTheta = math.cos(theta)
return (x * cosTheta - y * sinTheta, x * sinTheta + y * cosTheta)
def midpoint(position1, position2):
x = (position1[0] + position2[0]) / 2
y = (position1[1] + position2[1]) / 2
z = (position1[2] + position2[2]) / 2
return (x, y, z)
def extendToRadius(position, radius):
oldR = math.sqrt(position[0] * position[0] + position[1] * position[1])
factor = radius / oldR
return (position[0] * factor, position[1] * factor, position[2])
# given the position of a pseudo atom in strand1, return the position
# of the same pseudo atom in strand2
def strand2(position):
x = position[0]
y = position[1]
z = position[2]
theta = math.pi * (180 - minorGroveDegrees) / 180
newX, newY = rotate(-x, y, theta)
return (newX, newY, -z)
sugar = (sugarRadius, 0, 0)
sugar2xy = rotate(sugarRadius, 0, baseTwist)
sugar2 = (sugar2xy[0], sugar2xy[1], zSpacing)
def distance(xyz):
return math.sqrt(xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2])
dist = 0
phosphateRadius = sugarRadius
while (dist < sugarPhosphateDistance):
phosphate = extendToRadius(midpoint(sugar, sugar2), phosphateRadius)
dist = distance((sugar[0] - phosphate[0], sugar[1] - phosphate[1], sugar[2] - phosphate[2]))
phosphateRadius += 1
print >>sys.stderr, "phosphateRadius %d" % phosphateRadius
print >>sys.stderr, "dist %d" % dist
phosphate0xy = rotate(phosphate[0], phosphate[1], -baseTwist)
phosphate0 = (phosphate0xy[0], phosphate0xy[1], phosphate[2] - zSpacing)
bondpoint1 = midpoint(phosphate, sugar2)
bondpoint2 = midpoint(phosphate0, sugar)
print prefix
if (baseName == 'end1'):
# Axis
printAtom(1, 200, (0, 0, 0), 0) # Ax
printAtom(2, 204, (0, 0, zSpacing/2), 1) # Ae
printAtom(3, 0, (0, 0, -zSpacing/2), 1) # Axis bondpoint
# Strand1
printAtom(4, 201, sugar, 1) # Ss
printAtom(5, 205, phosphate, 4) # Pe
printAtom(6, 0, bondpoint2, 4)
# Strand2
printAtom(7, 201, strand2(sugar), 1) # Ss
printAtom(8, 202, strand2(phosphate), 7) # Pl
printAtom(9, 0, strand2(bondpoint1), 8)
printAtom(10, 206, strand2(bondpoint2), 7) # Sh
elif (baseName == 'end2'):
# Axis
printAtom(1, 200, (0, 0, 0), 0) # Ax
printAtom(2, 204, (0, 0, -zSpacing/2), 1) # Ae
printAtom(3, 0, (0, 0, zSpacing/2), 1) # Axis bondpoint
# Strand1
printAtom(4, 201, sugar, 1) # Ss
printAtom(5, 202, phosphate, 4) # Pl
printAtom(6, 0, bondpoint1, 5)
printAtom(7, 206, bondpoint2, 4) # Sh
# Strand2
printAtom(8, 201, strand2(sugar), 1) # Ss
printAtom(9, 205, strand2(phosphate), 8) # Pe
printAtom(10, 0, strand2(bondpoint2), 8)
else:
# Axis
printAtom(1, 200, (0, 0, 0), 0) # Ax
printAtom(2, 0, (0, 0, zSpacing/2), 1) # Axis bondpoint
printAtom(3, 0, (0, 0, -zSpacing/2), 1) # Axis bondpoint
# Strand1
printAtom(4, 201, sugar, 1) # Ss
printAtom(5, 202, phosphate, 4) # Pl
printAtom(6, 0, bondpoint1, 5)
printAtom(7, 0, bondpoint2, 4)
# Strand2
printAtom(8, 201, strand2(sugar), 1) # Ss
printAtom(9, 202, strand2(phosphate), 8) # Pl
printAtom(10, 0, strand2(bondpoint1), 9)
printAtom(11, 0, strand2(bondpoint2), 8)
print postfix
| NanoCAD-master | cad/plugins/DNA/bdna-pseudo-bases/Generate.py |
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
import sys
import string
from math import pi, sin, cos   # needed by the tfm() transform helper below
sys.path.append("../../../src")
from VQT import A, V, vlen
class AtomType:
def __init__(self, symbol, number, rcovalent):
self.symbol = symbol
self.number = number
self.rcovalent = rcovalent
def __repr__(self):
return '<' + self.symbol + '>'
periodicTable = [
AtomType('X', 0, 0.0),
AtomType('H', 1, 0.31),
AtomType('C', 6, 0.77),
AtomType('N', 7, 0.73),
AtomType('O', 8, 0.69),
AtomType('P', 15, 1.08),
]
def lookupAtomType(num):
for at in periodicTable:
if at.number == num:
return at
raise Exception("AtomType not found, num=" + repr(num))
class Atom:
def __init__(self, mmpline):
if mmpline != None:
mmpline = mmpline.rstrip()
self.mmpline = mmpline
fields = mmpline.split()
self.key = string.atoi(fields[1])
self.style = fields[6]
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(string.atoi(fields[2][1:-1]))
self.x = 0.001 * string.atoi(fields[3][1:-1])
self.y = 0.001 * string.atoi(fields[4][:-1])
self.z = 0.001 * string.atoi(fields[5][:-1])
else:
self.mmpline = None
self.key = 0
self.style = None
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(0)
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.bonds = [ ]
def is_singlet(self):
return self.atomtype.symbol == 'X'
def clone(self):
a = Atom(self.mmpline)
for attr in ('key', 'style', 'hybridization', 'base', 'atomtype',
'x', 'y', 'z', 'bonds'):
setattr(a, attr, getattr(self, attr))
return a
def hybridize(self, hybrids={
'C': { 4: 'sp3',
3: 'sp2',
2: 'sp',
},
'O': { 2: 'sp3',
1: 'sp2',
},
'N': { 3: 'sp3',
2: 'sp2',
1: 'sp',
}
}):
try:
self.hybridization = hybrids[self.atomtype.symbol][len(self.bonds)]
except KeyError:
self.hybridization = None
def posn(self):
return V(self.x, self.y, self.z)
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.hybridization != None:
r += " %s" % self.hybridization
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class Bondpoint(Atom):
def __init__(self, owner, v):
Atom.__init__(self, mmpline=None)
self.style = owner.style
self.base = owner.base
self.x = v[0]
self.y = v[1]
self.z = v[2]
self.bonds = [ owner.key ]
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class MakeBondpoint(Exception):
pass
class Base:
def __init__(self, strand, key):
self.key = key
self.atomlist = [ ]
self.phosphorusZcoord = 0.
self.strand = strand
atm0 = strand.atoms[key]
self.style = atm0.style
self.addAtom(atm0)
def __cmp__(self, other):
return -cmp(self.phosphorusZcoord, other.phosphorusZcoord)
def keys(self):
return map(lambda a: a.key, self.atomlist)
def __len__(self):
return len(self.atomlist)
def addAtom(self, a):
k = a.key
if a not in self.atomlist:
if a.style == self.style:
a.base = self.key
self.atomlist.append(a)
if a.atomtype.symbol == 'P':
self.phosphorusZcoord = a.z
else:
raise MakeBondpoint
def addLayer(self):
atoms = self.strand.atoms
newguys = [ ]
for a in self.atomlist:
for k in a.bonds:
if k not in newguys and k not in self.keys():
newguys.append(k)
atoms[k].buddy = a
newAtoms = 0
for k in newguys:
a2 = atoms[k]
a = a2.buddy
try:
self.addAtom(a2)
newAtoms += 1
except MakeBondpoint:
# don't make this bondpoint if it's already been made
if not hasattr(a, 'gotBondpoint'):
p1, p2 = a.posn(), a2.posn()
r1, r2 = a.atomtype.rcovalent, a2.atomtype.rcovalent
p = (r2 * p1 + r1 * p2) / (r1 + r2)
bpt = Bondpoint(a, p)
# pick up a new key
self.strand.addAtom(bpt)
self.addAtom(bpt)
a.gotBondpoint = True
return newAtoms
def grow(self):
while True:
if self.addLayer() == 0:
return
class Strand:
def __init__(self, filename=None):
self.atoms = { }
self.nextKey = 1
self.bases = [ ]
if filename != None:
for L in open(filename).readlines():
if L.startswith("atom"):
self.addAtom(Atom(L))
def addAtom(self, a):
a.key = key = self.nextKey
self.nextKey += 1
self.atoms[key] = a
def transform(self, t):
if t.func_code.co_argcount == 1:
for a in self.atoms.values():
v = V(a.x, a.y, a.z)
a.x, a.y, a.z = tuple(t(v))
else:
for a in self.atoms.values():
a.x, a.y, a.z = t(a.x, a.y, a.z)
def addAtomFromMmp(self, mmpline):
self.addAtom(Atom(mmpline))
def inferBonds(self):
maxBondLength = 2.5
def quantize(vec, maxBondLength=maxBondLength):
return (int(vec[0] / maxBondLength),
int(vec[1] / maxBondLength),
int(vec[2] / maxBondLength))
def bond_atoms(a1, a2):
if a1.key not in a2.bonds:
a2.bonds.append(a1.key)
if a2.key not in a1.bonds:
a1.bonds.append(a2.key)
buckets = { }
for atom in self.atoms.values():
atom.bonds = [ ] # clear existing bonds
# put this atom in one of the buckets
key = quantize(atom.posn())
try:
buckets[key].append(atom)
except KeyError:
buckets[key] = [ atom ]
def region(center):
lst = [ ]
x0, y0, z0 = quantize(center)
for x in range(x0 - 1, x0 + 2):
for y in range(y0 - 1, y0 + 2):
for z in range(z0 - 1, z0 + 2):
key = (x, y, z)
try:
lst += buckets[key]
except KeyError:
pass
return lst
for atm1 in self.atoms.values():
for atm2 in region(atm1.posn()):
bondLen = vlen(atm1.posn() - atm2.posn())
idealBondLen = atm1.atomtype.rcovalent + atm2.atomtype.rcovalent
a = 0.2
if (1-a) * idealBondLen < bondLen < (1+a) * idealBondLen:
bond_atoms(atm1, atm2)
atm1.hybridize()
def assignBases(self):
self.inferBonds()
remainingKeys = self.atoms.keys()
while len(remainingKeys) > 0:
baseKey = remainingKeys[0]
print "Base", baseKey
base = Base(self, baseKey)
self.bases.append(base)
remainingKeys = remainingKeys[1:]
base.grow()
for key in base.keys():
if key in remainingKeys:
remainingKeys.remove(key)
def baseSort(self):
self.bases.sort()
self.renumberAtoms(lambda a1, a2: cmp(a1.base, a2.base))
def renumberAtoms(self, sortfunc=None):
# Renumber their keys, and recompute bonds with new keys
atomlist = self.atoms.values()
if sortfunc != None:
atomlist.sort(sortfunc)
self.atoms = { }
self.nextKey = 1
for i in range(len(atomlist)):
self.addAtom(atomlist[i])
self.inferBonds()
def filter(self, filt):
s = Strand()
for a in self.atoms.values():
if filt(a):
s.addAtom(a.clone())
s.inferBonds()
return s
def writeManyMmps(self, specs, tfm0, tfm):
# discard tiny "bases" and any atoms in them
tinybases = filter(lambda b: len(b) < 6, self.bases)
for b in tinybases:
for a in b.atomlist:
del self.atoms[a.key]
self.bases.remove(b)
# sort bases in order of decreasing phosphorus z coord
self.baseSort()
for index, groupname, filename in specs:
basekey = self.bases[index].key
base = self.filter(lambda a: a.base == basekey)
def tfm2(x, y, z, tfm0=tfm0, tfm=tfm, index=index):
v = V(x,y,z)
v = tfm0(v)
while index:
v = tfm(v)
index -= 1
return tuple(v)
base.transform(tfm2)
base.writeMmp(filename, groupname)
mmptext = """mmpformat 050920 required; 060421 preferred
kelvin 300
group (View Data)
info opengroup open = True
csys (HomeView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
csys (LastView) (1.000000, 0.000000, 0.000000, 0.000000) (8.153929) (0.000000, 0.000000, 0.000000) (1.000000)
egroup (View Data)
group (%(groupname)s)
info opengroup open = True
%(text)s
egroup (%(groupname)s)
end1
group (Clipboard)
info opengroup open = False
egroup (Clipboard)
end molecular machine part %(groupname)s
"""
def writeMmp(self, filename, groupname=None):
s = ""
thisgroup = None
for a in self.atoms.values():
if groupname == None:
if thisgroup != a.base:
s += "mol (Strand %d) def\n" % a.base
thisgroup = a.base
s += ("atom %d (%d) (%d, %d, %d) def\n" %
(a.key, a.atomtype.number,
int(1000 * a.x), int(1000 * a.y), int(1000 * a.z)))
if a.hybridization != None:
s += "info atom atomtype = " + a.hybridization + "\n"
bstr = ""
for b in a.bonds:
if b < a.key:
bstr += " " + repr(b)
if bstr:
s += "bond1" + bstr + "\n"
if groupname != None:
s = "mol (" + groupname + ") def\n" + s
outf = open(filename, "w")
outf.write(self.mmptext % {"groupname": groupname, "text": s[:-1]})
outf.close()
########################################
if (__name__ == '__main__'):
g = Strand('strund1.mmp')
g.assignBases()
g.baseSort()
if True:
specs = [
(0, 'cytosine', 'cytosine-inner.mmp'),
(1, 'guanine', 'guanine-outer.mmp'),
(3, 'adenine', 'adenine-outer.mmp'),
(4, 'adenine', 'adenine-inner.mmp'),
(6, 'thymine', 'thymine-inner.mmp'),
(7, 'thymine', 'thymine-outer.mmp'),
(9, 'cytosine', 'cytosine-outer.mmp'),
(10, 'guanine', 'guanine-inner.mmp'),
]
k = [ 0 ]
def tfm0(v, k=k):
k[0] = 0
return V(v[0], v[1], v[2] - 20.2)
def tfm(v, k=k):
angle = pi / 6
x, y, z = tuple(v)
c, s = cos(angle), sin(angle)
x, y = c * x + s * y, -s * x + c * y
if (k[0] & 1) == 0:
zdiff = 1.67
else:
zdiff = 5.76
k[0] += 1
return V(x, y, z + zdiff)
g.writeManyMmps(specs, tfm0, tfm)
else:
g.writeMmp('groups.mmp', None)
| NanoCAD-master | cad/plugins/DNA/Z-DNA/Atomistic-bases/prepare.py |
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
import sys
import string
from math import pi, sin, cos   # needed by the tfm() transform helper below
sys.path.append("../../../src")
from VQT import A, V, vlen
class AtomType:
def __init__(self, symbol, number, rcovalent):
self.symbol = symbol
self.number = number
self.rcovalent = rcovalent
def __repr__(self):
return '<' + self.symbol + '>'
periodicTable = [
AtomType('X', 0, 0.0),
AtomType('H', 1, 0.31),
AtomType('C', 6, 0.77),
AtomType('N', 7, 0.73),
AtomType('O', 8, 0.69),
AtomType('P', 15, 1.08),
]
def lookupAtomType(num):
for at in periodicTable:
if at.number == num:
return at
raise Exception("AtomType not found, num=" + repr(num))
class Atom:
def __init__(self, mmpline):
if mmpline != None:
mmpline = mmpline.rstrip()
self.mmpline = mmpline
fields = mmpline.split()
self.key = string.atoi(fields[1])
self.style = fields[6]
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(string.atoi(fields[2][1:-1]))
self.x = 0.001 * string.atoi(fields[3][1:-1])
self.y = 0.001 * string.atoi(fields[4][:-1])
self.z = 0.001 * string.atoi(fields[5][:-1])
else:
self.mmpline = None
self.key = 0
self.style = None
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(0)
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.bonds = [ ]
def is_singlet(self):
return self.atomtype.symbol == 'X'
def clone(self):
a = Atom(self.mmpline)
for attr in ('key', 'style', 'hybridization', 'base', 'atomtype',
'x', 'y', 'z', 'bonds'):
setattr(a, attr, getattr(self, attr))
return a
def hybridize(self, hybrids={
'C': { 4: 'sp3',
3: 'sp2',
2: 'sp',
},
'O': { 2: 'sp3',
1: 'sp2',
},
'N': { 3: 'sp3',
2: 'sp2',
1: 'sp',
}
}):
try:
self.hybridization = hybrids[self.atomtype.symbol][len(self.bonds)]
except KeyError:
self.hybridization = None
def posn(self):
return V(self.x, self.y, self.z)
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.hybridization != None:
r += " %s" % self.hybridization
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class Bondpoint(Atom):
def __init__(self, owner, v):
Atom.__init__(self, mmpline=None)
self.style = owner.style
self.base = owner.base
self.x = v[0]
self.y = v[1]
self.z = v[2]
self.bonds = [ owner.key ]
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class MakeBondpoint(Exception):
pass
class Base:
def __init__(self, strand, key):
self.key = key
self.atomlist = [ ]
self.phosphorusZcoord = 0.
self.strand = strand
atm0 = strand.atoms[key]
self.style = atm0.style
self.addAtom(atm0)
def __cmp__(self, other):
return -cmp(self.phosphorusZcoord, other.phosphorusZcoord)
def keys(self):
return map(lambda a: a.key, self.atomlist)
def __len__(self):
return len(self.atomlist)
def addAtom(self, a):
k = a.key
if a not in self.atomlist:
if a.style == self.style:
a.base = self.key
self.atomlist.append(a)
if a.atomtype.symbol == 'P':
self.phosphorusZcoord = a.z
else:
raise MakeBondpoint
def addLayer(self):
atoms = self.strand.atoms
newguys = [ ]
for a in self.atomlist:
for k in a.bonds:
if k not in newguys and k not in self.keys():
newguys.append(k)
atoms[k].buddy = a
newAtoms = 0
for k in newguys:
a2 = atoms[k]
a = a2.buddy
try:
self.addAtom(a2)
newAtoms += 1
except MakeBondpoint:
# don't make this bondpoint if it's already been made
if not hasattr(a, 'gotBondpoint'):
p1, p2 = a.posn(), a2.posn()
r1, r2 = a.atomtype.rcovalent, a2.atomtype.rcovalent
p = (r2 * p1 + r1 * p2) / (r1 + r2)
bpt = Bondpoint(a, p)
# pick up a new key
self.strand.addAtom(bpt)
self.addAtom(bpt)
a.gotBondpoint = True
return newAtoms
def grow(self):
while True:
if self.addLayer() == 0:
return
class Strand:
def __init__(self, filename=None):
self.atoms = { }
self.nextKey = 1
self.bases = [ ]
if filename != None:
for L in open(filename).readlines():
if L.startswith("atom"):
self.addAtom(Atom(L))
self.assignBases()
def addAtom(self, a):
a.key = key = self.nextKey
self.nextKey += 1
self.atoms[key] = a
def transform(self, t):
if t.func_code.co_argcount == 1:
for a in self.atoms.values():
v = V(a.x, a.y, a.z)
a.x, a.y, a.z = tuple(t(v))
else:
for a in self.atoms.values():
a.x, a.y, a.z = t(a.x, a.y, a.z)
def addAtomFromMmp(self, mmpline):
self.addAtom(Atom(mmpline))
def inferBonds(self):
maxBondLength = 2.5
def quantize(vec, maxBondLength=maxBondLength):
return (int(vec[0] / maxBondLength),
int(vec[1] / maxBondLength),
int(vec[2] / maxBondLength))
def bond_atoms(a1, a2):
if a1.key not in a2.bonds:
a2.bonds.append(a1.key)
if a2.key not in a1.bonds:
a1.bonds.append(a2.key)
buckets = { }
for atom in self.atoms.values():
atom.bonds = [ ] # clear existing bonds
# put this atom in one of the buckets
key = quantize(atom.posn())
try:
buckets[key].append(atom)
except KeyError:
buckets[key] = [ atom ]
def region(center):
lst = [ ]
x0, y0, z0 = quantize(center)
for x in range(x0 - 1, x0 + 2):
for y in range(y0 - 1, y0 + 2):
for z in range(z0 - 1, z0 + 2):
key = (x, y, z)
try:
lst += buckets[key]
except KeyError:
pass
return lst
for atm1 in self.atoms.values():
for atm2 in region(atm1.posn()):
bondLen = vlen(atm1.posn() - atm2.posn())
idealBondLen = atm1.atomtype.rcovalent + atm2.atomtype.rcovalent
a = 0.2
if (1-a) * idealBondLen < bondLen < (1+a) * idealBondLen:
bond_atoms(atm1, atm2)
atm1.hybridize()
def assignBases(self):
self.inferBonds()
remainingKeys = self.atoms.keys()
while len(remainingKeys) > 0:
baseKey = remainingKeys[0]
print "Base", baseKey
base = Base(self, baseKey)
self.bases.append(base)
remainingKeys = remainingKeys[1:]
base.grow()
for key in base.keys():
if key in remainingKeys:
remainingKeys.remove(key)
def renumberAtoms(self):
# Renumber their keys, and recompute bonds with new keys
atomlist = self.atoms.values()
self.atoms = { }
self.nextKey = 1
for i in range(len(atomlist)):
self.addAtom(atomlist[i])
self.inferBonds()
def filter(self, filt):
s = Strand()
for a in self.atoms.values():
if filt(a):
s.addAtom(a.clone())
s.inferBonds()
return s
def writeManyMmps(self, specs, tfm0, tfm):
# discard tiny "bases" and any atoms in them
tinybases = filter(lambda b: len(b) < 6, self.bases)
for b in tinybases:
for a in b.atomlist:
del self.atoms[a.key]
self.bases.remove(b)
self.renumberAtoms()
# sort bases in order of decreasing phosphorus z coord
self.bases.sort()
for index, groupname, filename in specs:
basekey = self.bases[index].key
base = self.filter(lambda a: a.base == basekey)
def tfm2(x, y, z, tfm0=tfm0, tfm=tfm, index=index):
v = V(x,y,z)
v = tfm0(v)
while index:
v = tfm(v)
index -= 1
return tuple(v)
base.transform(tfm2)
base.writeMmp(filename, groupname)
mmptext = """mmpformat 050920 required; 060421 preferred
kelvin 300
group (View Data)
info opengroup open = True
csys (HomeView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
csys (LastView) (1.000000, 0.000000, 0.000000, 0.000000) (8.153929) (0.000000, 0.000000, 0.000000) (1.000000)
egroup (View Data)
group (%(groupname)s)
info opengroup open = True
%(text)s
egroup (%(groupname)s)
end1
group (Clipboard)
info opengroup open = False
egroup (Clipboard)
end molecular machine part %(groupname)s
"""
def writeMmp(self, filename, groupname=None):
# Sort the atoms by what group they are in
atomlist = self.atoms.values()
atomlist.sort(lambda a1, a2: cmp(a1.base, a2.base))
self.renumberAtoms()
# write the file
s = ""
thisgroup = None
for a in self.atoms.values():
if groupname == None:
if thisgroup != a.base:
s += "mol (Strand %d) def\n" % a.base
thisgroup = a.base
s += ("atom %d (%d) (%d, %d, %d) def\n" %
(a.key, a.atomtype.number,
int(1000 * a.x), int(1000 * a.y), int(1000 * a.z)))
if a.hybridization != None:
s += "info atom atomtype = " + a.hybridization + "\n"
bstr = ""
for b in a.bonds:
if b < a.key:
bstr += " " + repr(b)
if bstr:
s += "bond1" + bstr + "\n"
if groupname != None:
s = "mol (" + groupname + ") def\n" + s
outf = open(filename, "w")
outf.write(self.mmptext % {"groupname": groupname, "text": s[:-1]})
outf.close()
########################################
if (__name__ == '__main__'):
g = Strand('strund1.mmp')
specs = [
(0, 'guanine', 'guanine.mmp'),
(1, 'cytosine', 'cytosine.mmp'),
(3, 'adenine', 'adenine.mmp'),
(6, 'thymine', 'thymine.mmp')
]
def tfm0(v):
return v + V(0, 0, -18.7)
def tfm(v):
angle = -36 * pi / 180
x, y, z = tuple(v)
c, s = cos(angle), sin(angle)
x, y = c * x + s * y, -s * x + c * y
return V(x, y, z + 3.391)
g.writeManyMmps(specs, tfm0, tfm)
| NanoCAD-master | cad/plugins/DNA/B-DNA/Atomistic-bases/prepare.py |
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
import sys
import string
from math import pi, sin, cos   # needed by the tfm() transform helper below
sys.path.append("../../../src")
from VQT import A, V, vlen
class AtomType:
def __init__(self, symbol, number, rcovalent):
self.symbol = symbol
self.number = number
self.rcovalent = rcovalent
def __repr__(self):
return '<' + self.symbol + '>'
periodicTable = [
AtomType('X', 0, 0.0),
AtomType('H', 1, 0.31),
AtomType('C', 6, 0.77),
AtomType('N', 7, 0.73),
AtomType('O', 8, 0.69),
AtomType('P', 15, 1.08),
]
def lookupAtomType(num):
for at in periodicTable:
if at.number == num:
return at
raise Exception("AtomType not found, num=" + repr(num))
class Atom:
def __init__(self, mmpline):
if mmpline != None:
mmpline = mmpline.rstrip()
self.mmpline = mmpline
fields = mmpline.split()
self.key = string.atoi(fields[1])
self.style = fields[6]
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(string.atoi(fields[2][1:-1]))
self.x = 0.001 * string.atoi(fields[3][1:-1])
self.y = 0.001 * string.atoi(fields[4][:-1])
self.z = 0.001 * string.atoi(fields[5][:-1])
else:
self.mmpline = None
self.key = 0
self.style = None
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(0)
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.bonds = [ ]
def is_singlet(self):
return self.atomtype.symbol == 'X'
def clone(self):
a = Atom(self.mmpline)
for attr in ('key', 'style', 'hybridization', 'base', 'atomtype',
'x', 'y', 'z', 'bonds'):
setattr(a, attr, getattr(self, attr))
return a
def hybridize(self, hybrids={
'C': { 4: 'sp3',
3: 'sp2',
2: 'sp',
},
'O': { 2: 'sp3',
1: 'sp2',
},
'N': { 3: 'sp3',
2: 'sp2',
1: 'sp',
}
}):
try:
self.hybridization = hybrids[self.atomtype.symbol][len(self.bonds)]
except KeyError:
self.hybridization = None
def posn(self):
return V(self.x, self.y, self.z)
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.hybridization != None:
r += " %s" % self.hybridization
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class Bondpoint(Atom):
def __init__(self, owner, v):
Atom.__init__(self, mmpline=None)
self.style = owner.style
self.base = owner.base
self.x = v[0]
self.y = v[1]
self.z = v[2]
self.bonds = [ owner.key ]
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class MakeBondpoint(Exception):
pass
class Base:
def __init__(self, strand, key):
self.key = key
self.atomlist = [ ]
self.phosphorusZcoord = 0.
self.strand = strand
atm0 = strand.atoms[key]
self.style = atm0.style
self.addAtom(atm0)
def __cmp__(self, other):
return -cmp(self.phosphorusZcoord, other.phosphorusZcoord)
def keys(self):
return map(lambda a: a.key, self.atomlist)
def __len__(self):
return len(self.atomlist)
def addAtom(self, a):
k = a.key
if a not in self.atomlist:
if a.style == self.style:
a.base = self.key
self.atomlist.append(a)
if a.atomtype.symbol == 'P':
self.phosphorusZcoord = a.z
else:
raise MakeBondpoint
def addLayer(self):
atoms = self.strand.atoms
newguys = [ ]
for a in self.atomlist:
for k in a.bonds:
if k not in newguys and k not in self.keys():
newguys.append(k)
atoms[k].buddy = a
newAtoms = 0
for k in newguys:
a2 = atoms[k]
a = a2.buddy
try:
self.addAtom(a2)
newAtoms += 1
except MakeBondpoint:
# don't make this bondpoint if it's already been made
if not hasattr(a, 'gotBondpoint'):
p1, p2 = a.posn(), a2.posn()
r1, r2 = a.atomtype.rcovalent, a2.atomtype.rcovalent
p = (r2 * p1 + r1 * p2) / (r1 + r2)
bpt = Bondpoint(a, p)
# pick up a new key
self.strand.addAtom(bpt)
self.addAtom(bpt)
a.gotBondpoint = True
return newAtoms
def grow(self):
while True:
if self.addLayer() == 0:
return
class Strand:
def __init__(self, filename=None):
self.atoms = { }
self.nextKey = 1
self.bases = [ ]
if filename != None:
for L in open(filename).readlines():
if L.startswith("atom"):
self.addAtom(Atom(L))
self.assignBases()
def addAtom(self, a):
a.key = key = self.nextKey
self.nextKey += 1
self.atoms[key] = a
def transform(self, t):
if t.func_code.co_argcount == 1:
for a in self.atoms.values():
v = V(a.x, a.y, a.z)
a.x, a.y, a.z = tuple(t(v))
else:
for a in self.atoms.values():
a.x, a.y, a.z = t(a.x, a.y, a.z)
def addAtomFromMmp(self, mmpline):
self.addAtom(Atom(mmpline))
def inferBonds(self):
maxBondLength = 2.5
def quantize(vec, maxBondLength=maxBondLength):
return (int(vec[0] / maxBondLength),
int(vec[1] / maxBondLength),
int(vec[2] / maxBondLength))
def bond_atoms(a1, a2):
if a1.key not in a2.bonds:
a2.bonds.append(a1.key)
if a2.key not in a1.bonds:
a1.bonds.append(a2.key)
buckets = { }
for atom in self.atoms.values():
atom.bonds = [ ] # clear existing bonds
# put this atom in one of the buckets
key = quantize(atom.posn())
try:
buckets[key].append(atom)
except KeyError:
buckets[key] = [ atom ]
def region(center):
lst = [ ]
x0, y0, z0 = quantize(center)
for x in range(x0 - 1, x0 + 2):
for y in range(y0 - 1, y0 + 2):
for z in range(z0 - 1, z0 + 2):
key = (x, y, z)
try:
lst += buckets[key]
except KeyError:
pass
return lst
for atm1 in self.atoms.values():
for atm2 in region(atm1.posn()):
bondLen = vlen(atm1.posn() - atm2.posn())
idealBondLen = atm1.atomtype.rcovalent + atm2.atomtype.rcovalent
a = 0.2
if (1-a) * idealBondLen < bondLen < (1+a) * idealBondLen:
bond_atoms(atm1, atm2)
atm1.hybridize()
def assignBases(self):
self.inferBonds()
remainingKeys = self.atoms.keys()
while len(remainingKeys) > 0:
baseKey = remainingKeys[0]
print "Base", baseKey
base = Base(self, baseKey)
self.bases.append(base)
remainingKeys = remainingKeys[1:]
base.grow()
for key in base.keys():
if key in remainingKeys:
remainingKeys.remove(key)
def renumberAtoms(self):
# Renumber their keys, and recompute bonds with new keys
atomlist = self.atoms.values()
self.atoms = { }
self.nextKey = 1
for i in range(len(atomlist)):
self.addAtom(atomlist[i])
self.inferBonds()
def filter(self, filt):
s = Strand()
for a in self.atoms.values():
if filt(a):
s.addAtom(a.clone())
s.inferBonds()
return s
def writeManyMmps(self, specs, tfm0, tfm):
# discard tiny "bases" and any atoms in them
tinybases = filter(lambda b: len(b) < 6, self.bases)
for b in tinybases:
for a in b.atomlist:
del self.atoms[a.key]
self.bases.remove(b)
self.renumberAtoms()
# sort bases in order of decreasing phosphorus z coord
self.bases.sort()
for index, groupname, filename in specs:
basekey = self.bases[index].key
base = self.filter(lambda a: a.base == basekey)
def tfm2(x, y, z, tfm0=tfm0, tfm=tfm, index=index):
v = V(x,y,z)
v = tfm0(v)
while index:
v = tfm(v)
index -= 1
return tuple(v)
base.transform(tfm2)
base.writeMmp(filename, groupname)
mmptext = """mmpformat 050920 required; 060421 preferred
kelvin 300
group (View Data)
info opengroup open = True
csys (HomeView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
csys (LastView) (1.000000, 0.000000, 0.000000, 0.000000) (8.153929) (0.000000, 0.000000, 0.000000) (1.000000)
egroup (View Data)
group (%(groupname)s)
info opengroup open = True
%(text)s
egroup (%(groupname)s)
end1
group (Clipboard)
info opengroup open = False
egroup (Clipboard)
end molecular machine part %(groupname)s
"""
def writeMmp(self, filename, groupname=None):
# Sort the atoms by what group they are in
atomlist = self.atoms.values()
atomlist.sort(lambda a1, a2: cmp(a1.base, a2.base))
self.renumberAtoms()
# write the file
s = ""
thisgroup = None
for a in self.atoms.values():
if groupname == None:
if thisgroup != a.base:
s += "mol (Strand %d) def\n" % a.base
thisgroup = a.base
s += ("atom %d (%d) (%d, %d, %d) def\n" %
(a.key, a.atomtype.number,
int(1000 * a.x), int(1000 * a.y), int(1000 * a.z)))
if a.hybridization != None:
s += "info atom atomtype = " + a.hybridization + "\n"
bstr = ""
for b in a.bonds:
if b < a.key:
bstr += " " + repr(b)
if bstr:
s += "bond1" + bstr + "\n"
if groupname != None:
s = "mol (" + groupname + ") def\n" + s
outf = open(filename, "w")
outf.write(self.mmptext % {"groupname": groupname, "text": s[:-1]})
outf.close()
########################################
if (__name__ == '__main__'):
g = Strand('strund1.mmp')
specs = [
(0, 'guanine', 'guanine.mmp'),
(1, 'cytosine', 'cytosine.mmp'),
(3, 'adenine', 'adenine.mmp'),
(6, 'thymine', 'thymine.mmp')
]
def tfm0(v):
return v + V(0, 0, -18.7)
def tfm(v):
angle = -36 * pi / 180
x, y, z = tuple(v)
c, s = cos(angle), sin(angle)
x, y = c * x + s * y, -s * x + c * y
return V(x, y, z + 3.391)
g.writeManyMmps(specs, tfm0, tfm)
| NanoCAD-master | cad/plugins/DNA/bdna-bases/prepare.py |
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
import sys
import string
from math import pi, sin, cos   # needed by the tfm() transform helper below
sys.path.append("../../../src")
from VQT import A, V, vlen
class AtomType:
def __init__(self, symbol, number, rcovalent):
self.symbol = symbol
self.number = number
self.rcovalent = rcovalent
def __repr__(self):
return '<' + self.symbol + '>'
periodicTable = [
AtomType('X', 0, 0.0),
AtomType('H', 1, 0.31),
AtomType('C', 6, 0.77),
AtomType('N', 7, 0.73),
AtomType('O', 8, 0.69),
AtomType('P', 15, 1.08),
]
def lookupAtomType(num):
for at in periodicTable:
if at.number == num:
return at
raise Exception("AtomType not found, num=" + repr(num))
class Atom:
def __init__(self, mmpline):
if mmpline != None:
mmpline = mmpline.rstrip()
self.mmpline = mmpline
fields = mmpline.split()
self.key = string.atoi(fields[1])
self.style = fields[6]
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(string.atoi(fields[2][1:-1]))
self.x = 0.001 * string.atoi(fields[3][1:-1])
self.y = 0.001 * string.atoi(fields[4][:-1])
self.z = 0.001 * string.atoi(fields[5][:-1])
else:
self.mmpline = None
self.key = 0
self.style = None
self.hybridization = None
self.base = None
self.atomtype = lookupAtomType(0)
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.bonds = [ ]
def is_singlet(self):
return self.atomtype.symbol == 'X'
def clone(self):
a = Atom(self.mmpline)
for attr in ('key', 'style', 'hybridization', 'base', 'atomtype',
'x', 'y', 'z', 'bonds'):
setattr(a, attr, getattr(self, attr))
return a
def hybridize(self, hybrids={
'C': { 4: 'sp3',
3: 'sp2',
2: 'sp',
},
'O': { 2: 'sp3',
1: 'sp2',
},
'N': { 3: 'sp3',
2: 'sp2',
1: 'sp',
}
}):
try:
self.hybridization = hybrids[self.atomtype.symbol][len(self.bonds)]
except KeyError:
self.hybridization = None
def posn(self):
return V(self.x, self.y, self.z)
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.hybridization != None:
r += " %s" % self.hybridization
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class Bondpoint(Atom):
def __init__(self, owner, v):
Atom.__init__(self, mmpline=None)
self.style = owner.style
self.base = owner.base
self.x = v[0]
self.y = v[1]
self.z = v[2]
self.bonds = [ owner.key ]
def __repr__(self):
r = "<%s %d (%g, %g, %g)" % \
(self.atomtype.symbol, self.key, self.x, self.y, self.z)
r += " %s" % self.style
if self.base != None:
r += " (base %d)" % self.base
if self.bonds:
r += " ["
for b in self.bonds:
r += " " + repr(b)
r += " ]"
return r + ">"
class MakeBondpoint(Exception):
pass
class Base:
def __init__(self, strand, key):
self.key = key
self.atomlist = [ ]
self.phosphorusZcoord = 0.
self.strand = strand
atm0 = strand.atoms[key]
self.style = atm0.style
self.addAtom(atm0)
def __cmp__(self, other):
return -cmp(self.phosphorusZcoord, other.phosphorusZcoord)
def keys(self):
return map(lambda a: a.key, self.atomlist)
def __len__(self):
return len(self.atomlist)
def addAtom(self, a):
k = a.key
if a not in self.atomlist:
if a.style == self.style:
a.base = self.key
self.atomlist.append(a)
if a.atomtype.symbol == 'P':
self.phosphorusZcoord = a.z
else:
raise MakeBondpoint
def addLayer(self):
atoms = self.strand.atoms
newguys = [ ]
for a in self.atomlist:
for k in a.bonds:
if k not in newguys and k not in self.keys():
newguys.append(k)
atoms[k].buddy = a
newAtoms = 0
for k in newguys:
a2 = atoms[k]
a = a2.buddy
try:
self.addAtom(a2)
newAtoms += 1
except MakeBondpoint:
# don't make this bondpoint if it's already been made
if not hasattr(a, 'gotBondpoint'):
p1, p2 = a.posn(), a2.posn()
r1, r2 = a.atomtype.rcovalent, a2.atomtype.rcovalent
p = (r2 * p1 + r1 * p2) / (r1 + r2)
bpt = Bondpoint(a, p)
# pick up a new key
self.strand.addAtom(bpt)
self.addAtom(bpt)
a.gotBondpoint = True
return newAtoms
def grow(self):
while True:
if self.addLayer() == 0:
return
class Strand:
def __init__(self, filename=None):
self.atoms = { }
self.nextKey = 1
self.bases = [ ]
if filename != None:
for L in open(filename).readlines():
if L.startswith("atom"):
self.addAtom(Atom(L))
def addAtom(self, a):
a.key = key = self.nextKey
self.nextKey += 1
self.atoms[key] = a
def transform(self, t):
if t.func_code.co_argcount == 1:
for a in self.atoms.values():
v = V(a.x, a.y, a.z)
a.x, a.y, a.z = tuple(t(v))
else:
for a in self.atoms.values():
a.x, a.y, a.z = t(a.x, a.y, a.z)
def addAtomFromMmp(self, mmpline):
self.addAtom(Atom(mmpline))
def inferBonds(self):
maxBondLength = 2.5
def quantize(vec, maxBondLength=maxBondLength):
return (int(vec[0] / maxBondLength),
int(vec[1] / maxBondLength),
int(vec[2] / maxBondLength))
def bond_atoms(a1, a2):
if a1.key not in a2.bonds:
a2.bonds.append(a1.key)
if a2.key not in a1.bonds:
a1.bonds.append(a2.key)
buckets = { }
for atom in self.atoms.values():
atom.bonds = [ ] # clear existing bonds
# put this atom in one of the buckets
key = quantize(atom.posn())
try:
buckets[key].append(atom)
except KeyError:
buckets[key] = [ atom ]
def region(center):
lst = [ ]
x0, y0, z0 = quantize(center)
for x in range(x0 - 1, x0 + 2):
for y in range(y0 - 1, y0 + 2):
for z in range(z0 - 1, z0 + 2):
key = (x, y, z)
try:
lst += buckets[key]
except KeyError:
pass
return lst
for atm1 in self.atoms.values():
for atm2 in region(atm1.posn()):
bondLen = vlen(atm1.posn() - atm2.posn())
idealBondLen = atm1.atomtype.rcovalent + atm2.atomtype.rcovalent
a = 0.2
if (1-a) * idealBondLen < bondLen < (1+a) * idealBondLen:
bond_atoms(atm1, atm2)
atm1.hybridize()
def assignBases(self):
self.inferBonds()
remainingKeys = self.atoms.keys()
while len(remainingKeys) > 0:
baseKey = remainingKeys[0]
print "Base", baseKey
base = Base(self, baseKey)
self.bases.append(base)
remainingKeys = remainingKeys[1:]
base.grow()
for key in base.keys():
if key in remainingKeys:
remainingKeys.remove(key)
def baseSort(self):
self.bases.sort()
self.renumberAtoms(lambda a1, a2: cmp(a1.base, a2.base))
def renumberAtoms(self, sortfunc=None):
# Renumber their keys, and recompute bonds with new keys
atomlist = self.atoms.values()
if sortfunc != None:
atomlist.sort(sortfunc)
self.atoms = { }
self.nextKey = 1
for i in range(len(atomlist)):
self.addAtom(atomlist[i])
self.inferBonds()
def filter(self, filt):
s = Strand()
for a in self.atoms.values():
if filt(a):
s.addAtom(a.clone())
s.inferBonds()
return s
def writeManyMmps(self, specs, tfm0, tfm):
# discard tiny "bases" and any atoms in them
tinybases = filter(lambda b: len(b) < 6, self.bases)
for b in tinybases:
for a in b.atomlist:
del self.atoms[a.key]
self.bases.remove(b)
# sort bases in order of decreasing phosphorus z coord
self.baseSort()
for index, groupname, filename in specs:
basekey = self.bases[index].key
base = self.filter(lambda a: a.base == basekey)
def tfm2(x, y, z, tfm0=tfm0, tfm=tfm, index=index):
v = V(x,y,z)
v = tfm0(v)
while index:
v = tfm(v)
index -= 1
return tuple(v)
base.transform(tfm2)
base.writeMmp(filename, groupname)
mmptext = """mmpformat 050920 required; 060421 preferred
kelvin 300
group (View Data)
info opengroup open = True
csys (HomeView) (1.000000, 0.000000, 0.000000, 0.000000) (10.000000) (0.000000, 0.000000, 0.000000) (1.000000)
csys (LastView) (1.000000, 0.000000, 0.000000, 0.000000) (8.153929) (0.000000, 0.000000, 0.000000) (1.000000)
egroup (View Data)
group (%(groupname)s)
info opengroup open = True
%(text)s
egroup (%(groupname)s)
end1
group (Clipboard)
info opengroup open = False
egroup (Clipboard)
end molecular machine part %(groupname)s
"""
def writeMmp(self, filename, groupname=None):
s = ""
thisgroup = None
for a in self.atoms.values():
if groupname == None:
if thisgroup != a.base:
s += "mol (Strand %d) def\n" % a.base
thisgroup = a.base
s += ("atom %d (%d) (%d, %d, %d) def\n" %
(a.key, a.atomtype.number,
int(1000 * a.x), int(1000 * a.y), int(1000 * a.z)))
if a.hybridization != None:
s += "info atom atomtype = " + a.hybridization + "\n"
bstr = ""
for b in a.bonds:
if b < a.key:
bstr += " " + repr(b)
if bstr:
s += "bond1" + bstr + "\n"
if groupname != None:
s = "mol (" + groupname + ") def\n" + s
outf = open(filename, "w")
outf.write(self.mmptext % {"groupname": groupname, "text": s[:-1]})
outf.close()
########################################
if (__name__ == '__main__'):
g = Strand('strund1.mmp')
g.assignBases()
g.baseSort()
if True:
specs = [
(0, 'cytosine', 'cytosine-inner.mmp'),
(1, 'guanine', 'guanine-outer.mmp'),
(3, 'adenine', 'adenine-outer.mmp'),
(4, 'adenine', 'adenine-inner.mmp'),
(6, 'thymine', 'thymine-inner.mmp'),
(7, 'thymine', 'thymine-outer.mmp'),
(9, 'cytosine', 'cytosine-outer.mmp'),
(10, 'guanine', 'guanine-inner.mmp'),
]
k = [ 0 ]
def tfm0(v, k=k):
k[0] = 0
return V(v[0], v[1], v[2] - 20.2)
def tfm(v, k=k):
angle = pi / 6
x, y, z = tuple(v)
c, s = cos(angle), sin(angle)
x, y = c * x + s * y, -s * x + c * y
if (k[0] & 1) == 0:
zdiff = 1.67
else:
zdiff = 5.76
k[0] += 1
return V(x, y, z + zdiff)
g.writeManyMmps(specs, tfm0, tfm)
else:
g.writeMmp('groups.mmp', None)
| NanoCAD-master | cad/plugins/DNA/zdna-bases/prepare.py |
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
This program can be used to translate cartesian coordinates of PAM5
strut ends into the basis necessary to generate gromacs virtual
particles (which are used to represent the ends of those struts).
First it reads the locations of the three real atoms which represent
and define the base pair plane, and the coordinate basis within that
plane. Those atoms are the groove (a Gv5 or Gr5 pseudo-atom), which
is at the origin of the virtual particle basis; an Ss5 referred to as
'a'; and another Ss5 referred to as 'b'. In a normal strand of BDNA,
the two 'a' sugars on a stacked pair of base pairs are farther apart
than the 'b' sugars. The 'a' sugars are on opposite strands from each
other.
Next, a set of positions of strut ends are read. These should be
coplanar to the three atoms read in above.  The vector 'va'
represents the displacement from the groove atom to sugar 'a', and
'vb' represents the displacement from the groove atom to sugar 'b'.
The location of the strut end is then (groove + A*va + B*vb).
The three coefficients A, B, and C necessary to reach the strut end
are printed. The third basis vector is the cross product of the first
two.  For strut ends which are coplanar, C should be negligible.  The
length of the displacement from the read strut end position to the one
calculated based on the new basis is printed as 'error'. This same
quantity calculated without the C component is printed as 'error2d'.
Both error values should be small.
"""
import sys
import math
from Numeric import array
from LinearAlgebra import inverse
# a nice upgrade would be to read values selected from the NE1 history
# buffer, which look like this:
#
# [X = 20.260] [Y = 7.700] [Z = 3.480]
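# A minimal sketch of that suggested upgrade: parse a coordinate line copied
# from the NE1 history buffer, e.g. "[X = 20.260] [Y = 7.700] [Z = 3.480]".
# Not used by the code below; readVector() still expects plain "x y z" input.
def readHistoryVector(line):
    import re
    matches = re.findall(r"\[\s*[XYZ]\s*=\s*([-+0-9.eE]+)\s*\]", line)
    if len(matches) != 3:
        raise ValueError("expected three [X/Y/Z = value] fields: " + repr(line))
    return [float(v) for v in matches]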
def readVector():
line = sys.stdin.readline()
(xs, ys, zs) = line.strip().split(" ")
return [float(xs), float(ys), float(zs)]
# most of the routines below can probably be replaced with Numeric
# calls...
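# For example (a hedged sketch, assuming the Numeric API of that era):
#   vsub(a, b)  ~  array(a) - array(b)
#   vadd(a, b)  ~  array(a) + array(b)
#   vdot(a, b)  ~  Numeric.dot(array(a), array(b))
# vcross() is kept as an explicit helper below.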
def vsub(a, b):
return [a[0]-b[0], a[1]-b[1], a[2]-b[2]]
def vadd(a, b):
return [a[0]+b[0], a[1]+b[1], a[2]+b[2]]
def vdot(a, b):
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
def vmulk(a, k):
return [a[0]*k, a[1]*k, a[2]*k]
def vcross(a, b):
return [a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0]]
print "enter groove position"
g = readVector()
print "enter a"
a = readVector()
print "enter b"
b = readVector()
va = vsub(a, g)
vb = vsub(b, g)
vc = vcross(va, vb)
mat = array([[va[0], vb[0], vc[0]], [va[1], vb[1], vc[1]], [va[2], vb[2], vc[2]]])
inv = inverse(mat)
while (True):
print "enter vector"
v = readVector()
vv = vsub(v, g)
A = vv[0]*inv[0, 0] + vv[1]*inv[0, 1] + vv[2]*inv[0, 2]
B = vv[0]*inv[1, 0] + vv[1]*inv[1, 1] + vv[2]*inv[1, 2]
C = vv[0]*inv[2, 0] + vv[1]*inv[2, 1] + vv[2]*inv[2, 2]
vcalc1 = vadd(vmulk(va, A), vmulk(vb, B))
vcalc = vadd(vcalc1, vmulk(vc, C))
delta = vsub(vv, vcalc)
error = math.sqrt(vdot(delta, delta))
delta2d = vsub(vv, vcalc1)
error2d = math.sqrt(vdot(delta2d, delta2d))
print "A: %f B: %f C: %f error: %e error2d: %e" % (A, B, C, error, error2d)
| NanoCAD-master | cad/plugins/NanoDynamics-1/TranslateStruts.py |