python_code | repo_name | file_path |
---|---|---|
import argparse
import mlflow
import torch
from aml import CropSegChipsDataModule
from models import ModelPlusSigmoid, SegmentationModel
from pytorch_lightning import Trainer
def parse_args():
# setup arg parser
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument("--dataset", type=str)
parser.add_argument("--onnx_model_path", type=str)
parser.add_argument("--model_dir", type=str, default="./")
parser.add_argument("--ndvi_stack_bands", type=int, default=37)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--max_epochs", type=int, default=10)
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--weight_decay", type=float, default=0.001)
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--num_gpus", type=int, default=1)
# parse args
args = parser.parse_args()
# return args
return args
def main(args: argparse.Namespace):
# Setup DataLoader
data = CropSegChipsDataModule(
data_dir=args.dataset, batch_size=args.batch_size, num_workers=args.num_workers
)
data.setup()
# Setup Segmentation Model
model = SegmentationModel(
lr=args.learning_rate,
weight_decay=args.weight_decay,
in_channels=args.ndvi_stack_bands,
num_epochs=args.max_epochs,
classes=1,
)
# Enables logging
mlflow.pytorch.autolog(log_models=False)
# Train
trainer = Trainer(max_epochs=args.max_epochs, accelerator="gpu", devices=args.num_gpus)
trainer.fit(model, data)
# Signature
batch = next(iter(data.train_dataloader()))
ndvi_batch = batch["image"]
ndvi_sample = ndvi_batch[0:1, :, :, :].numpy()
# Set model to inference mode before exporting to ONNX
trace_model = ModelPlusSigmoid(model).eval()
dummy_input = torch.randn(
args.batch_size, args.ndvi_stack_bands, ndvi_sample.shape[-2], ndvi_sample.shape[-1]
)
# Export the model
torch.onnx.export(
trace_model,
dummy_input, # model example input
args.onnx_model_path, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
do_constant_folding=True, # whether to execute constant folding for optimization
opset_version=11,
input_names=["ndvi_stack"], # the model's input names
output_names=["seg_map"], # the model's output names
dynamic_axes={
"ndvi_stack": {0: "batch_size"}, # variable length axes
"seg_map": {0: "batch_size"},
},
)
# Stop Logging
mlflow.end_run()
if __name__ == "__main__":
# parse args
args = parse_args()
# run main function
main(args)
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/aml_train_script.py |
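# Hedged usage sketch (not part of the original script): loading the ONNX model exported
# above with onnxruntime and running it on a batch of a different size, to illustrate the
# dynamic "batch_size" axis declared in torch.onnx.export. The model path and tensor
# shapes below are assumptions for illustration only.
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("segmentation.onnx", None)  # hypothetical path
dummy = np.random.rand(2, 37, 256, 256).astype(np.float32)  # (batch, bands, H, W) assumed
outputs = session.run(["seg_map"], input_feed={"ndvi_stack": dummy})
print(outputs[0].shape)  # the leading dimension should match the input batch size (2)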
from itertools import chain
CROP_INDICES = [e for e in chain(range(1, 81), range(196, 256))]
# 24 - Winter Wheat: 1127411
# 36 - Alfalfa: 877421
# 43 - Potatoes: 497398
# 37 - Other Hay/Non Alfalfa: 460732
# 68 - Apples: 416528
# 1 - Corn: 396329
# 23 - Spring Wheat: 150973
# 69 - Grapes: 124028
# 42 - Dry Beans: 118422
# 59 - Sod/Grass Seed: 115036
# 12 - Sweet Corn: 100565
WINTER_WHEAT_INDEX = [24]
ALFALFA_INDEX = [36]
POTATO_INDEX = [43]
OTHER_HAY_INDEX = [37]
APPLES_INDEX = [68]
CORN_INDEX = [1]
SPRING_WHEAT_INDEX = [23]
GRAPES_INDEX = [69]
DRY_BEANS_INDEX = [42]
SOD_INDEX = [59]
SWEET_CORN_INDEX = [12]
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/constants.py |
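# Hedged usage sketch: the crop index constants above are plain lists, so several crops can
# be combined by list concatenation before being passed as `positive_indices` (as noted in
# the CropSegDataModule docstring in modules.py). Purely illustrative.
from notebook_lib import constants

positive_indices = constants.POTATO_INDEX + constants.CORN_INDEX  # [43, 1]
print(positive_indices)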
import json
from itertools import groupby
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast
import numpy as np
import rasterio
import rasterio.merge
import torch
from rasterio.crs import CRS
from rasterio.errors import RasterioIOError
from rasterio.vrt import WarpedVRT
from rasterio.windows import from_bounds
from rtree.index import Index, Property
from torch import Tensor
from torchgeo.datasets import BoundingBox, RasterDataset
from vibe_core.data import Raster
from vibe_core.data.rasters import CategoricalRaster
class NDVIDataset(RasterDataset):
#: Color map for the dataset, used for plotting
cmap: Dict[int, Tuple[int, int, int, int]] = {}
def __init__(
self,
ndvi_rasters: List[Raster],
stack_n_bands: int = 37,
crs: Optional[CRS] = None,
res: Optional[float] = None,
transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
cache: bool = True,
) -> None:
"""Initialize a new Dataset instance.
Args:
ndvi_rasters: list of Rasters output by TerraVibes workflow
stack_n_bands: number of bands of the ndvi stack (available
rasters will be temporally sampled to compose a stack
with this number of bands)
crs: :term:`coordinate reference system (CRS)` to warp to
(defaults to the CRS of the first file found)
res: resolution of the dataset in units of CRS
(defaults to the resolution of the first file found)
transforms: a function/transform that takes an input sample
and returns a transformed version
cache: if True, cache file handle to speed up repeated sampling
Raises:
FileNotFoundError: if none of the provided NDVI rasters can be read
"""
if stack_n_bands > len(ndvi_rasters):
raise ValueError(
f"Number of NDVI rasters must be >= stack_n_bands, found {len(ndvi_rasters)}"
)
self.transforms = transforms
# Create an R-tree to index the dataset
self.index = Index(interleaved=False, properties=Property(dimension=3))
self.index_size = 0
# Sort raster by date
self.ndvi_rasters = sorted(ndvi_rasters, key=lambda x: x.time_range[0])
self.stack_n_bands = stack_n_bands
self.cache = cache
# Read color map
vis_asset = self.ndvi_rasters[0].visualization_asset
with open(vis_asset.local_path) as mtdt:
metadata = json.load(mtdt)
self.cmap = metadata["colormap"]
# Build the index, temporally sampling rasters
for year, grouped_rasters in groupby(self.ndvi_rasters, lambda x: x.time_range[0].year):
# Group rasters by year and find unique dates (might have multiple rasters for a date)
rasters = list(grouped_rasters)
unique_dates = set([raster.time_range[0] for raster in rasters])
n_unique_dates = len(unique_dates)
# Raise exception if we cannot build a stack
if n_unique_dates < self.stack_n_bands:
raise ValueError(
f"{n_unique_dates} unique dates for {year}, "
f"expected at least {self.stack_n_bands}"
)
# Define sampling interval for dates
selected_date_idxs = np.round(
np.linspace(0, n_unique_dates - 1, self.stack_n_bands)
).astype(int)
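# For example (assumed numbers): with 109 unique dates and stack_n_bands=37,
# np.round(np.linspace(0, 108, 37)) picks indices 0, 3, 6, ..., 108, so the selected
# rasters are spread roughly evenly across the year.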
selected_rasters = [rasters[idx] for idx in selected_date_idxs]
# Loop through the selected rasters
for raster in selected_rasters:
try:
self._add_raster_to_index(raster)
except RasterioIOError:
# Skip files that rasterio is unable to read
continue
if self.index_size == 0:
raise FileNotFoundError(
f"Couldn't read {self.__class__.__name__} data from ndvi_rasters"
)
def _add_raster_to_index(self, raster: Raster):
filepath = raster.raster_asset.local_path
with rasterio.open(filepath) as src:
crs = src.crs
res = src.res[0]
with WarpedVRT(src, crs=crs) as vrt:
minx, miny, maxx, maxy = vrt.bounds
start_date, end_date = raster.time_range
coords = (
minx,
maxx,
miny,
maxy,
start_date.timestamp(),
end_date.timestamp(),
)
self.index.insert(self.index_size, coords, filepath)
self.index_size += 1
self._crs = cast(CRS, crs)
self.res = cast(float, res)
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
hit_samples = [hit for hit in self.index.intersection(tuple(query), objects=True)]
if not hit_samples:
raise IndexError(f"query: {query} not found in index with bounds: {self.bounds}")
filepaths: List[str] = [hit.object for hit in hit_samples] # type:ignore
maxt_timestamp = [hit.bounds[-1] for hit in hit_samples] # type:ignore
data_list: List[Tensor] = []
spatial_merge_list: List[str] = []
merge_timestamp = maxt_timestamp[0]
for filepath, ts in zip(filepaths, maxt_timestamp):
# if date matches the merge_timestamp, add the raster to be merged
if ts == merge_timestamp:
spatial_merge_list.append(filepath)
merge_timestamp = ts
else: # date changed, merge rasters and add new raster to the list
data_list.append(self._spatial_merge_files(spatial_merge_list, query))
spatial_merge_list = [filepath]
merge_timestamp = ts
# merge the remaining rasters
data_list.append(self._spatial_merge_files(spatial_merge_list, query))
# Stack ndvi rasters in the channel dimension
data = torch.cat(data_list, dim=0)
sample = {"image": data, "crs": self.crs, "bbox": query}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def _spatial_merge_files(self, filepaths: Sequence[str], query: BoundingBox) -> Tensor:
"""Load and spatially merge one or more files.
Args:
filepaths: one or more files to load and merge
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
image/mask at that index
"""
if self.cache:
vrt_fhs = [self._cached_load_warp_file(fp) for fp in filepaths]
else:
vrt_fhs = [self._load_warp_file(fp) for fp in filepaths]
bounds = (query.minx, query.miny, query.maxx, query.maxy)
if len(vrt_fhs) == 1:
src = vrt_fhs[0]
out_width = int(round((query.maxx - query.minx) / self.res))
out_height = int(round((query.maxy - query.miny) / self.res))
out_shape = (src.count, out_height, out_width)
dest = src.read(out_shape=out_shape, window=from_bounds(*bounds, src.transform))
else:
dest, _ = rasterio.merge.merge(vrt_fhs, bounds, self.res)
# fix numpy dtypes which are not supported by pytorch tensors
if dest.dtype == np.uint16:
dest = dest.astype(np.int32)
elif dest.dtype == np.uint32:
dest = dest.astype(np.int64)
tensor = torch.tensor(dest)
return tensor
class CDLMask(RasterDataset):
"""
Binary mask dataset based on a chosen subset of CDL indices that serve as the positive class.
"""
is_image = False
def __init__(
self,
cdl_rasters: List[CategoricalRaster],
positive_indices: Sequence[int],
crs: Optional[CRS] = None,
res: Optional[float] = None,
transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
cache: bool = True,
):
"""Initialize a new Dataset instance.
Args:
cdl_rasters: list of Rasters output by TerraVibes workflow
positive_indices: crop indices to consider as the positive label
crs: :term:`coordinate reference system (CRS)` to warp to
(defaults to the CRS of the first file found)
res: resolution of the dataset in units of CRS
(defaults to the resolution of the first file found)
transforms: a function/transform that takes an input sample
and returns a transformed version
cache: if True, cache file handle to speed up repeated sampling
Raises:
FileNotFoundError: if none of the provided CDL rasters can be read
"""
self.positive_indices = torch.as_tensor(positive_indices)
self.transforms = transforms
self.cdl_rasters = sorted(cdl_rasters, key=lambda x: x.time_range[0])
self.cache = cache
# Read color map
vis_asset = self.cdl_rasters[0].visualization_asset
with open(vis_asset.local_path) as mtdt:
metadata = json.load(mtdt)
self.cmap = metadata["colormap"]
# Create an R-tree to index the dataset
self.index = Index(interleaved=False, properties=Property(dimension=3))
# Populate the dataset index
sample_idx = 0
for raster in self.cdl_rasters:
filepath = raster.raster_asset.local_path
try:
with rasterio.open(filepath) as src:
crs = src.crs
res = src.res[0]
with WarpedVRT(src, crs=crs) as vrt:
minx, miny, maxx, maxy = vrt.bounds
except RasterioIOError:
# Skip files that rasterio is unable to read
continue
else:
start_date, end_date = raster.time_range
coords = (
minx,
maxx,
miny,
maxy,
start_date.timestamp(),
end_date.timestamp(),
)
self.index.insert(sample_idx, coords, filepath)
sample_idx += 1
if sample_idx == 0:
raise FileNotFoundError(
f"Couldn't read {self.__class__.__name__} data from ndvi_rasters"
)
self._crs = cast(CRS, crs)
self.res = cast(float, res)
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
sample = super().__getitem__(query)
sample["mask"] = torch.isin(sample["mask"], self.positive_indices).to(torch.float32)
return sample
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/datasets.py |
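# Hedged usage sketch: combining the two datasets above into a torchgeo intersection
# dataset (via "&", as done in CropSegDataModule) and querying it with a BoundingBox.
# The raster lists, import path, and query extent are assumptions for illustration;
# ndvi_rasters and cdl_rasters are placeholders for TerraVibes workflow outputs.
from torchgeo.datasets import BoundingBox
from notebook_lib.datasets import CDLMask, NDVIDataset  # assumed import path

ndvi_ds = NDVIDataset(ndvi_rasters, stack_n_bands=37)  # ndvi_rasters: workflow output (assumed)
cdl_ds = CDLMask(cdl_rasters, positive_indices=[1])    # cdl_rasters: workflow output (assumed)
dataset = ndvi_ds & cdl_ds  # spatio-temporal intersection
query = BoundingBox(*dataset.bounds)  # whole extent; samplers normally produce smaller, single-year chips
sample = dataset[query]
print(sample["image"].shape, sample["mask"].shape)  # roughly (bands, H, W) and (1, H, W) expected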
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/__init__.py |
import glob
import os
from typing import Optional
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, Dataset
# Python 3.8 does not support subscripted generics for classes not in typing
class CropSegChipDataset(Dataset): # type: ignore
"""
Dataset for AML training/inference for NDVI and CDL chips/patches stored locally.
"""
def __init__(self, data_dir: str):
self.data_dir = data_dir
self.ndvi_paths = glob.glob(os.path.join(self.data_dir, "ndvi", "*.pt"))
self.cdl_paths = [path.replace("ndvi", "cdl") for path in self.ndvi_paths]
def __getitem__(self, index: int):
ndvi = torch.load(self.ndvi_paths[index])
cdl = torch.load(self.cdl_paths[index])
sample = {"image": ndvi, "mask": cdl}
return sample
def __len__(self):
return len(self.ndvi_paths)
class CropSegChipsDataModule(pl.LightningDataModule):
def __init__(
self,
data_dir: str,
batch_size: int = 16,
num_workers: int = 4,
):
"""
Init a Crop Segmentation Data Module instance for pre-generated chips
Args:
data_dir: dir with train and val folders where respective ndvi and cdl maps are
stored (e.g., aml/dataset)
batch_size: how many samples are fed to the network in a single batch.
num_workers: how many worker processes to use in the data loader.
"""
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.num_workers = num_workers
def prepare_data(self) -> None:
# Skipping prepare_data as data is already downloaded
pass
def setup(self, stage: Optional[str] = None):
train_dir = os.path.join(self.data_dir, "train")
self.train_dataset = CropSegChipDataset(train_dir)
val_dir = os.path.join(self.data_dir, "val")
self.val_dataset = CropSegChipDataset(val_dir)
def _get_dataloader(
self, dataset: CropSegChipDataset, shuffle: bool
) -> DataLoader: # type: ignore
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=shuffle,
num_workers=self.num_workers,
prefetch_factor=5 if self.num_workers else 2,
)
def train_dataloader(self) -> DataLoader: # type: ignore
return self._get_dataloader(self.train_dataset, shuffle=True)
def val_dataloader(self) -> DataLoader: # type: ignore
return self._get_dataloader(self.val_dataset, shuffle=False)
def test_dataloader(self) -> DataLoader: # type: ignore
return self.val_dataloader()
def predict_dataloader(self) -> DataLoader: # type: ignore
return self.val_dataloader()
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/aml.py |
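# Hedged usage sketch: expected on-disk layout and instantiation of the data module,
# matching the docstring above (directory names are the assumed convention).
#
#   aml/dataset/
#     train/ndvi/0001.pt ... train/cdl/0001.pt
#     val/ndvi/0001.pt   ... val/cdl/0001.pt
#
from notebook_lib.aml import CropSegChipsDataModule  # assumed import path

dm = CropSegChipsDataModule(data_dir="aml/dataset", batch_size=16, num_workers=4)
dm.setup()
batch = next(iter(dm.train_dataloader()))
print(batch["image"].shape, batch["mask"].shape)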
import io
from datetime import datetime
import matplotlib.pyplot as plt
from IPython.display import Image, display
from shapely import geometry as shpg
from torchgeo.datasets import BoundingBox
def bbox_to_shapely(bbox: BoundingBox) -> shpg.Polygon:
"""
Convert from torchgeo's BoundingBox to a shapely polygon
"""
return shpg.box(bbox.minx, bbox.miny, bbox.maxx, bbox.maxy)
def format_timestamp(timestamp: float) -> str:
return datetime.fromtimestamp(timestamp).strftime("%Y/%m/%d")
def lw_plot():
"""
Compress images to make notebook smaller
"""
iobytes = io.BytesIO()
plt.savefig(iobytes, format="jpg", bbox_inches="tight")
plt.close()
iobytes.seek(0)
display(Image(data=iobytes.read()))
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/utils.py |
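# Hedged usage sketch: lw_plot() is meant to replace plt.show() inside the notebooks so
# figures are stored as compressed JPEGs instead of heavy inline images. The plotted data
# below is illustrative only; this is intended to run inside a notebook.
import matplotlib.pyplot as plt
from notebook_lib.utils import lw_plot  # assumed import path

plt.plot([0, 1, 2], [0.2, 0.5, 0.4])
plt.title("NDVI over time (illustrative)")
lw_plot()  # saves the current figure to an in-memory JPEG and displays it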
import os
from datetime import datetime
from math import ceil, log10
from typing import Any, Iterator, List, Optional, Tuple
import pytorch_lightning as pl
from torch import save as torch_save
from torch.utils.data import DataLoader
from torchgeo.datasets import BoundingBox, GeoDataset
from torchgeo.datasets.utils import stack_samples
from torchgeo.samplers import GridGeoSampler, RandomGeoSampler
from torchgeo.samplers.single import GeoSampler
from tqdm import tqdm
from vibe_core.data import CategoricalRaster, Raster
from .constants import CROP_INDICES
from .datasets import CDLMask, NDVIDataset
def save_chips_locally(dataloader: DataLoader, output_dir: str) -> None: # type: ignore
ndvi_path = os.path.join(output_dir, "ndvi")
os.makedirs(ndvi_path, exist_ok=True)
cdl_path = os.path.join(output_dir, "cdl")
os.makedirs(cdl_path, exist_ok=True)
batch = next(iter(dataloader))
batch_size = batch["image"].size(0)
zfill = ceil(log10(len(dataloader) * batch_size))
sample_idx = 1
for batch in tqdm(dataloader):
ndvi_batch, cdl_batch = batch["image"], batch["mask"]
zfill = ceil(log10(len(dataloader) * ndvi_batch.size(0)))
for i in range(ndvi_batch.size(0)):
torch_save(
ndvi_batch[i, :, :, :].clone(),
os.path.join(ndvi_path, f"{sample_idx + i:0>{zfill}}.pt"),
)
torch_save(
cdl_batch[i, :, :, :].clone(),
os.path.join(cdl_path, f"{sample_idx + i:0>{zfill}}.pt"),
)
sample_idx += ndvi_batch.size(0)
def year_bbox(bbox: BoundingBox) -> BoundingBox:
"""Method that set the bounding box's
mint and maxt to comprise the whole year
"""
year = datetime.fromtimestamp(bbox.mint).year
bounding_box = BoundingBox(
minx=bbox.minx,
maxx=bbox.maxx,
miny=bbox.miny,
maxy=bbox.maxy,
mint=datetime(year, 1, 1).timestamp(),
maxt=datetime(year + 1, 1, 1).timestamp() - 1,
)
return bounding_box
class YearRandomGeoSampler(RandomGeoSampler):
"""Samples elements from a region of interest randomly.
The main difference to RandomGeoSampler is that we explicitly
alter the time range to fetch all samples from a single year.
This is required for sampling a stacked NDVI from NDVIDataset
"""
def __iter__(self) -> Iterator[BoundingBox]:
for bbox in super().__iter__():
yield year_bbox(bbox)
class YearGridGeoSampler(GridGeoSampler):
"""Samples elements in a grid-like fashion.
The main difference to GridGeoSampler is that we explicitly
alter the time range to fetch all samples from a single year.
This is required for sampling a stacked NDVI from NDVIDataset
"""
def __iter__(self) -> Iterator[BoundingBox]:
for bbox in super().__iter__():
yield year_bbox(bbox)
class CropSegDataModule(pl.LightningDataModule):
def __init__(
self,
ndvi_rasters: List[Raster],
cdl_rasters: List[CategoricalRaster],
ndvi_stack_bands: int = 37,
img_size: Tuple[int, int] = (256, 256),
epoch_size: int = 1024,
batch_size: int = 16,
num_workers: int = 4,
val_ratio: float = 0.2,
positive_indices: List[int] = CROP_INDICES,
train_years: List[int] = [2020],
val_years: List[int] = [2020],
):
"""
Init a CropSegDataModule instance
Args:
ndvi_rasters: NDVI rasters generated by TerraVibes workflow
cdl_rasters: CDL maps downloaded by TerraVibes workflow
ndvi_stack_bands: how many daily NDVI maps will be stacked as training input.
img_size: tuple that defines the size of each chip that is fed to the network.
epoch_size: how many samples are sampled during training for one epoch.
batch_size: how many samples are fed to the network in a single batch.
num_workers: how many worker processes to use in the data loader.
val_ratio: how much of the data to separate for validation.
positive_indices: which CDL indices are considered as positive samples.
Crop types with a minimum of 1e5 pixels in the RoI are available
in the module `notebook_lib.constants`. You can combine multiple
constants by adding them (e.g., `constants.POTATO_INDEX + constants.CORN_INDEX`)
train_years: years used for training.
val_years: years used for validation.
"""
super().__init__()
self.ndvi_rasters = ndvi_rasters
self.cdl_rasters = cdl_rasters
self.img_size = img_size
self.batch_size = batch_size
self.num_workers = num_workers
self.epoch_size = epoch_size
self.val_ratio = val_ratio
self.positive_indices = positive_indices
self.train_years = train_years
self.val_years = val_years
self.years = list(set(self.train_years) | set(self.val_years))
self.ndvi_stack_bands = ndvi_stack_bands
def prepare_data(self) -> None:
# Skipping prepare_data as TerraVibes has already downloaded it
pass
def setup(self, stage: Optional[str] = None):
input_dataset = NDVIDataset(
self.ndvi_rasters,
self.ndvi_stack_bands,
)
target_dataset = CDLMask(
self.cdl_rasters,
positive_indices=self.positive_indices,
)
self.train_dataset = input_dataset & target_dataset # Intersection dataset
# Use the same dataset for training and validation, use different RoIs
self.val_dataset = self.train_dataset
self.test_dataset = self.train_dataset
def _get_dataloader(
self, dataset: GeoDataset, sampler: GeoSampler
) -> DataLoader: # type: ignore
return DataLoader(
dataset,
batch_size=self.batch_size,
sampler=sampler,
num_workers=self.num_workers,
prefetch_factor=5 if self.num_workers else 2,
collate_fn=stack_samples,
)
def _get_split_roi(self, ref_dataset: GeoDataset):
minx, maxx, miny, maxy, _, _ = ref_dataset.bounds
width = ref_dataset.bounds.maxx - ref_dataset.bounds.minx
height = ref_dataset.bounds.maxy - ref_dataset.bounds.miny
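# Split the RoI along its longer spatial dimension: the first (1 - val_ratio) fraction
# is used for training and the remaining val_ratio fraction for validation.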
if height > width:
train_x = maxx
val_x = minx
train_y = maxy - self.val_ratio * height
val_y = maxy - self.val_ratio * height
else:
train_x = maxx - self.val_ratio * width
val_x = maxx - self.val_ratio * width
train_y = maxy
val_y = miny
train_mint = datetime(min(self.train_years), 1, 1).timestamp()
train_maxt = datetime(max(self.train_years) + 1, 1, 1).timestamp() - 1
val_mint = datetime(min(self.val_years), 1, 1).timestamp()
val_maxt = datetime(max(self.val_years) + 1, 1, 1).timestamp() - 1
train_roi = BoundingBox(minx, train_x, miny, train_y, train_mint, train_maxt)
val_roi = BoundingBox(val_x, maxx, val_y, maxy, val_mint, val_maxt)
return train_roi, val_roi
def train_dataloader(self) -> DataLoader: # type: ignore
# Use the first dataset as index source
train_roi, _ = self._get_split_roi(self.train_dataset)
sampler = YearRandomGeoSampler(
self.train_dataset,
size=self.img_size,
length=self.epoch_size,
roi=train_roi,
)
return self._get_dataloader(self.train_dataset, sampler)
def val_dataloader(self) -> DataLoader: # type: ignore
_, val_roi = self._get_split_roi(self.val_dataset)
sampler = YearGridGeoSampler(
self.val_dataset,
size=self.img_size,
stride=self.img_size,
roi=val_roi,
)
return self._get_dataloader(self.val_dataset, sampler)
def test_dataloader(self) -> DataLoader: # type: ignore
return self.val_dataloader()
def predict_dataloader(self) -> DataLoader: # type: ignore
return self.val_dataloader()
def on_before_batch_transfer(self, batch: Any, dataloader_idx: int):
batch["bbox"] = [(a for a in b) for b in batch["bbox"]]
return batch
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/modules.py |
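# Hedged usage sketch: building the on-the-fly data module from workflow rasters and
# materializing fixed chips for the AML training script via save_chips_locally above.
# Raster lists, the crop constant, the import paths, and the output directories are
# assumptions for illustration; ndvi_rasters and cdl_rasters are workflow outputs.
from notebook_lib.constants import CORN_INDEX  # assumed import path
from notebook_lib.modules import CropSegDataModule, save_chips_locally  # assumed import path

dm = CropSegDataModule(
    ndvi_rasters,
    cdl_rasters,
    positive_indices=CORN_INDEX,
    img_size=(256, 256),
    epoch_size=1024,
    batch_size=16,
)
dm.setup()
save_chips_locally(dm.train_dataloader(), output_dir="aml/dataset/train")
save_chips_locally(dm.val_dataloader(), output_dir="aml/dataset/val")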
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/__init__.py |
from torch import nn, Tensor
class TimeDistributed(nn.Module):
def __init__(self, module: nn.Module, batch_first: bool = False):
super().__init__()
self.module = module
self.batch_first = batch_first
def forward(self, x: Tensor):
if len(x.size()) <= 2:
return self.module(x)
# Squash samples and timesteps into a single axis
# COMMENT: Can use rearrange here :)
x_reshape = x.contiguous().view(-1, x.size(-1)) # (samples * timesteps, input_size)
y = self.module(x_reshape)
# We have to reshape Y
if self.batch_first:
y = y.contiguous().view(x.size(0), -1, y.size(-1)) # (samples, timesteps, output_size)
else:
y = y.view(-1, x.size(1), y.size(-1)) # (timesteps, samples, output_size)
return y
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/time.py |
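# Hedged usage sketch: TimeDistributed applies a module independently to every time step
# of a (batch, timesteps, features) tensor. Shapes below are illustrative; the import
# path is an assumption.
import torch
from torch import nn
from notebook_lib.time import TimeDistributed  # assumed import path

layer = TimeDistributed(nn.Linear(8, 4), batch_first=True)
x = torch.randn(2, 10, 8)  # (samples, timesteps, input_size)
y = layer(x)
print(y.shape)  # torch.Size([2, 10, 4])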
from keras.layers import BatchNormalization, Dense, Input
from keras.models import Sequential
from keras.utils.vis_utils import plot_model
def simple_mixture_model(inshape: int):
model = Sequential()
model.add(Input(shape=(inshape,)))
model.add(Dense(inshape * 2, activation="relu"))
model.add(BatchNormalization())
model.add(Dense(inshape * 4, activation="relu"))
model.add(BatchNormalization())
model.add(Dense(inshape))
model.compile(loss="mae", optimizer="adam")
return model
def fit_model(model, train_X, train_y, test_X, test_y, batch_size: int):
validation_data = (test_X, test_y)
# fit network
history = model.fit(
train_X,
train_y,
epochs=20,
batch_size=batch_size,
validation_data=validation_data,
verbose=1,
)
return model, history
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/post_models.py |
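# Hedged usage sketch: training the Keras post-processing model on arrays whose second
# dimension equals `inshape`. The array sizes and random data are placeholders.
import numpy as np
from notebook_lib.post_models import fit_model, simple_mixture_model  # assumed import path

inshape = 24
model = simple_mixture_model(inshape)
train_X, train_y = np.random.rand(128, inshape), np.random.rand(128, inshape)
test_X, test_y = np.random.rand(32, inshape), np.random.rand(32, inshape)
model, history = fit_model(model, train_X, train_y, test_X, test_y, batch_size=16)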
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, cast
import numpy as np
import pandas as pd
from shapely.geometry import Point
from vibe_core.client import FarmvibesAiClient, get_default_vibe_client
from vibe_core.datamodel import RunConfigUser, RunConfig
from vibe_core.datamodel import SpatioTemporalJson
class Forecast:
def __init__(
self,
workflow_name: str,
geometry: Point,
time_range: Tuple[datetime, datetime],
parameters: List[Dict[str, str]],
date_column: str = "date",
):
self.client: FarmvibesAiClient = get_default_vibe_client()
self.workflow_name = workflow_name
self.geometry = geometry
self.parameters = parameters
self.time_range = time_range
self.date_column = date_column
def submit_download_request(self):
"""
Submit request to worker to download forecast data
"""
run_list = []
for parameter in self.parameters:
run_name = f"forecast_{parameter['weather_type']}"
run = self.client.run(
workflow=self.workflow_name,
name=run_name,
geometry=self.geometry,
time_range=self.time_range,
parameters=parameter,
)
try:
run.block_until_complete(5)
except RuntimeError:
print(run)
run_list.append(
{
"id": run.id,
"weather_type": parameter["weather_type"],
}
)
return run_list
def get_run_status(self, run_list: List[Dict[str, str]]):
all_done = True
out_ = []
for run_item in run_list:
o = self.client.describe_run(run_item["id"])
print(f"Execution status for {run_item['weather_type']}: {o.details.status}")
if o.details.status == "done":
out_.append(o)
else:
all_done = False
if o.details.status == "failed":
print(o.details)
return all_done, out_
def get_all_assets(self, details: RunConfigUser):
asset_files = []
output = details.output["weather_forecast"]
record: Dict[str, Any]
for record in cast(List[Dict[str, Any]], output):
for _, value in record["assets"].items():
asset_files.append(value["href"])
df_assets = [pd.read_csv(f, index_col=False) for f in asset_files]
df_out = pd.concat(df_assets)
df_out = self.clean_forecast_data(forecast_df=df_out, run_details=details)
return df_out
def get_downloaded_data(self, run_list: List[Dict[str, str]], offset_hours: int = 0):
"""
Check the download status; once all runs are done, fetch the downloaded data.
"""
forecast_dataset = pd.DataFrame()
status = False
out_ = []
while status is False:
status, out_ = self.get_run_status(run_list)
time.sleep(10)
if status:
for detail in out_:
df = self.get_all_assets(detail)
# Offset from UTC to specified timezone
df.index = df.index + pd.offsets.Hour(offset_hours)
if not df.empty:
forecast_dataset = pd.concat([forecast_dataset, df], axis=1)
return forecast_dataset
def clean_forecast_data(
self,
forecast_df: pd.DataFrame,
run_details: RunConfig,
):
df = forecast_df[self.date_column]
assert isinstance(run_details.user_input, SpatioTemporalJson)
start_date: datetime = run_details.user_input.start_date
end_date: datetime = run_details.user_input.end_date
# derive forecast data
forecast_df.drop(columns=[self.date_column], inplace=True)
a = forecast_df.values.tolist()
o = pd.DataFrame([a])
o = o.T
df_date = pd.DataFrame(
data=pd.date_range(start_date, end_date + timedelta(days=1), freq="h"),
columns=[self.date_column],
)
# derive hours
hours = [f"{str(i)}:00:00" for i in range(24)]
list_hours = [hours for _ in range(forecast_df.shape[0])]
assert run_details.parameters is not None, "Parameters are not defined"
# transform forecast data with date and time
df = pd.DataFrame(
data={
self.date_column: df.values,
"time": list_hours,
run_details.parameters["weather_type"]: o[0],
}
)
df = df.explode(column=["time", run_details.parameters["weather_type"]])
df[self.date_column] = df[self.date_column].astype(str) + " " + df["time"]
df[self.date_column] = pd.to_datetime(df[self.date_column].values)
df.drop(columns=["time"], inplace=True)
df = pd.merge(df_date, df, how="left", left_on=self.date_column, right_on=self.date_column)
df.reset_index()
df.set_index(self.date_column, inplace=True)
df.sort_index(ascending=True, inplace=True)
df[run_details.parameters["weather_type"]] = df[
run_details.parameters["weather_type"]
].values.astype(np.float32)
# rename columns with suffix forecast
df.rename(
columns={
run_details.parameters[
"weather_type"
]: f"{run_details.parameters['weather_type']}_forecast"
},
inplace=True,
)
# interpolate to derive missing data
df = df.interpolate(method="from_derivatives")
assert df is not None, "Interpolation deleted all data"
df = df.dropna()
return df
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/forecast.py |
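# Hedged usage sketch: submitting forecast download runs and collecting the results with
# the Forecast helper above. The workflow name, weather_type parameters, coordinates, and
# timezone offset are placeholders, not values taken from the original notebook; a running
# FarmVibes.AI cluster is assumed.
from datetime import datetime
from shapely.geometry import Point
from notebook_lib.forecast import Forecast  # assumed import path

forecast = Forecast(
    workflow_name="<forecast-workflow-name>",  # placeholder
    geometry=Point(-118.0, 46.0),              # placeholder location
    time_range=(datetime(2022, 1, 1), datetime(2022, 1, 7)),
    parameters=[{"weather_type": "temperature"}, {"weather_type": "wind_speed"}],
)
run_list = forecast.submit_download_request()
forecast_df = forecast.get_downloaded_data(run_list, offset_hours=-8)
print(forecast_df.head())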
from typing import Any, Tuple, Union, List
import torch
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from notebook_lib.encoder import Encoder
from notebook_lib.locally_connected import LocallyConnected1d
from torch import nn
class MyLSTM(nn.LSTM):
def forward(self, *args: Any, **kwargs: Any):
return super().forward(*args, **kwargs)[0]
class DeepMCModel(nn.Module):
def __init__(
self,
first_channels: int, # 3
rest_channels: int, # 1
first_encoder_channels: int, # 3
rest_encoder_channels: Tuple[int, int, int], # [4, 8, 16]
sequence_length: int, # 24
kernel_size: int, # 2
num_inputs: int, # 6
encoder_layers: int = 2,
encoder_features: int = 4,
encoder_heads: int = 4,
encoder_ff_features: int = 16,
encoder_dropout: float = 0.1,
decoder_features: Tuple[int, int] = (20, 16),
dropout: float = 0.2,
batch_first: bool = True,
return_sequence: bool = True,
):
super(DeepMCModel, self).__init__()
self.return_sequence = return_sequence
self.num_inputs = num_inputs
out_seq_len = sequence_length - kernel_size + 1
self.encoders = nn.ModuleList(
[
nn.Sequential(
Rearrange("b l d -> b d l"),
LocallyConnected1d(
in_channels=first_channels,
out_channels=first_encoder_channels,
seq_len=sequence_length,
kernel_size=kernel_size,
),
nn.BatchNorm1d(first_encoder_channels),
Rearrange("b d l -> b l d"),
Encoder(
in_features=first_encoder_channels,
num_layers=encoder_layers,
d_model=encoder_features,
num_heads=encoder_heads,
d_ff=encoder_ff_features,
max_seq_len=out_seq_len,
dropout=encoder_dropout,
),
nn.Flatten(),
)
]
)
re1, re2, re3 = rest_encoder_channels
for _ in range(num_inputs - 1):
self.encoders.append(
nn.Sequential(
Rearrange("b l d -> b d l"),
LocallyConnected1d(
in_channels=rest_channels,
out_channels=re1,
seq_len=sequence_length,
kernel_size=kernel_size,
),
nn.ReLU(),
nn.BatchNorm1d(re1),
LocallyConnected1d(
in_channels=re1,
out_channels=re2,
seq_len=out_seq_len,
kernel_size=kernel_size,
),
nn.ReLU(),
nn.BatchNorm1d(re2),
Rearrange("b d l -> b l d"),
nn.Dropout(dropout),
MyLSTM(
input_size=re2,
hidden_size=re3,
num_layers=1,
batch_first=batch_first,
),
# nn.ReLU(), # Do ReLU outside the model
)
)
dec_input_features = out_seq_len * encoder_features + (self.num_inputs - 1) * re3
df1, df2 = decoder_features
self.decoder = nn.Sequential(
nn.BatchNorm1d(dec_input_features),
Rearrange("b d -> b 1 d"),
MyLSTM(input_size=dec_input_features, hidden_size=df1, batch_first=batch_first),
Rearrange("b 1 d -> b d"),
nn.ReLU(),
nn.BatchNorm1d(df1),
nn.Linear(df1, df2),
nn.ReLU(),
nn.Linear(df2, 1),
)
def forward(self, x: Union[torch.Tensor, List[torch.Tensor]]):
sliced_encoders = nn.ModuleList(list(self.encoders)[1:])
x = [self.encoders[0](x[0])] + [
F.relu(encoder(xi)[:, -1]) for encoder, xi in zip(sliced_encoders, x[1:])
]
x = torch.cat(x, dim=1)
x = self.decoder(x)
return x
class DeepMCPostModel(nn.Module):
def __init__(
self,
first_in_features: int,
first_out_features: int = 48,
second_out_features: int = 96,
out_features: int = 24,
) -> None:
super(DeepMCPostModel, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_features=first_in_features, out_features=first_out_features),
nn.ReLU(),
nn.BatchNorm1d(first_out_features),
nn.Linear(in_features=first_out_features, out_features=second_out_features),
nn.ReLU(),
nn.BatchNorm1d(second_out_features),
nn.Linear(in_features=second_out_features, out_features=out_features),
)
def forward(self, x: torch.Tensor):
y_pred = self.model(x)
return y_pred
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/models.py |
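# Hedged usage sketch: DeepMCPostModel is a plain MLP that refines a 24-step forecast
# window. The batch size and random input are illustrative; the import path is assumed.
import torch
from notebook_lib.models import DeepMCPostModel  # assumed import path

post_model = DeepMCPostModel(first_in_features=24)
x = torch.randn(8, 24)
y = post_model(x)
print(y.shape)  # torch.Size([8, 24])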
from math import ceil
from typing import Any, Optional, Tuple
from numpy._typing import NDArray
import numpy as np
import pandas as pd
import pywt
from sklearn.preprocessing import StandardScaler
class Preprocess:
def __init__(
self,
train_scaler: StandardScaler,
output_scaler: StandardScaler,
is_training: bool,
is_validation: bool = False,
ts_lookahead: int = 24,
ts_lookback: int = 24,
chunk_size: int = 528,
wavelet: str = "bior3.5",
mode: str = "periodic",
level: int = 5,
relevant: bool = False,
):
self.train_scaler = train_scaler
self.output_scaler = output_scaler
self.trunc = chunk_size
self.ts_lookback = ts_lookback
self.wavelet = wavelet
self.mode = mode
self.level = level
self.is_training = is_training
self.ts_lookahead = ts_lookahead
self.is_validation = is_validation
self.relevant = relevant
def wavelet_transform_predict(self, df_in: pd.DataFrame, predict: str) -> NDArray[Any]:
i = 1
start = i
end = start
t_test_X = []
test_df = pd.DataFrame(
self.train_scaler.transform(df_in), columns=df_in.columns, index=df_in.index
)
# convert input data to wavelet
while end < test_df.shape[0]:
start = i
end = start + self.trunc
i = i + 1
chunkdataDF = test_df.iloc[start:end]
test_uX, _ = self.convert_df_wavelet_input(data_df=chunkdataDF, predict=predict)
t_test_X.append(test_uX)
test_X = t_test_X[0].copy()
for i in range(1, len(t_test_X)):
for j in range(len(t_test_X[i])):
test_X[j] = np.append(test_X[j], t_test_X[i][j], axis=0)
return test_X
def wavelet_transform_train(
self, train_df: pd.DataFrame, test_df: pd.DataFrame, out_feature: str
) -> Tuple[NDArray[Any], ...]:
t_train_X, t_train_y = self.prepare_wavelet_data(train_df, out_feature=out_feature)
t_test_X, t_test_y = self.prepare_wavelet_data(test_df, out_feature=out_feature)
train_X = t_train_X[0].copy()
train_y = t_train_y[0].copy()
for i in range(1, len(t_train_X)):
train_y = np.append(train_y, t_train_y[i], axis=0)
for j in range(len(t_train_X[i])):
train_X[j] = np.append(train_X[j], t_train_X[i][j], axis=0)
test_X = t_test_X[0].copy()
test_y = t_test_y[0].copy()
for i in range(1, len(t_test_X)):
test_y = np.append(test_y, t_test_y[i], axis=0)
for j in range(len(t_test_X[i])):
test_X[j] = np.append(test_X[j], t_test_X[i][j], axis=0)
return train_X, train_y, test_X, test_y
def prepare_wavelet_data(self, data_df: pd.DataFrame, out_feature: str):
i = 0
start = i * self.trunc
end = start
t_data_x = []
t_data_y = []
while end < data_df.shape[0]:
start = i
end = start + self.trunc
i = i + 1
o_data_df = data_df.iloc[start:end]
data_ux, data_uy = self.convert_df_wavelet_input(
o_data_df,
predict=out_feature,
)
t_data_x.append(data_ux)
t_data_y.append(data_uy)
return t_data_x, t_data_y
def dl_preprocess_data(
self,
df: pd.DataFrame,
predict: str,
per_split: float = 0.8,
training: bool = False,
) -> Tuple[NDArray, Optional[NDArray], Optional[NDArray], Optional[NDArray]]: # type: ignore
"""
Build sliding-window samples from a chunk of time-series data.
Args:
df: input data chunk used to build lookback windows
predict: feature to predict
per_split: train/validation split ratio (used only when is_validation is True)
training: if True, also build lookahead target windows
Returns:
input windows, plus target windows and validation splits when applicable (None otherwise)
"""
n_in = self.ts_lookback
scaled_df = df
data = scaled_df.values.astype(float)
if training:
n_out = self.ts_lookahead
label_df = df.copy()
for column in label_df:
if column != predict:
label_df.drop(columns=column, inplace=True)
label_data = label_df.values
X, y = list(), list()
in_start = 0
# step over the entire history one time step at a time
# reshape input to be 3D [samples, timesteps, features]
for _ in range(len(data)):
# define the end of the input sequence
in_end = in_start + n_in
out_end = in_end + n_out
# ensure we have enough data for this instance
if out_end <= len(data):
X.append(data[in_start:in_end, :])
y.append(label_data[in_end:out_end, :])
# move along one time step
in_start += 1
X = np.array(X)
y = np.array(y)
if self.is_validation is True:
n_train_split = ceil(len(data) * per_split)
train_X, train_y = X[:n_train_split, :, :], y[:n_train_split, :, :]
test_X, test_y = X[n_train_split:, :], y[n_train_split:, :]
return train_X, train_y, test_X, test_y
else:
return X, y, None, None
else:
X = list()
in_start = 0
for _ in range(len(data) - n_in + 1):
in_end = in_start + n_in
if in_end <= len(data):
X.append(data[in_start:in_end, :])
in_start += 1
X = np.array(X)
return X, None, None, None
def convert_df_wavelet_input(self, data_df: pd.DataFrame, predict: str):
if self.relevant:
return self.convert_df_wavelet_input_relevant(data_df, predict)
else:
return self.convert_df_wavelet_input_not_relevant(data_df, predict)
def convert_df_wavelet_input_not_relevant(self, data_df: pd.DataFrame, predict: str):
level = self.level
rd = list()
N = data_df.shape[0]
test_X = list()
if self.is_training:
test_y = self.dl_preprocess_data(
data_df.iloc[-self.ts_lookback - self.ts_lookahead :],
predict=predict,
training=self.is_training,
)[1]
assert test_y is not None
test_y = test_y[[-1], :, :]
data_df = data_df.iloc[: -self.ts_lookahead]
else:
test_y = []
wp5 = pywt.wavedec(data=data_df[predict], wavelet=self.wavelet, mode=self.mode, level=level)
N = data_df.shape[0]
for i in range(1, level + 1):
rd.append(pywt.waverec(wp5[:-i] + [None] * i, wavelet=self.wavelet, mode=self.mode)[:N])
t_test_X = self.dl_preprocess_data(data_df.iloc[-self.ts_lookback :], predict=predict)[0]
test_X.append(t_test_X[[-1], :, :])
wpt_df = data_df[[]].copy()
for i in range(0, level):
wpt_df[predict] = rd[i][:]
t_test_X = self.dl_preprocess_data(wpt_df.iloc[-self.ts_lookback :], predict=predict)[0]
test_X.append(t_test_X[[-1], :, :])
return test_X, test_y
def convert_df_wavelet_input_relevant(self, data_df: pd.DataFrame, predict: str):
rd = list()
test_X = list()
if self.is_training:
test_y = self.dl_preprocess_data(
data_df.iloc[-self.ts_lookback - self.ts_lookahead :],
predict=predict,
training=self.is_training,
)[1]
assert test_y is not None
test_y = test_y[[-1], :, :]
else:
test_y = []
data_df = data_df.iloc[: -self.ts_lookahead]
t_test_X = self.dl_preprocess_data(data_df.iloc[-self.ts_lookback :], predict=predict)[0]
data = data_df[predict]
data = data.append(data_df[predict + "_forecast"].iloc[-self.ts_lookback :]).values
wp5 = pywt.wavedec(data=data, wavelet=self.wavelet, mode=self.mode, level=self.level)
N = data.shape[0]
for i in range(1, self.level + 1):
rd.append(
pywt.waverec(wp5[:-i] + [None] * i, wavelet=self.wavelet, mode=self.mode)[: N - 24]
)
test_X.append(t_test_X[[-1], :, :])
wpt_df = data_df[[]].copy()
for i in range(0, self.level):
wpt_df[predict] = rd[i]
t_test_X = self.dl_preprocess_data(wpt_df.iloc[-self.ts_lookback :], predict=predict)[0]
test_X.append(t_test_X[[-1], :, :])
return test_X, test_y
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/preprocess.py |
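# Hedged sketch of the wavelet trick used in convert_df_wavelet_input above: decompose a
# series with pywt.wavedec, then drop (set to None) the i finest detail levels before
# pywt.waverec to obtain progressively smoother reconstructions of the signal. The
# synthetic series below is an assumption for illustration only.
import numpy as np
import pywt

signal = np.sin(np.linspace(0, 8 * np.pi, 528)) + 0.1 * np.random.randn(528)
coeffs = pywt.wavedec(data=signal, wavelet="bior3.5", mode="periodic", level=5)
reconstructions = []
for i in range(1, 6):
    smoothed = pywt.waverec(coeffs[:-i] + [None] * i, wavelet="bior3.5", mode="periodic")
    reconstructions.append(smoothed[: len(signal)])  # trim possible reconstruction padding
print(len(reconstructions), reconstructions[0].shape)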
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/__init__.py |
from typing import Optional
from torch import nn, Tensor
from notebook_lib.helpers import point_wise_feed_forward_network, positional_encoding
from notebook_lib.transform import MultiHeadAttention
class EncoderLayer(nn.Module):
def __init__(self, d_model: int, num_heads: int, d_ff: int, rate: float):
super().__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(
in_features=d_model, out_features=d_model, d_ff=d_ff
)
self.layernorm1 = nn.LayerNorm(d_model, eps=1e-6)
self.layernorm2 = nn.LayerNorm(d_model, eps=1e-6)
self.dropout1 = nn.Dropout(rate)
self.dropout2 = nn.Dropout(rate)
def forward(self, x: Tensor, mask: Optional[Tensor] = None):
attn_output = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output)
out2 = self.layernorm2(
out1 + ffn_output
) # (batch_size, input_seq_len, d_model)
return out2
class Encoder(nn.Sequential):
def __init__(
self,
in_features: int,
num_layers: int,
d_model: int,
num_heads: int,
d_ff: int,
max_seq_len: int,
dropout: float = 0.1,
):
super().__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = nn.Sequential(nn.Linear(in_features, self.d_model), nn.ReLU())
self.pos_encoding = positional_encoding(max_seq_len, d_model)
self.enc_layers = nn.ModuleList(
[EncoderLayer(d_model, num_heads, d_ff, dropout) for _ in range(num_layers)]
)
self.dropout = nn.Dropout(dropout)
def forward(self, x: Tensor, mask: Optional[Tensor] = None):
seq_len = x.size(1)
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x = x * self.d_model**0.5
x = x + self.pos_encoding[:, :seq_len, :]
x = self.dropout(x)
for enc_layer in self.enc_layers:
x = enc_layer(x, mask)
return x # (batch_size, input_seq_len, d_model)
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/encoder.py |
import os
import pickle
from datetime import datetime, timedelta
from typing import Any, List, cast
import numpy as np
import onnxruntime
import pandas as pd
from numpy._typing import NDArray
from .preprocess import Preprocess
MODEL_SUFFIX = "deepmc."
class InferenceWeather:
def __init__(
self,
root_path: str,
data_export_path: str,
station_name: str,
predicts: List[str],
total_models: int = 24,
feed_interval_minutes: int = 60,
chunk_size: int = 528,
ts_lookback: int = 24,
date_attribute: str = "date",
wavelet: str = "bior3.5",
mode: str = "periodic",
level: int = 5,
relevant: bool = False,
):
if relevant:
self.relevant_text = "relevant"
else:
self.relevant_text = "not-relevant"
self.total_models = total_models
self.ts_lookahead = total_models
self.feed_interval = feed_interval_minutes
self.date_attribute = date_attribute
self.root_path = root_path
self.model_path = self.root_path + f"{station_name}/{self.relevant_text}/model_%s/"
self.post_model_path = self.model_path + "post/"
self.ts_lookback = ts_lookback
self.chunk_size = chunk_size
self.wavelet = wavelet
self.mode = mode
self.level = level
self.data_export_path = data_export_path
self.predicts = predicts
self.onnx_file = os.path.join(self.model_path, "export.onnx")
self.post_onnx_file = os.path.join(self.post_model_path, "export.onnx")
self.relevant = relevant
def inference(
self,
input_df: pd.DataFrame,
start_datetime: datetime,
):
cols = self.predicts.copy()
cols.append(self.date_attribute)
df_out = pd.DataFrame(columns=cols)
df_in_1 = input_df[(input_df.index <= start_datetime)].tail(
self.chunk_size + self.total_models
)
if df_in_1.shape[0] < self.chunk_size:
raise RuntimeError(
f"Forecast not done between {start_datetime.strftime('%m/%d/%Y, %H:%M:%S')},"
" since number of input data points less than chunk size"
)
df_out = pd.DataFrame(columns=self.predicts)
df_out = self.run_predict(
df_in=df_in_1,
df_out=df_out,
)
return df_out
def inference_historical(
self,
input_df: pd.DataFrame,
start_datetime: datetime,
end_datetime: datetime,
):
cols = self.predicts.copy()
cols.append(self.date_attribute)
df_out = pd.DataFrame(columns=cols)
df_in = input_df[(input_df.index > (start_datetime - timedelta(hours=(self.chunk_size))))]
if df_in.shape[0] < self.chunk_size:
raise RuntimeError(
f"Forecast not done between {start_datetime.strftime('%m/%d/%Y, %H:%M:%S')}"
f" and {end_datetime.strftime('%m/%d/%Y, %H:%M:%S')}, since number of input data"
" points less than chunk size",
)
y_datetime_out = input_df.index[
(input_df.index >= start_datetime) & (input_df.index <= end_datetime)
]
df_all_predict = pd.DataFrame()
# df_out = pd.DataFrame(columns=self.predicts)
for predict in self.predicts:
input_order_df = df_in[df_in.columns].copy()
out_feature_df = input_order_df[predict]
input_order_df.drop(columns=[predict], inplace=True)
input_order_df[predict] = out_feature_df
df_out = self.run_individual_predict_historical(
df_in=input_order_df,
df_out=cast(pd.DatetimeIndex, y_datetime_out),
predict=predict,
)
df_all_predict = pd.concat([df_all_predict, df_out], axis=1)
df_all_predict = df_all_predict.loc[:, ~df_all_predict.columns.duplicated()] # type: ignore
return df_all_predict
def predict(
self, path: str, predict: str, model_idx: int, inputs: NDArray[Any], is_post: bool = False
):
path = path % (predict, model_idx)
session = onnxruntime.InferenceSession(path, None)
if not is_post:
in_ = {
out.name: inputs[i].astype(np.float32) for i, out in enumerate(session.get_inputs())
}
else:
in_ = {
out.name: inputs.astype(np.float32) for i, out in enumerate(session.get_inputs())
}
result = session.run(None, input_feed=in_)[0]
return result
def run_individual_predict(
self,
df_in: pd.DataFrame,
predict: str,
):
df_predict = pd.DataFrame(columns=[predict, self.date_attribute])
interval = self.feed_interval
start_date: datetime = cast(datetime, df_in.index[-1])
with open(self.data_export_path % (predict, self.relevant_text), "rb") as f:
train_scaler, output_scaler = pickle.load(f)[4:6]
preprocess = Preprocess(
train_scaler=train_scaler,
output_scaler=output_scaler,
is_training=False,
ts_lookahead=self.ts_lookahead,
ts_lookback=self.ts_lookback,
chunk_size=self.chunk_size,
wavelet=self.wavelet,
mode=self.mode,
level=self.level,
relevant=self.relevant,
)
test_X = preprocess.wavelet_transform_predict(df_in=df_in, predict=predict)
time_arr = []
post_yhat = np.empty([1, self.ts_lookahead, self.ts_lookahead])
for idx in range(0, self.total_models):
out_x = self.predict(path=self.onnx_file, predict=predict, model_idx=idx, inputs=test_X)
out_x = preprocess.dl_preprocess_data(pd.DataFrame(out_x), predict=predict)[0]
out_x = out_x.transpose((0, 2, 1))
out_x = self.predict(
path=self.post_onnx_file, predict=predict, model_idx=idx, inputs=out_x
)
post_yhat[:, :, idx] = out_x
hours_added = timedelta(minutes=interval)
_date = start_date + hours_added
time_arr.append(_date)
interval += self.feed_interval
yhat_final = []
init_start = 0
end = post_yhat.shape[0]
for i in range(init_start, end, self.total_models):
for j in range(self.total_models):
yhat_final.append(post_yhat[i, -1, j])
yhat_final = output_scaler.inverse_transform(np.expand_dims(yhat_final, axis=1))[:, 0]
df_predict = pd.DataFrame(data=list(zip(time_arr, yhat_final)), columns=["date", predict])
return df_predict
def run_predict(
self,
df_in: pd.DataFrame,
df_out: pd.DataFrame,
):
df_all_predict = pd.DataFrame()
df_in.sort_values(by=[self.date_attribute], ascending=True, inplace=True)
for predict in self.predicts:
input_order_df = df_in[df_in.columns].copy()
out_feature_df = input_order_df[predict]
input_order_df.drop(columns=[predict], inplace=True)
input_order_df[predict] = out_feature_df
df_predict = self.run_individual_predict(
df_in=df_in,
predict=predict,
)
if df_predict is not None:
if df_all_predict.empty:
df_all_predict[predict] = df_predict[predict]
df_all_predict[self.date_attribute] = df_predict[self.date_attribute]
else:
df_all_predict = pd.concat([df_all_predict, df_predict], axis=1)
df_all_predict = df_all_predict.loc[:, list(~df_all_predict.columns.duplicated())]
df_out = pd.concat([df_out, df_all_predict], ignore_index=True)
df_out.reset_index(drop=True, inplace=True)
return df_out
def run_individual_predict_historical(
self,
df_in: pd.DataFrame,
df_out: pd.DatetimeIndex,
predict: str,
):
df_predict = pd.DataFrame(columns=[predict, self.date_attribute])
with open(self.data_export_path % (predict, self.relevant_text), "rb") as f:
train_scaler, output_scaler = pickle.load(f)[4:6]
preprocess = Preprocess(
train_scaler=train_scaler,
output_scaler=output_scaler,
is_training=True,
ts_lookahead=self.ts_lookahead,
ts_lookback=self.ts_lookback,
chunk_size=self.chunk_size,
wavelet=self.wavelet,
mode=self.mode,
level=self.level,
relevant=self.relevant,
)
inshape = self.total_models
test_X = preprocess.wavelet_transform_predict(df_in=df_in, predict=predict)
post_yhat = np.empty([test_X[0].shape[0] + 1 - inshape, inshape, self.total_models])
for idx in range(0, self.total_models):
out_x = self.predict(path=self.onnx_file, predict=predict, model_idx=idx, inputs=test_X)
out_x = preprocess.dl_preprocess_data(pd.DataFrame(out_x), predict=predict)[0]
out_x = out_x[..., 0]
out_x = self.predict(
path=self.post_onnx_file,
predict=predict,
model_idx=idx,
inputs=out_x,
is_post=True,
)
post_yhat[:, :, idx] = out_x
yhat_final = []
init_start = 0
end = post_yhat.shape[0]
for i in range(init_start, end, self.total_models):
for j in range(self.total_models):
yhat_final.append(post_yhat[i, -1, j])
yhat_final = output_scaler.inverse_transform(np.expand_dims(yhat_final, axis=1))[:, 0]
df_predict = pd.DataFrame(data=list(zip(df_out, yhat_final)), columns=["date", predict])
return df_predict
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/prediction.py |
from datetime import datetime, timedelta
from typing import Any, Dict, List
import numpy as np
import pandas as pd
from numpy._typing import NDArray
from pandas.tseries.offsets import DateOffset
from sklearn.preprocessing import StandardScaler
def get_csv_data(
path: str,
date_attribute: str = "date",
columns_rename: Dict[str, str] = {},
frequency: str = "60min",
):
"""
Read data from CSV file using Pandas python package.
"""
data_df = pd.read_csv(path)
data_df[date_attribute] = pd.to_datetime(data_df[date_attribute])
if columns_rename:
data_df.rename(columns=columns_rename, inplace=True)
# apply index on date
data_df.reset_index(drop=True, inplace=True)
data_df.set_index(date_attribute, inplace=True)
data_df.sort_index(ascending=True, inplace=True)
# interpolate to derive missing data
data_df = data_df.interpolate(method="from_derivatives")
assert data_df is not None, "Interpolate deleted all data"
data_df = data_df.dropna()
# Group rows by frequency, requires date attribute indexed to execute this
data_df = data_df.fillna(method="ffill")
data_df = data_df.fillna(method="bfill")
data_df = data_df.groupby(pd.Grouper(freq=frequency)).mean()
data_df = data_df.fillna(method="ffill")
data_df = data_df.fillna(method="bfill")
return data_df
def hour_round(t: datetime):
# Rounds to nearest hour by adding a timedelta hour if minute >= 30
return t.replace(second=0, microsecond=0, minute=0, hour=t.hour) + timedelta(
hours=t.minute // 30
)
def get_split_scaled_data(data: pd.DataFrame, out_feature: str, split_ratio: float = 0.92):
split = int(split_ratio * data.shape[0])
train_data = data.iloc[:split]
test_data = data.iloc[split:]
output_scaler = StandardScaler()
output_scaler.fit_transform(np.expand_dims(data[out_feature].values, axis=1)) # type: ignore
train_scaler = StandardScaler()
train_scale_df = pd.DataFrame(
train_scaler.fit_transform(train_data), columns=train_data.columns, index=train_data.index
)
test_scale_df = pd.DataFrame(
train_scaler.transform(test_data), columns=test_data.columns, index=test_data.index
)
return train_scaler, output_scaler, train_scale_df, test_scale_df
def shift_index(ds_df: pd.DataFrame, freq_minutes: int, num_indices: int, dateColumn: str = "date"):
ds_df[dateColumn] = ds_df.index.shift(-num_indices, freq=DateOffset(minutes=freq_minutes))
ds_df = ds_df.reset_index(drop=True)
ds_df = ds_df.set_index(dateColumn)
return ds_df
def clean_relevant_data(
actual_df: pd.DataFrame,
forecast_df: pd.DataFrame,
out_variables: List[str],
freq_hours: int,
num_of_indices: int,
):
base_data_df = actual_df.copy()
current_ws_df = forecast_df.add_suffix("Current")
base_data_df = base_data_df.join(current_ws_df)
shift_forecast_df = shift_index(forecast_df, freq_hours * 60, num_of_indices)
base_data_df = base_data_df.join(shift_forecast_df)
base_data_df = base_data_df[out_variables]
base_data_df = base_data_df.interpolate(method="from_derivatives")
assert base_data_df is not None, "Interpolate deleted all data"
base_data_df = base_data_df.dropna()
return base_data_df
def smooth(y: NDArray[Any], box_pts: int):
box = np.ones(box_pts) / box_pts
y_smooth = np.convolve(y, box, mode="same")
return y_smooth
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/utils.py |
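# Hedged usage sketch: smooth() above is a simple moving-average (box) filter; box_pts
# controls the window width. The values below are illustrative; the import path is assumed.
import numpy as np
from notebook_lib.utils import smooth  # assumed import path

y = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0])
print(smooth(y, box_pts=3))  # each point averaged with its neighbours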
from torch import Tensor
import torch.nn as nn
from einops import rearrange
from notebook_lib.helpers import attn
class MultiHeadAttention(nn.Module):
def __init__(self, d_model: int, num_heads: int):
super().__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.d_head = d_model // self.num_heads
self.scale = self.d_head ** -0.5
self.wq = nn.Linear(d_model, d_model)
self.wk = nn.Linear(d_model, d_model)
self.wv = nn.Linear(d_model, d_model)
self.dense = nn.Linear(d_model, d_model)
def forward(self, v: Tensor, k: Tensor, q: Tensor, mask: Tensor):
# (batch_size, seq_len, d_model)
q = self.wq(q)
k = self.wk(k)
v = self.wv(v)
# (batch_size, num_heads, seq_len_q, depth)
q, k, v = (rearrange(x, "b l (h d) -> (b h) l d", h=self.num_heads) for x in (q, k, v))
q *= self.scale
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention = attn(q, k, v, mask)
concat_attention = rearrange(scaled_attention, "(b h) l d -> b l (h d)", h=self.num_heads)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/transform.py |
import os
import pickle
import shutil
from typing import Any, List, Tuple, Union
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from notebook_lib.models import DeepMCModel, DeepMCPostModel
from notebook_lib.modules import DeepMCPostTrain, DeepMCTrain
from numpy._typing import NDArray
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from torch import Tensor
from torch.utils.data import DataLoader, TensorDataset
from . import utils
from .preprocess import Preprocess
MODEL_SUFFIX = "deepmc."
class ModelTrainWeather:
def __init__(
self,
root_path: str,
data_export_path: str,
station_name: str,
train_features: List[str],
out_features: List[str],
chunk_size: int = 528,
ts_lookback: int = 24,
total_models: int = 24,
is_validation: bool = False,
wavelet: str = "bior3.5",
mode: str = "periodic",
level: int = 5,
batch_size: int = 256,
relevant: bool = False,
):
if relevant:
self.relevant_text = "relevant"
else:
self.relevant_text = "not-relevant"
self.total_models = total_models
self.root_path = root_path
self.data_export_path = data_export_path
self.path_to_station = os.path.join(self.root_path, station_name, self.relevant_text, "")
self.model_path = os.path.join(self.path_to_station, "model_%s", "")
self.post_model_path = os.path.join(self.model_path, "post", "")
self.train_features = train_features
self.out_features = out_features
self.ts_lookback = ts_lookback
self.is_validation = is_validation
self.ts_lookahead = total_models
self.chunk_size = chunk_size
self.wavelet = wavelet
self.mode = mode
self.level = level
self.batch_size = batch_size
self.relevant = relevant
def train_model(
self,
input_df: pd.DataFrame,
start: int = 0,
end: int = -1,
epochs: int = 20,
):
end = self.total_models if end == -1 else end
for out_feature in self.out_features:
if not os.path.exists(self.path_to_station % out_feature):
os.makedirs(self.path_to_station % out_feature, exist_ok=True)
input_order_df = input_df[self.train_features].copy()
out_feature_df = input_order_df[out_feature]
input_order_df.drop(columns=[out_feature], inplace=True)
input_order_df[out_feature] = out_feature_df
# data preprocessing
(train_scaler, output_scaler, train_df, test_df,) = utils.get_split_scaled_data(
data=input_order_df, out_feature=out_feature, split_ratio=0.92
)
if os.path.exists(self.data_export_path % (out_feature, self.relevant_text)):
with open(self.data_export_path % (out_feature, self.relevant_text), "rb") as f:
(
train_X,
train_y,
test_X,
test_y,
train_scaler,
output_scaler,
) = pickle.load(f)
self.preprocess = Preprocess(
train_scaler=train_scaler,
output_scaler=output_scaler,
is_training=True,
is_validation=self.is_validation,
ts_lookahead=self.ts_lookahead,
ts_lookback=self.ts_lookback,
chunk_size=self.chunk_size,
wavelet=self.wavelet,
mode=self.mode,
level=self.level,
relevant=self.relevant,
)
else:
self.preprocess = Preprocess(
train_scaler=train_scaler,
output_scaler=output_scaler,
is_training=True,
is_validation=self.is_validation,
ts_lookahead=self.ts_lookahead,
ts_lookback=self.ts_lookback,
chunk_size=self.chunk_size,
wavelet=self.wavelet,
mode=self.mode,
level=self.level,
relevant=self.relevant,
)
(
train_X,
train_y,
test_X,
test_y,
) = self.preprocess.wavelet_transform_train(train_df, test_df, out_feature)
with open(self.data_export_path % (out_feature, self.relevant_text), "wb") as f:
pickle.dump(
[train_X, train_y, test_X, test_y, train_scaler, output_scaler],
f,
)
self.train_models(
train_X=train_X, # type: ignore
train_y=train_y, # type: ignore
test_X=test_X, # type: ignore
test_y=test_y, # type: ignore
epochs=epochs,
out_feature=out_feature,
start=start,
end=end,
)
def train_models(
self,
train_X: List[NDArray[Any]],
train_y: List[NDArray[Any]],
test_X: List[NDArray[Any]],
test_y: List[NDArray[Any]],
epochs: int,
out_feature: str,
start: int,
end: int,
):
first_channels = train_X[0].shape[2]
rest_channels = train_X[1].shape[2]
first_encoder_channels = 3
rest_encoder_channels = (4, 8, 16)
sequence_length = train_X[0].shape[1]
kernel_size = 2
num_inputs = len(train_X)
for i in range(start, end):
train_inputs = [
torch.from_numpy(x.astype(np.float32))
for x in (*train_X, train_y[:, i]) # type: ignore
]
train_dataset = TensorDataset(*train_inputs)
train_loader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
val_inputs = [
torch.from_numpy(x.astype(np.float32))
for x in (*test_X, test_y[:, i]) # type: ignore
]
val_dataset = TensorDataset(*val_inputs)
val_loader = DataLoader(val_dataset, batch_size=self.batch_size)
m = DeepMCTrain(
first_channels=first_channels,
rest_channels=rest_channels,
first_encoder_channels=first_encoder_channels,
rest_encoder_channels=rest_encoder_channels,
sequence_length=sequence_length,
kernel_size=kernel_size,
num_inputs=num_inputs,
)
model_path = self.model_path % (out_feature, str(i))
if os.path.exists(model_path):
shutil.rmtree(model_path, ignore_errors=True)
os.makedirs(model_path, exist_ok=True)
t_obj = pl.Trainer(
logger=True,
max_epochs=epochs,
callbacks=[
LearningRateMonitor(),
ModelCheckpoint(
monitor="val_loss/total",
save_last=True,
dirpath=model_path,
),
],
num_processes=1,
)
t_obj.fit(m, train_loader, val_loader)
self.export_to_onnx(file_path=model_path, model=m.deepmc, inputs=train_inputs)
self.post_model(
m.deepmc,
train_X=train_X,
train_y=train_y,
test_X=test_X,
test_y=test_y,
out_feature=out_feature,
model_index=i,
epochs=epochs,
)
def export_to_onnx(
self,
file_path: str,
model: Union[DeepMCModel, DeepMCPostModel],
inputs: Union[List[Tensor], Tensor],
):
batch_axes = {f"tensor.{str(i)}": {0: "batch_size"} for i in range(len(inputs))}
onnx_output_path = os.path.join(file_path, "export.onnx")
if os.path.exists(onnx_output_path):
os.remove(onnx_output_path)
# Export the model
torch.onnx.export(
model,
inputs,
onnx_output_path,
input_names=list(batch_axes.keys()),
dynamic_axes=batch_axes,
)
def get_dataloader(
self, gt: NDArray[Any], target: NDArray[Any], o_feature: str
) -> Tuple[DataLoader[Any], List[Tensor]]:
o_x = self.preprocess.dl_preprocess_data(pd.DataFrame(gt), o_feature)[0][:, :, 0].astype(
np.float32
)
o_y = self.preprocess.dl_preprocess_data(pd.DataFrame(target), o_feature)[0][
:, :, 0
].astype(np.float32)
o_inputs = [torch.from_numpy(x.astype(np.float32)) for x in (o_x, o_y)]
o_dataset = TensorDataset(*o_inputs)
o_loader = DataLoader(o_dataset, batch_size=self.batch_size, shuffle=True)
return o_loader, o_inputs
def post_model(
self,
m: DeepMCModel,
train_X: List[NDArray[Any]],
train_y: List[NDArray[Any]],
test_X: List[NDArray[Any]],
test_y: List[NDArray[Any]],
out_feature: str,
model_index: int,
epochs: int,
):
m.eval()
def xf(a: List[NDArray[Any]]) -> List[Tensor]:
return [torch.from_numpy(x.astype(np.float32)) for x in a]
train_yhat = m(xf(train_X)).detach().numpy()[:, 0]
test_yhat = m(xf(test_X)).detach().numpy()[:, 0]
post_model_path = self.post_model_path % (out_feature, str(model_index))
if not os.path.exists(post_model_path):
os.mkdir(post_model_path)
train_dataloader, _ = self.get_dataloader(
gt=train_y[:, model_index, 0], target=train_yhat, o_feature=out_feature # type: ignore
)
val_dataloader, val_inputs = self.get_dataloader(
gt=test_y[:, model_index, 0], target=test_yhat, o_feature=out_feature # type: ignore
)
p_m = DeepMCPostTrain(first_in_features=self.total_models)
t_obj = pl.Trainer(
logger=True,
max_epochs=epochs,
callbacks=[
LearningRateMonitor(),
ModelCheckpoint(
monitor="val_loss/total",
save_last=True,
dirpath=post_model_path,
),
],
num_processes=1,
)
t_obj.fit(p_m, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader)
self.export_to_onnx(file_path=post_model_path, model=p_m.deepmc, inputs=torch.rand((1, 24)))
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/train.py |
from typing import Optional, Tuple, Union, cast
import torch
from einops import rearrange
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.types import _dtype
from unfoldNd.utils import _get_conv, _get_kernel_size_numel, _tuple
def _make_weight(
in_channels: int,
kernel_size: Tuple[int, ...],
device: Optional[str],
dtype: Optional[_dtype],
) -> torch.Tensor:
"""Create one-hot convolution kernel. ``kernel_size`` must be an ``N``-tuple.
Details:
Let ``T`` denote the one-hot weight, then
``T[c * i, 0, j] = δᵢⱼ ∀ c = 1, ... C_in``
(``j`` is a group index of the ``Kᵢ``).
This can be done by building diagonals ``D[i, j] = δᵢⱼ``, reshaping
        them into ``[∏ᵢ Kᵢ, 1, K]``, and repeating them ``C_in`` times along the
leading dimension.
Returns:
torch.Tensor : A tensor of shape ``[ C_in * ∏ᵢ Kᵢ, 1, K]`` where
``K = (K₁, K₂, ..., Kₙ)`` is the kernel size. Filter groups are
one-hot such that they effectively extract one element of the patch
the kernel currently overlaps with.
"""
kernel_size_numel = _get_kernel_size_numel(kernel_size)
repeat = [in_channels, 1] + [1 for _ in kernel_size]
return (
torch.eye(kernel_size_numel, device=device, dtype=dtype) # type: ignore
.reshape((kernel_size_numel, 1, *kernel_size))
.repeat(*repeat)
)
class Unfold1d(torch.nn.Module):
"""Extracts sliding local blocks from a batched input tensor. Also known as im2col.
    PyTorch module for 3d (batched 1d) inputs. Acts like ``torch.nn.Unfold`` does
    for a 4d input, but along a single spatial dimension. Uses one-hot convolution
    under the hood.
See docs at https://pytorch.org/docs/stable/generated/torch.nn.Unfold.html.
"""
def __init__(
self,
in_channels: int,
kernel_size: Union[int, Tuple[int, ...]],
dilation: int = 1,
padding: int = 0,
stride: int = 1,
device: Optional[str] = None,
dtype: Optional[_dtype] = None,
):
super().__init__()
self.in_channels = in_channels
self.kernel_size = kernel_size
self.dilation = dilation
self.padding = padding
self.stride = stride
# get convolution operation
N = 1
self._conv = _get_conv(N)
# prepare one-hot convolution kernel
kernel_size = _tuple(kernel_size, N)
self.kernel_size_numel = _get_kernel_size_numel(kernel_size)
self.weight = _make_weight(in_channels, cast(Tuple[int, ...], kernel_size), device, dtype)
def forward(self, input: torch.Tensor):
batch_size = input.shape[0]
unfold = self._conv(
input,
self.weight,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.in_channels,
)
return unfold.reshape(batch_size, self.in_channels * self.kernel_size_numel, -1)
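# Minimal sanity check for the one-hot-convolution unfolding described in the
# docstrings above. This block is an illustrative addition (not part of the
# original module); the shapes (batch=8, channels=3, length=24) are assumed
# example values, and Unfold1d is compared against Tensor.unfold plus a reshape.
if __name__ == "__main__":
    _x = torch.randn(8, 3, 24)
    _patches = Unfold1d(in_channels=3, kernel_size=2)(_x)
    _reference = _x.unfold(2, 2, 1).permute(0, 1, 3, 2).reshape(8, 3 * 2, -1)
    # Both yield (batch, in_channels * kernel_size, num_windows) = (8, 6, 23).
    print(_patches.shape, torch.allclose(_patches, _reference))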
class LocallyConnected1d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
seq_len: int,
kernel_size: int,
stride: int = 1,
padding: Union[int, Tuple[int, int]] = 0,
bias: bool = True,
device: Optional[str] = None,
dtype: Optional[_dtype] = None,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.seq_len = seq_len
self.kernel_size = kernel_size
self.stride = stride
self.padding = (padding, padding) if isinstance(padding, int) else padding
out_seq_len = (seq_len + sum(self.padding) - (kernel_size - 1) - 1) // stride + 1
self.unfold = Unfold1d(self.in_channels, self.kernel_size, stride=stride)
self.weight = Parameter(
torch.empty(
# Pyright mistakenly thinks that the type of size is int
(in_channels, out_channels, kernel_size, out_seq_len), # type: ignore
device=device,
dtype=dtype, # type: ignore
)
)
if bias:
self.bias = Parameter((torch.empty(out_channels, out_seq_len)))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
# Do normal initialization for now, but can use something smarter
nn.init.normal_(self.weight, std=0.1)
if self.bias is not None:
nn.init.normal_(self.bias, std=0.1)
def forward(self, x: torch.Tensor):
x = F.pad(x, self.padding)
x = self.unfold(x)
x = rearrange(x, "b (i k) l -> b i l k", i=self.in_channels)
        x = torch.einsum("b i l k, i o k l -> b o l", x, self.weight)
if self.bias is not None:
x = x + self.bias
return x
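# Illustrative usage sketch (not part of the original module), with assumed
# example dimensions: a locally connected layer maps (batch, in_channels, seq_len)
# to (batch, out_channels, out_seq_len), learning an independent weight per position.
if __name__ == "__main__":
    _layer = LocallyConnected1d(in_channels=3, out_channels=5, seq_len=24, kernel_size=2)
    _out = _layer(torch.randn(8, 3, 24))
    print(_out.shape)  # expected: torch.Size([8, 5, 23])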
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/locally_connected.py |
from typing import Tuple
import pytorch_lightning as pl
import torch
from notebook_lib.models import DeepMCModel, DeepMCPostModel
from torch import Tensor, nn
class DeepMCTrain(pl.LightningModule):
def __init__(
self,
first_channels: int,
rest_channels: int,
first_encoder_channels: int,
rest_encoder_channels: Tuple[int, int, int],
sequence_length: int,
kernel_size: int,
num_inputs: int,
encoder_layers: int = 2,
encoder_models: int = 4,
encoder_heads: int = 4,
encoder_ff_features: int = 16,
encoder_lr: float = 0.1,
dropout: float = 0.2,
batch_first: bool = True,
return_sequence: bool = True,
):
super().__init__()
self.deepmc = DeepMCModel(
first_channels=first_channels,
rest_channels=rest_channels,
first_encoder_channels=first_encoder_channels,
rest_encoder_channels=rest_encoder_channels,
sequence_length=sequence_length,
kernel_size=kernel_size,
num_inputs=num_inputs,
encoder_layers=encoder_layers,
encoder_features=encoder_models,
encoder_heads=encoder_heads,
encoder_ff_features=encoder_ff_features,
encoder_dropout=encoder_lr,
dropout=dropout,
batch_first=batch_first,
return_sequence=return_sequence,
)
self.loss = nn.MSELoss(reduction="sum")
def forward(self, x: Tensor):
y = self.deepmc(x)
return y
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.002, eps=1e-07)
return optimizer
def training_step(self, train_batch: Tensor, _):
x, y = train_batch[:6], train_batch[6]
y_hat = self.deepmc(x)
loss = self.loss(y_hat, y)
self.log("train_loss/total", loss)
return loss
def validation_step(self, validation_batch: Tensor, _):
x, y = validation_batch[:6], validation_batch[6]
y_hat = self.deepmc(x)
loss = self.loss(y_hat, y)
self.log("val_loss/total", loss, on_epoch=True)
return loss
class DeepMCPostTrain(pl.LightningModule):
def __init__(
self,
first_in_features: int,
first_out_features: int = 48,
second_out_features: int = 96,
out_features: int = 24,
):
super().__init__()
self.deepmc = DeepMCPostModel(
first_in_features=first_in_features,
first_out_features=first_out_features,
second_out_features=second_out_features,
out_features=out_features,
)
self.loss = nn.L1Loss(reduction="sum")
def forward(self, x: Tensor):
y = self.deepmc(x)
return y
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
return optimizer
def training_step(self, batch: Tensor, _):
x, y = batch
y_hat = self.deepmc(x)
loss = self.loss(y_hat, y)
self.log("train_loss/total", loss)
return loss
def validation_step(self, batch: Tensor, _):
x, y = batch
y_hat = self.deepmc(x)
loss = self.loss(y_hat, y)
self.log("val_loss/total", loss, on_epoch=True)
return loss
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/modules.py |
import numpy as np
import tensorflow as tf
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(
np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model
)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
# create mask for padding, 0 --> 1 (mask)
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += mask * -1e9
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(
scaled_attention_logits, axis=-1
) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
def print_out(q, k, v):
temp_out, temp_attn = scaled_dot_product_attention(q, k, v, None)
print("Attention weights are:")
print(temp_attn)
print("Output is:")
print(temp_out)
"""
- Q (query), K (key) and V (value) are split into multiple heads (num_heads)
- each tuple (q, k, v) is fed to scaled_dot_product_attention
- all attention outputs are concatenated
"""
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(
scaled_attention, perm=[0, 2, 1, 3]
) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(
scaled_attention, (batch_size, -1, self.d_model)
) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
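# Illustrative sketch (not part of the original module), using assumed small
# sizes: the positional encoding has shape (1, position, d_model), and a
# MultiHeadAttention layer maps (batch, seq_len, d_model) back to the same shape
# while returning attention weights of shape (batch, num_heads, seq_len, seq_len).
if __name__ == "__main__":
    print(positional_encoding(position=50, d_model=8).shape)  # (1, 50, 8)
    _mha = MultiHeadAttention(d_model=8, num_heads=2)
    _x = tf.random.uniform((1, 4, 8))
    _out, _attn = _mha(_x, k=_x, q=_x, mask=None)
    print(_out.shape, _attn.shape)  # (1, 4, 8) (1, 2, 4, 4)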
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential(
[
tf.keras.layers.Dense(dff, activation="relu"), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model), # (batch_size, seq_len, d_model)
]
)
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(d_model, num_heads)
self.mha2 = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.mha1(
x, x, x, look_ahead_mask
) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(
enc_output, enc_output, out1, padding_mask
) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Dense(d_model, activation="relu")
self.pos_encoding = positional_encoding(maximum_position_encoding, self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# print("Encoder:", x.shape)
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Dense(d_model, activation="relu")
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](
x, enc_output, training, look_ahead_mask, padding_mask
)
attention_weights["decoder_layer{}_block1".format(i + 1)] = block1
attention_weights["decoder_layer{}_block2".format(i + 1)] = block2
return x, attention_weights
class Transformer(tf.keras.Model):
def __init__(
self, num_layers, d_model, num_heads, dff, target_vocab_size, pe_input, pe_target, rate=0.1
):
super(Transformer, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff, pe_input, rate)
self.decoder = Decoder(num_layers, d_model, num_heads, dff, pe_target, rate)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inp, tar, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):
enc_output = self.encoder(
inp, training, enc_padding_mask
) # (batch_size, inp_seq_len, d_model)
# dec_output.shape == (batch_size, tar_seq_len, d_model)
dec_output, attention_weights = self.decoder(
tar, enc_output, training, look_ahead_mask, dec_padding_mask
)
final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
class GLU(tf.keras.layers.Layer):
def __init__(self, input_channel, output_channel):
super(GLU, self).__init__()
self.linear_left = tf.keras.layers.Dense(output_channel)
self.linear_right = tf.keras.layers.Dense(output_channel)
def call(self, x):
return tf.math.multiply(
self.linear_left(x), tf.keras.activations.sigmoid(self.linear_right(x))
)
class FFT(tf.keras.layers.Layer):
def __init__(self, time_step, order, output_channel):
super(FFT, self).__init__()
self.time_step = time_step
self.order = order
self.output_channel = output_channel
        self.GLUs = []  # list of GLU sub-layers
for i in range(3):
if i == 0:
self.GLUs.append(
GLU(self.time_step * self.order, self.time_step * self.output_channel)
)
self.GLUs.append(
GLU(self.time_step * self.order, self.time_step * self.output_channel)
)
elif i == 1:
self.GLUs.append(
GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)
)
self.GLUs.append(
GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)
)
else:
self.GLUs.append(
GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)
)
self.GLUs.append(
GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)
)
def call(self, x):
# x should be (b, seq_len, units)
x = tf.keras.layers.Permute((2, 1))(x)
ffted = tf.signal.fft(tf.cast(x, dtype=tf.complex64)) # (b, units, seq_len)
real = tf.math.real(ffted) # [b, units, seq_len]
img = tf.math.imag(ffted)
for i in range(3):
real = self.GLUs[i * 2](real)
img = self.GLUs[2 * i + 1](img)
time_step_as_inner = tf.dtypes.complex(real, img)
        iffted = tf.signal.ifft(time_step_as_inner)  # (b, units, time_step * output_channel)
iffted = tf.cast(iffted, dtype=tf.float32)
iffted = tf.keras.layers.Permute((2, 1))(iffted)
return iffted
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/transformer_models_ts.py |
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from numpy._typing import NDArray
from torch import Tensor
from torch.nn import Sequential
# Python 3.8 and subscripted generics require type: ignore
def get_angles(pos: NDArray, i: NDArray, d_model: int): # type: ignore
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position: int, d_model: int) -> Tensor:
angle_rads = get_angles(
np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model
)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return torch.tensor(pos_encoding, dtype=torch.float32)
def attn(
q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None
) -> Tensor:
sim = torch.einsum("b i d, b j d -> b i j", q, k)
if mask is not None:
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(~mask, max_neg_value)
attn = sim.softmax(dim=-1)
out = torch.einsum("b i j, b j d -> b i d", attn, v)
return out
def point_wise_feed_forward_network(in_features: int, out_features: int, d_ff: int) -> Sequential:
return Sequential(
nn.Linear(in_features, d_ff),
nn.ReLU(),
nn.Linear(d_ff, out_features),
)
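# Illustrative sketch (not part of the original module), using assumed example
# shapes: positional_encoding yields a (1, position, d_model) tensor, and attn
# performs dot-product attention (no scaling term) over (batch, seq, dim) tensors.
if __name__ == "__main__":
    print(positional_encoding(position=24, d_model=16).shape)  # torch.Size([1, 24, 16])
    _q = _k = _v = torch.randn(2, 24, 16)
    print(attn(_q, _k, _v).shape)  # torch.Size([2, 24, 16])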
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/deepmc/notebook_lib/helpers.py |
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/weed_detection/__init__.py |
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/heatmaps/notebook_lib/__init__.py |
import re
from datetime import datetime
from typing import Any, Dict, List, Tuple, Union, cast
from zipfile import ZipFile
import geopandas as gpd
import pandas as pd
from geopandas import GeoSeries
from matplotlib import pyplot as plt
from shapely import geometry as shpg
from vibe_core.client import get_default_vibe_client
from vibe_core.data import ExternalReferenceList, Raster
from vibe_core.data.core_types import BaseVibeDict, DataVibe
def view_output(archive_path: str, title: str):
all = []
column_name = "layer"
match_str = "cluster(.*).shp"
z = ZipFile(archive_path)
output_shapes = [
o
for o in z.namelist()
if o.endswith(".shp") and o != "cluster0.shp" and o != "cluster0.0.shp"
]
for o in output_shapes:
f = gpd.read_file(archive_path + "!" + o)
f[column_name] = cast("re.Match[str]", re.search(match_str, o)).group(1)
all.append(f)
all = pd.concat(all)
all[column_name] = all[column_name].astype(float)
_, ax = plt.subplots(figsize=(7, 6))
all.plot(ax=ax, column=column_name, legend=True, cmap="viridis")
plt.axis("off")
plt.title(title)
def create_heatmap(
imagery: Raster, geojson_url: str, farm_boundary: str, parameters: Dict[str, Any]
):
now = datetime.now()
# create id
geom_url_hash = str(hash(geojson_url))
# read farm boundary
data_frame = gpd.read_file(farm_boundary)
geometry = shpg.mapping(data_frame["geometry"][0])
# submit request to farmVibes cluster
sample_inputs = ExternalReferenceList(
id=geom_url_hash, time_range=(now, now), geometry=geometry, assets=[], urls=[geojson_url]
)
inputs = {"input_raster": imagery, "input_samples": sample_inputs}
workflow = "farm_ai/agriculture/heatmap_sensor"
name = "heatmap_example"
out = submit_inputs_request(inputs, parameters, workflow, name)
dv = cast(List[DataVibe], out["result"])[0]
asset = dv.assets[0]
return asset.path_or_url
def submit_inputs_request(
inputs: Union[Dict[str, Any], ExternalReferenceList],
parameters: Dict[str, Any],
workflow: str,
name: str,
) -> BaseVibeDict:
client = get_default_vibe_client()
run = client.run(
workflow=workflow,
name=name,
input_data=inputs,
parameters=parameters,
)
# display execution results
run.monitor(refresh_time_s=5)
if run.status == "done":
assert run.output, "No output found in completed run"
return run.output
else:
raise Exception(client.describe_run(run.id))
def get_raster_from_cluster(
farm_boundary: str, time_range: Tuple[datetime, datetime], sr_id: int = 32611
) -> Union[Raster, None]:
client = get_default_vibe_client()
# read farm boundary
data_frame = gpd.read_file(farm_boundary)
geometry = GeoSeries([data_frame["geometry"][0]], crs=sr_id).to_crs(4326)[0]
run = client.run(
workflow="data_ingestion/sentinel2/preprocess_s2",
name="image_example",
geometry=geometry, # type: ignore
time_range=time_range,
)
# display execution results
run.monitor(refresh_time_s=5)
if run.status == "done":
assert run.output, "No output found in completed run"
return run.output["raster"][0] # type: ignore
else:
raise Exception(client.describe_run(run.id))
def get_raster_from_external(imagery_url: str, farm_boundary: str, sr_id: int = 32611) -> Raster:
url_hash = str(hash(imagery_url))
now = datetime.now()
# read farm boundary
data_frame = gpd.read_file(farm_boundary)
geometry = GeoSeries([data_frame["geometry"][0]], crs=sr_id).to_crs(4326)[0]
geometry = shpg.mapping(geometry)
inputs = ExternalReferenceList(
id=url_hash, time_range=(now, now), geometry=geometry, assets=[], urls=[imagery_url]
)
out = submit_inputs_request(
inputs=inputs,
parameters={},
workflow="data_ingestion/user_data/ingest_raster",
name="image_example",
)
return cast(List[Raster], out["raster"])[0]
| EXA-1-master | exa/libraries/farmvibes-ai-main/notebooks/heatmaps/notebook_lib/utils.py |
from setuptools import setup
if __name__ == "__main__":
setup()
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/setup.py |
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Tuple, cast
import pytest
from shapely import geometry as shpg
from vibe_core.data import CategoricalRaster, Raster, RasterSequence
from vibe_core.data.core_types import BaseVibe, DataVibe, DataVibeType, TimeSeries
from vibe_core.data.utils import (
StacConverter,
get_most_specific_type,
serialize_input,
serialize_stac,
)
@dataclass
class BaseTest(BaseVibe):
int_field: int
str_field: str
def serialize_vibe(vibe: BaseVibe):
converter = StacConverter()
return serialize_stac(converter.to_stac_item(vibe))
@pytest.fixture
def input_pair(request: pytest.FixtureRequest):
param: str = request.param # type:ignore
if param == "base":
x = BaseTest(0, "string")
return x, serialize_vibe(x)
now = datetime.now()
geom = shpg.mapping(shpg.box(0, 0, 1, 1))
kwargs = {"id": "1", "time_range": (now, now), "geometry": geom, "assets": []}
if param == "vibe":
x = DataVibe(**kwargs)
elif param == "raster":
x = Raster(**kwargs, bands={})
else:
raise ValueError(f"Unrecognized parameter {param}")
return x, serialize_vibe(x)
@pytest.mark.parametrize("repeats", (1, 10))
@pytest.mark.parametrize("input_pair", ("base", "vibe", "raster"), indirect=True)
def test_serialize_basevibe(input_pair: Tuple[BaseVibe, Dict[str, Any]], repeats: int):
input, serial = input_pair
assert serialize_input(input) == serial
input_list = [input for _ in range(repeats)]
serial_list = [serial for _ in range(repeats)]
assert serialize_input(input_list) == serial_list
assert serialize_input({"item": input, "list": input_list}) == {
"item": serial,
"list": serial_list,
}
def test_get_most_specific_type():
assert get_most_specific_type([DataVibe, DataVibe]) is DataVibe
assert get_most_specific_type([DataVibe, TimeSeries]) is TimeSeries
assert get_most_specific_type([DataVibe, Raster]) is Raster
assert get_most_specific_type([CategoricalRaster, Raster]) is CategoricalRaster
assert get_most_specific_type([DataVibe, Raster, CategoricalRaster]) is CategoricalRaster
def test_most_specific_type_incompatible_fails():
with pytest.raises(ValueError):
get_most_specific_type([DataVibe, Raster, TimeSeries])
with pytest.raises(ValueError):
get_most_specific_type([Raster, TimeSeries])
with pytest.raises(ValueError):
get_most_specific_type([RasterSequence, CategoricalRaster])
def test_most_specific_type_container():
assert get_most_specific_type(cast(List[DataVibeType], [DataVibe, List[DataVibe]])) is DataVibe
assert (
get_most_specific_type(cast(List[DataVibeType], [List[DataVibe], List[DataVibe]]))
is List[DataVibe]
)
assert (
get_most_specific_type(cast(List[DataVibeType], [List[CategoricalRaster], Raster]))
is CategoricalRaster
)
assert (
get_most_specific_type(cast(List[DataVibeType], [List[CategoricalRaster], List[Raster]]))
is List[CategoricalRaster]
)
@pytest.mark.parametrize("input_pair", ("base", "vibe", "raster"), indirect=True)
def test_serialize_deserialize_stac(input_pair: Tuple[BaseVibe, Dict[str, Any]]):
input, serial = input_pair
converter = StacConverter()
serialized = converter.to_stac_item(input)
output = converter.from_stac_item(serialized)
assert input == output
assert serial == serialize_stac(serialized)
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/tests/test_utils.py |
from vibe_core.data import data_registry, DataVibe
def test_register_type():
class InternalFakeType(DataVibe):
pass
assert data_registry.retrieve("InternalFakeType") == InternalFakeType
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/tests/test_register.py |
import json
from typing import Any, Dict, List, cast
from urllib.parse import urljoin
import msal
import requests
from requests.exceptions import HTTPError
class ADMAgClient:
"""Client for Azure Data Manager for Agriculture (ADMAg) API.
:param base_url: The base URL for the ADMAg API.
:param api_version: The API version to be used.
:param client_id: The client ID.
:param client_secret: The client secret.
:param authority: The URI of the identity provider.
:param default_scope: The scope of the access request.
"""
DEFAULT_TIMEOUT = 120
"""Default timeout for requests."""
NEXT_PAGES_LIMIT = 100000
"""Maximum number of pages to retrieve in a single request."""
CONTENT_TAG = "value"
"""Tag for the content of the response."""
LINK_TAG = "nextLink"
"""Tag for the next link of the response."""
def __init__(
self,
base_url: str,
api_version: str,
client_id: str,
client_secret: str,
authority: str,
default_scope: str,
):
self.token = self.get_token(
client_id=client_id,
client_secret=client_secret,
authority=authority,
default_scope=default_scope,
)
self.api_version = api_version
self.base_url = base_url
self.session = requests.Session()
self.session.headers.update(self.header())
def get_token(self, client_id: str, client_secret: str, authority: str, default_scope: str):
"""
Generates the ADMAg access token to be used before each call.
:param client_id: The client ID.
:param client_secret: The client secret.
:param authority: The URI of the identity provider.
:param default_scope: The scope of the access request.
:return: The access token as a string.
:raises Exception: If error retrieving token.
"""
app = msal.ConfidentialClientApplication(
client_id=client_id, client_credential=client_secret, authority=authority
)
        # Initialize the token used to access ADMAg resources.
        # acquire_token_silent returns None unless a token for the application is already cached.
app.acquire_token_silent(scopes=[default_scope], account=None)
token_result = cast(Dict[str, Any], app.acquire_token_for_client(scopes=default_scope))
if "access_token" in token_result:
return token_result["access_token"]
else:
message = {
"error": token_result.get("error"),
"description": token_result.get("error_description"),
"correlationId": token_result.get("correlation_id"),
}
raise Exception(message)
def header(self) -> Dict[str, str]:
"""Generates a header containing authorization for ADMAg API requests.
:return: A dictionary containing the header for the API requests.
"""
header: Dict[str, str] = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/merge-patch+json",
}
return header
def _request(self, method: str, endpoint: str, *args: Any, **kwargs: Any):
response = self.session.request(method, urljoin(self.base_url, endpoint), *args, **kwargs)
try:
r = json.loads(response.text)
except json.JSONDecodeError:
r = response.text
try:
response.raise_for_status()
except HTTPError as e:
error_message = r.get("message", "") if isinstance(r, dict) else r
msg = f"{e}. {error_message}"
raise HTTPError(msg, response=e.response)
return cast(Any, r)
def _get(self, endpoint: str, params: Dict[str, Any] = {}):
request_params = {"api-version": self.api_version}
request_params.update(params)
response = self._request(
"GET",
endpoint,
params=request_params,
timeout=self.DEFAULT_TIMEOUT,
)
visited_next_links = set()
if self.CONTENT_TAG in response:
composed_response = {self.CONTENT_TAG: response[self.CONTENT_TAG]}
next_link = "" if self.LINK_TAG not in response else response[self.LINK_TAG]
next_link_index = 0
while next_link:
if next_link in visited_next_links:
raise RuntimeError(f"Repeated nextLink {next_link} in ADMAg get request")
if next_link_index >= self.NEXT_PAGES_LIMIT:
raise RuntimeError(f"Next pages limit {self.NEXT_PAGES_LIMIT} exceded")
tmp_response = self._request(
"GET",
next_link,
timeout=self.DEFAULT_TIMEOUT,
)
if self.CONTENT_TAG in tmp_response:
composed_response[self.CONTENT_TAG].extend(tmp_response[self.CONTENT_TAG])
visited_next_links.add(next_link)
next_link_index = next_link_index + 1
next_link = "" if self.LINK_TAG not in tmp_response else tmp_response[self.LINK_TAG]
response = composed_response
return response
def get_seasonal_fields(self, farmer_id: str, params: Dict[str, Any] = {}):
"""Retrieves the seasonal fields for a given farmer.
:param farmer_id: The ID of the farmer.
:param params: Additional parameters to be passed to the request. Defaults to {}.
:return: The information for each seasonal fields.
"""
endpoint = f"/farmers/{farmer_id}/seasonal-fields"
request_params = {"api-version": self.api_version}
request_params.update(params)
return self._get(
endpoint=endpoint,
params=request_params,
)
def get_field(self, farmer_id: str, field_id: str):
"""
Retrieves the field information for a given farmer and field.
:param farmer_id: The ID of the farmer.
:param field_id: The ID of the field.
:return: The field information.
"""
endpoint = f"/farmers/{farmer_id}/fields/{field_id}"
return self._get(endpoint)
def get_seasonal_field(self, farmer_id: str, seasonal_field_id: str):
"""Retrieves the information of a seasonal field for a given farmer.
:param farmer_id: The ID of the farmer.
:param seasonal_field_id: The ID of the seasonal field.
:return: The seasonal field information.
"""
endpoint = f"/farmers/{farmer_id}/seasonal-fields/{seasonal_field_id}"
return self._get(endpoint)
def get_boundary(self, farmer_id: str, boundary_id: str):
"""Retrieves the information of a boundary for a given farmer.
:param farmer_id: The ID of the farmer.
:param boundary_id: The ID of the boundary.
:return: The boundary information.
"""
endpoint = f"farmers/{farmer_id}/boundaries/{boundary_id}"
return self._get(endpoint)
def get_season(self, season_id: str):
"""Retrieves season information with a given id.
:param season_id: The id of the season to retrieve.
:return: The season data.
"""
endpoint = f"/seasons/{season_id}"
return self._get(endpoint)
def get_operation_info(
self,
farmer_id: str,
associated_boundary_ids: List[str],
operation_name: str,
min_start_operation: str,
max_end_operation: str,
sources: List[str] = [],
):
"""
Retrieves the information of a specified operation for a given farmer.
This method will return information about the specified operation name,
in the specified time range, for the given farmer and associated boundary IDs.
:param farmer_id: The ID of the farmer.
:param associated_boundary_ids: The IDs of the boundaries associated to the operation.
:param operation_name: The name of the operation.
:param min_start_operation: The minimum start date of the operation.
:param max_end_operation: The maximum end date of the operation.
:param sources: (optional) The sources of the operation.
:return: The operation information.
"""
endpoint = f"/farmers/{farmer_id}/{operation_name}"
params = {
"api-version": self.api_version,
"associatedBoundaryIds": associated_boundary_ids,
"minOperationStartDateTime": min_start_operation,
"maxOperationEndDateTime": max_end_operation,
}
if sources:
params["sources"] = sources
return self._get(endpoint, params=params)
def get_harvest_info(
self,
farmer_id: str,
associated_boundary_ids: List[str],
min_start_operation: str,
max_end_operation: str,
):
"""Retrieves the harvest information for a given farmer.
This method will return the harvest information for a given farmer,
associated with the provided boundary ids, between the start and end
operation dates specified.
:param farmer_id: ID of the farmer.
:param associated_boundary_ids: List of associated boundary IDs.
:param min_start_operation: The minimum start date of the operation.
:param max_end_operation: The maximum end date of the operation.
:return: Dictionary with harvest information.
"""
return self.get_operation_info(
farmer_id=farmer_id,
associated_boundary_ids=associated_boundary_ids,
operation_name="harvest-data",
min_start_operation=min_start_operation,
max_end_operation=max_end_operation,
)
def get_fertilizer_info(
self,
farmer_id: str,
associated_boundary_ids: List[str],
min_start_operation: str,
max_end_operation: str,
):
"""Retrieves the fertilizer information for a given farmer.
This method will return the fertilizer information for a given farmer,
associated with the provided boundary ids, between the start and end
operation dates specified.
:param farmer_id: ID of the farmer.
:param associated_boundary_ids: List of associated boundary IDs.
:param min_start_operation: The minimum start date of the operation.
:param max_end_operation: The maximum end date of the operation.
:return: Dictionary with fertilizer information.
"""
return self.get_operation_info(
farmer_id=farmer_id,
associated_boundary_ids=associated_boundary_ids,
operation_name="application-data",
min_start_operation=min_start_operation,
max_end_operation=max_end_operation,
sources=["Fertilizer"],
)
def get_organic_amendments_info(
self,
farmer_id: str,
associated_boundary_ids: List[str],
min_start_operation: str,
max_end_operation: str,
):
"""Retrieves the organic amendments information for a given farmer.
This method will return the organic amendments information for a given farmer,
associated with the provided boundary ids, between the start and end
operation dates specified.
:param farmer_id: ID of the farmer.
:param associated_boundary_ids: List of associated boundary IDs.
:param min_start_operation: The minimum start date of the operation.
:param max_end_operation: The maximum end date of the operation.
:return: Dictionary with organic amendments information.
"""
return self.get_operation_info(
farmer_id=farmer_id,
associated_boundary_ids=associated_boundary_ids,
operation_name="application-data",
min_start_operation=min_start_operation,
max_end_operation=max_end_operation,
sources=["Omad"],
)
def get_tillage_info(
self,
farmer_id: str,
associated_boundary_ids: List[str],
min_start_operation: str,
max_end_operation: str,
):
"""Retrieves the tillage information for a given farmer.
This method will return the tillage information for a given farmer,
associated with the provided boundary ids, between the start and end
operation dates specified.
:param farmer_id: ID of the farmer.
:param associated_boundary_ids: List of associated boundary IDs.
:param min_start_operation: The minimum start date of the operation.
:param max_end_operation: The maximum end date of the operation.
:return: Dictionary with tillage information.
"""
return self.get_operation_info(
farmer_id=farmer_id,
associated_boundary_ids=associated_boundary_ids,
operation_name="tillage-data",
min_start_operation=min_start_operation,
max_end_operation=max_end_operation,
)
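# Illustrative usage sketch (not part of the original module). The endpoint,
# credentials, and IDs below are placeholders read from environment variables
# chosen for this example, and the API version shown is only an assumed default.
if __name__ == "__main__":
    import os
    client = ADMAgClient(
        base_url=os.environ["ADMAG_BASE_URL"],
        api_version=os.environ.get("ADMAG_API_VERSION", "2021-07-31-preview"),
        client_id=os.environ["ADMAG_CLIENT_ID"],
        client_secret=os.environ["ADMAG_CLIENT_SECRET"],
        authority=os.environ["ADMAG_AUTHORITY"],
        default_scope=os.environ["ADMAG_DEFAULT_SCOPE"],
    )
    fields = client.get_seasonal_fields(farmer_id=os.environ["ADMAG_FARMER_ID"])
    print(len(fields.get("value", [])))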
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/admag_client.py |
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
from collections import Counter
from dateutil.tz import tzlocal
from dateutil.tz.tz import tzfile
from rich.console import Console
from rich.highlighter import NullHighlighter
from rich.live import Live
from rich.markup import escape
from rich.padding import Padding
from rich.table import Table
from rich.progress_bar import ProgressBar
from vibe_core.datamodel import RunDetails, RunStatus, TaskDescription
LEFT_BORDER_PADDING = (0, 0, 0, 4)
CONSOLE_WIDTH = 100
STATUS_STR_MAP = {
RunStatus.pending: "[yellow]pending[/]",
RunStatus.running: "[cyan]running[/]",
RunStatus.failed: "[red]failed[/]",
RunStatus.done: "[green]done[/]",
RunStatus.queued: "[yellow]queued[/]",
RunStatus.cancelled: "[yellow]cancelled[/]",
RunStatus.cancelling: "[yellow]cancelling[/]",
}
FETCHING_INFO_STR = ":hourglass_not_done: [yellow]Fetching information...[/]"
def strftimedelta(start: datetime, end: datetime) -> str:
"""Returns the time delta between two datetimes as a string in the format 'HH:MM:SS'.
:param start: Start datetime object.
:param end: End datetime object.
:return: The timedelta formatted as a 'HH:MM:SS' string.
"""
tdelta = end - start
hours, rem = divmod(int(tdelta.total_seconds()), 3600)
minutes, seconds = divmod(rem, 60)
return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
def format_typing(type_dict: Dict[str, str]) -> Dict[str, str]:
"""Formats the types in a type dictionary.
This function takes a dictionary with type strings and formats it,
replacing the "typing." prefix in the values with an empty string.
:param type_dict: The type dictionary to format.
:return: The formatted dictionary.
"""
return {k: v.replace("typing.", "") for k, v in type_dict.items()}
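# Quick illustrative check of the two helpers above. This block is an addition
# (not part of the original module) and the values are made up for the example.
if __name__ == "__main__":
    _start = datetime(2023, 1, 1, 10, 0, 0)
    _end = datetime(2023, 1, 1, 12, 30, 5)
    print(strftimedelta(_start, _end))  # expected: 02:30:05
    print(format_typing({"raster": "typing.List[Raster]"}))  # {'raster': 'List[Raster]'}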
class VibeWorkflowDocumenter:
"""Documenter class for :class:`VibeWorkflow` objects.
This class implements the logic for printing/formatting information about workflows,
including formatting the text elements and adding styling tags. It contains the methods
to print out a description of the workflow with its sources, sinks and parameters.
:param name: The name of the workflow.
:param sources: Dictionary with source names and types.
:param sinks: Dictionary with sink names and types.
:param parameters: Dictionary with parameter names and default values.
:param description: A :class:`TaskDescription` object containing the short and
long description of the workflow.
"""
TITLE_STR = "[bold green]Workflow:[/] [bold underline dodger_blue2]{}[/]"
DESCRIPTION_SECTION_STR = "\n[bold green]Description:[/]"
DESCRIPTION_STR = "{short_description} {long_description}"
ITEM_SECTION_STR = "\n[bold green]{}:[/]"
ITEM_STR = "- [bold]{name}[/]{additional_info}{description}"
def __init__(
self,
name: str,
sources: Dict[str, str],
sinks: Dict[str, str],
parameters: Dict[str, Any],
description: TaskDescription,
):
self.wf_name = name
self.parameters = parameters
self.description = description
self.sources = format_typing(sources)
self.sinks = format_typing(sinks)
self.console = Console(width=CONSOLE_WIDTH, highlighter=NullHighlighter())
self.console.clear()
@property
def formatted_parameters(self) -> Dict[str, str]:
"""Returns a dictionary of workflow's parameters with their default values.
:return: A dictionary containing the formatted parameters and default values.
"""
return {
param_name: "default: task defined"
if isinstance(param_value, list)
else f"default: {param_value}"
for param_name, param_value in self.parameters.items()
}
def _print_header(self):
self.console.print(self.TITLE_STR.format(self.wf_name))
self.console.print(self.DESCRIPTION_SECTION_STR)
desc = escape(
self.DESCRIPTION_STR.format(
short_description=self.description.short_description,
long_description=self.description.long_description,
)
)
desc = Padding(desc, LEFT_BORDER_PADDING)
self.console.print(desc)
def _print_sources(self, section_name: str = "Sources"):
self._print_items_description(self.description.inputs, section_name, self.sources)
def _print_sinks(self, section_name: str = "Sinks"):
self._print_items_description(self.description.outputs, section_name, self.sinks)
def _print_parameters(self, section_name: str = "Parameters"):
if self.parameters:
desc = {
k: v if not isinstance(v, list) else ""
for k, v in self.description.parameters.items()
}
self._print_items_description(desc, section_name, self.formatted_parameters)
def _print_tasks(self, section_name: str = "Tasks"):
task_dict = {task_name: "" for task_name in self.description.task_descriptions.keys()}
self._print_items_description(self.description.task_descriptions, section_name, task_dict)
def _print_items_description(
self,
description_dict: Dict[str, str],
section_name: str,
additional_info: Dict[str, str] = {},
):
self.console.print(self.ITEM_SECTION_STR.format(section_name))
for item_name, item_info in additional_info.items():
item_description = description_dict.get(item_name, "")
item_description = f": {item_description}" if item_description else ""
item_info = f" ([blue]{escape(item_info)}[/])" if item_info else ""
item_doc = self.ITEM_STR.format(
name=item_name, additional_info=item_info, description=escape(item_description)
).strip(":")
item_doc = Padding(item_doc, LEFT_BORDER_PADDING)
self.console.print(item_doc)
def print_documentation(self):
"""Prints the full documentation of the workflow.
This method prints the header of the documentation, the sources, the sinks,
the parameters and the tasks provided in the parsed workflow yaml file.
"""
self._print_header()
self._print_sources()
self._print_sinks()
self._print_parameters()
self._print_tasks()
class VibeWorkflowRunMonitor:
"""Class that abstracts the formatting of workflow run status
:param api_time_zone: The time zone of the API server.
:param detailed_task_info: If True, detailed information about task progress will be
included in the output (defaults to False).
"""
TITLE_STR = (
"[not italic]:earth_americas: "
"FarmVibes.AI :earth_africa: "
"[dodger_blue3]{}[/] :earth_asia: \n"
"Run name: [dodger_blue3]{}[/]\n"
"Run id: [dark_green]{}[/]\n"
"Run status: {}\n"
"Run duration: [dodger_blue3]{}[/][/]"
)
WARNING_HEADER_STR = "\n[yellow]:warning: Warnings :warning:[/]"
WARNING_STR = "\n{}\n[yellow]:warning: :warning: :warning:[/]"
TABLE_FIELDS = [
"Task Name",
"Status",
"Start Time",
"End Time",
"Duration",
]
    SIMPLE_COLUMN_NAME = "Progress"
DETAILED_COLUMN_NAME = "Subtasks\n([green]D[/]/[blue]R[/]/[yellow]Q[/]/[yellow]P[/]/[red]F[/])"
TIME_FORMAT = "%Y/%m/%d %H:%M:%S"
TIME_FORMAT_WITH_TZ = "%Y/%m/%d %H:%M:%S %Z"
PBAR_WIDTH = 20
def __init__(self, api_time_zone: tzfile, detailed_task_info: bool = False):
self.api_tz = api_time_zone
self.detailed_task_info = detailed_task_info
self.column_names = self.TABLE_FIELDS + [
            self.DETAILED_COLUMN_NAME if self.detailed_task_info else self.SIMPLE_COLUMN_NAME
]
self.client_tz = tzlocal()
self._populate_table()
console = Console()
console.clear()
self.live_context = Live(self.table, console=console, screen=False, auto_refresh=False)
def _get_time_str(self, time: Optional[datetime]) -> str:
if time is None:
return "N/A".center(len(self.TIME_FORMAT), " ")
return (
time.replace(tzinfo=self.api_tz)
.astimezone(tz=self.client_tz)
.strftime(self.TIME_FORMAT)
)
def _render_subtask_info(self, task_info: RunDetails) -> Union[Table, str]:
if task_info.subtasks is None:
return "-"
counts = Counter([RunStatus(r["status"]) for r in task_info.subtasks])
if self.detailed_task_info:
# Let's just print out informative text
return (
f"[green]{counts[RunStatus.done]}[/]/[blue]{counts[RunStatus.running]}[/]/"
f"[yellow]{counts[RunStatus.queued]}[/]/"
f"[yellow]{counts[RunStatus.pending]}[/]/[red]{counts[RunStatus.failed]}[/]"
)
# Let's render a nice looking progress bar
total = sum(counts.values())
subtasks = Table(
"bar",
"text",
show_edge=False,
show_footer=False,
show_header=False,
show_lines=False,
box=None, # Remove line between columns
)
subtasks.add_row(
ProgressBar(total=total, completed=counts[RunStatus.done], width=self.PBAR_WIDTH),
f"{counts[RunStatus.done]}/{total}",
)
return subtasks
def _add_row(self, task_name: str, task_info: RunDetails):
start_time_str = self._get_time_str(task_info.start_time)
end_time_str = self._get_time_str(task_info.end_time)
duration = strftimedelta(
self.time_or_now(task_info.start_time), self.time_or_now(task_info.end_time)
)
subtasks = self._render_subtask_info(task_info)
self.table.add_row(
task_name,
STATUS_STR_MAP[task_info.status],
start_time_str,
end_time_str,
duration,
subtasks,
)
def _init_table(self, monitored_warnings: List[Union[str, Warning]] = []):
"""Creates a new table and populate with wf-agnostic info"""
current_time_caption = warnings_caption = ""
self.table = Table(show_footer=False)
for col_name in self.column_names:
self.table.add_column(col_name)
# Build current time caption
current_time_caption = (
f"Last update: {datetime.now(tz=self.client_tz).strftime(self.TIME_FORMAT_WITH_TZ)}"
)
# Build monitored warnings caption
if monitored_warnings:
warnings_caption = "".join(
[self.WARNING_HEADER_STR] + [self.WARNING_STR.format(w) for w in monitored_warnings]
)
self.table.caption = current_time_caption + warnings_caption
def time_or_now(self, time: Optional[datetime]) -> datetime:
"""
Converts a given datetime object to the client's timezone.
If no datetime object is provided, the current time is used.
:param time: Datetime object to convert to the client's timezone.
:return: The datetime object converted to the client's timezone.
"""
return (
time.replace(tzinfo=self.api_tz).astimezone(tz=self.client_tz)
if time is not None
else datetime.now(tz=self.client_tz)
)
def _get_run_duration(
self, sorted_tasks: List[Tuple[str, RunDetails]], run_status: RunStatus
) -> str:
run_duration: str = ":hourglass_not_done:"
if sorted_tasks:
# Get the start time from the first submitted task
run_start_time = self.time_or_now(sorted_tasks[-1][1].submission_time)
# Get the end time of the last task (if finished) or current time otherwise
run_end_time = (
self.time_or_now(sorted_tasks[0][1].end_time)
if RunStatus.finished(run_status)
else datetime.now(tz=self.client_tz)
)
run_duration = strftimedelta(start=run_start_time, end=run_end_time)
return run_duration
def _populate_table(
self,
wf_name: Union[str, Dict[str, Any]] = ":hourglass_not_done:",
run_name: str = ":hourglass_not_done:",
run_id: str = ":hourglass_not_done:",
run_status: RunStatus = RunStatus.pending,
wf_tasks: Optional[Dict[str, RunDetails]] = None,
monitored_warnings: List[Union[str, Warning]] = [],
):
"""Method that creates a new table with updated task info"""
run_duration: str = ":hourglass_not_done:"
# Create new table
self._init_table(monitored_warnings)
# Populate Rows
if wf_tasks is None:
self.table.add_row(FETCHING_INFO_STR)
else:
# Sort tasks by reversed submission/start/end time (running tasks will be on top)
sorted_tasks = sorted(
wf_tasks.items(),
key=lambda t: (
self.time_or_now(t[1].submission_time),
self.time_or_now(t[1].start_time),
self.time_or_now(t[1].end_time),
),
reverse=True,
)
# Add each task to the table
for task_name, task_info in sorted_tasks:
self._add_row(task_name, task_info)
# Compute run duration
run_duration = self._get_run_duration(sorted_tasks, run_status)
# Populate Header
# Do not print the whole dict definition if it is a custom workflow
wf_name = f"Custom: '{wf_name['name']}'" if isinstance(wf_name, dict) else wf_name
self.table.title = self.TITLE_STR.format(
wf_name, run_name, run_id, STATUS_STR_MAP[run_status], run_duration
)
def update_run_status(
self,
wf_name: Union[str, Dict[str, Any]],
run_name: str,
run_id: str,
run_status: RunStatus,
wf_tasks: Dict[str, RunDetails],
monitored_warnings: List[Union[str, Warning]],
):
"""Updates the monitor table.
This method will update the monitor table with the latest information about the workflow
run, individual task status and monitored warnings.
:param wf_name: Name of the workflow being executed.
It can be a string or a custom workflow definition (as a dict).
:param run_name: Name of the workflow run.
:param run_id: Id of the workflow run.
:param run_status: Status of the run.
:param wf_tasks: Dictionary containing the details of each task in the workflow.
:param monitored_warnings: List of monitored warnings.
"""
self._populate_table(wf_name, run_name, run_id, run_status, wf_tasks, monitored_warnings)
self.live_context.update(self.table, refresh=True)
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/monitor.py |
import json
import os
import time
import warnings
from abc import ABC, abstractmethod
from dataclasses import asdict
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union, cast, overload
from urllib.parse import urljoin
import requests
import yaml
from dateutil.parser import ParserError, parse
from dateutil.tz import tzlocal
from dateutil.tz.tz import tzfile
from requests.exceptions import HTTPError
from shapely import geometry as shpg
from shapely.geometry.base import BaseGeometry
from vibe_core.data import BaseVibeDict, StacConverter
from vibe_core.data.core_types import BaseVibe
from vibe_core.data.json_converter import dump_to_json
from vibe_core.data.utils import deserialize_stac, serialize_input
from vibe_core.datamodel import (
RunConfigInput,
RunConfigUser,
RunDetails,
RunStatus,
SpatioTemporalJson,
TaskDescription,
)
from vibe_core.monitor import VibeWorkflowDocumenter, VibeWorkflowRunMonitor
from vibe_core.utils import ensure_list, format_double_escaped
FALLBACK_SERVICE_URL = "http://192.168.49.2:30000/"
"""Fallback URL for FarmVibes.AI service.
:meta hide-value:
"""
XDG_CONFIG_HOME = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
"""Path to configuration file for FarmVibes.AI service.
:meta hide-value:
"""
FARMVIBES_AI_SERVICE_URL_PATH = os.path.join(XDG_CONFIG_HOME, "farmvibes-ai", "service_url")
"""Path to the local service URL file.
:meta hide-value:
"""
FARMVIBES_AI_REMOTE_SERVICE_URL_PATH = os.path.join(
XDG_CONFIG_HOME, "farmvibes-ai", "remote_service_url"
)
"""Path to the local remote service URL file.
:meta hide-value:
"""
DISK_FREE_THRESHOLD_BYTES = 50 * 1024 * 1024 * 1024 # 50 GiB
"""Threshold for disk space in bytes."""
TASK_SORT_KEY = "submission_time"
"""Key for sorting tasks."""
T = TypeVar("T", bound=BaseVibe, covariant=True)
InputData = Union[Dict[str, Union[T, List[T]]], List[T], T]
class WorkflowRun(ABC):
"""An abstract base class for workflow runs."""
@property
@abstractmethod
def status(self) -> str:
"""Gets the status of the workflow run.
:return: The status of the workflow run as a string.
:raises NotImplementedError: If the method is not implemented by a subclass.
"""
raise NotImplementedError
@property
@abstractmethod
def output(self) -> BaseVibeDict:
"""Gets the output of the workflow run.
:return: The output of the workflow run as a :class:`vibe_core.data.BaseVibeDict`.
:raises NotImplementedError: If the method is not implemented by a subclass.
"""
raise NotImplementedError
class Client(ABC):
"""An abstract base class for clients."""
@abstractmethod
def list_workflows(self) -> List[str]:
"""Lists all available workflows.
:return: A list of workflow names.
:raises NotImplementedError: If the method is not implemented by a subclass.
"""
raise NotImplementedError
@abstractmethod
def run(
self,
workflow: str,
geometry: BaseGeometry,
time_range: Tuple[datetime, datetime],
) -> WorkflowRun:
"""Runs a workflow.
:param workflow: The name of the workflow to run.
:param geometry: The geometry to run the workflow on.
:param time_range: The time range to run the workflow on.
:return: A :class:`WorkflowRun` object.
:raises NotImplementedError: If the method is not implemented by a subclass.
"""
raise NotImplementedError
class FarmvibesAiClient(Client):
"""A client for the FarmVibes.AI service.
:param baseurl: The base URL of the FarmVibes.AI service.
"""
default_headers: Dict[str, str] = {
"Accept": "application/json",
"Content-Type": "application/json",
}
"""The default headers to use for requests to the FarmVibes.AI service."""
def __init__(self, baseurl: str):
self.baseurl = baseurl
self.session = requests.Session()
self.session.headers.update(self.default_headers)
def _request(self, method: str, endpoint: str, *args: Any, **kwargs: Any):
"""Sends a request to the FarmVibes.AI service and handle errors.
:param method: The HTTP method to use (e.g., 'GET' or 'POST').
:param endpoint: The endpoint to request.
:param args: Positional arguments to pass to :meth:`requests.Session.request`.
:param kwargs: Keyword arguments to pass to :meth:`requests.Session.request`.
:return: The response from the FarmVibes.AI service.
"""
response = self.session.request(method, urljoin(self.baseurl, endpoint), *args, **kwargs)
try:
r = json.loads(response.text)
except json.JSONDecodeError:
r = response.text
try:
response.raise_for_status()
except HTTPError as e:
error_message = r.get("message", "") if isinstance(r, dict) else r
msg = f"{e}. {error_message}"
raise HTTPError(msg, response=e.response)
return cast(Any, r)
def _form_payload(
self,
workflow: Union[str, Dict[str, Any]],
parameters: Optional[Dict[str, Any]],
geometry: Optional[BaseGeometry],
time_range: Optional[Tuple[datetime, datetime]],
input_data: Optional[InputData[T]],
run_name: str,
) -> Dict[str, Any]:
"""Forms a payload dictionary for submitting a workflow run.
:param workflow: The name of the workflow to run or a dict containing
the workflow definition.
:param parameters: A dict of optional parameters to pass to the workflow.
The keys and values depend on the specific workflow definition.
:param geometry: The geometry to use for the input data.
It must be a valid shapely geometry object (e.g., Point or Polygon).
It will be converted to GeoJSON format internally.
Alternatively it can be None if input_data is provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `geometry` and `input_data` will result in an error.
:param time_range: The time range to use for the input data. It must be
a tuple of two datetime objects representing the start and end dates.
Alternatively it can be None if input_data is provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `time_range` and `input_data` will result in an error.
:param input_data: The input data to use for the workflow run.
It must be an instance of InputData or one of its subclasses
(e.g., SpatioTemporalJson or SpatioTemporalRaster). Alternatively
it can be None if geometry and time_range are provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `input_data` and either `geometry` or `time_range`
will result in an error.
:param run_name: The name to assign to the workflow run.
:return: A dict containing the payload for submitting a workflow run.
The keys are 'run_name', 'workflow', 'parameters', and 'user_input'.
"""
if input_data is not None:
user_input = serialize_input(input_data)
elif geometry is None or time_range is None:
raise ValueError("Either `input_data` or `geometry` and `time_range` are required")
else:
geojson = {
"features": [{"geometry": None, "type": "Feature"}],
"type": "FeatureCollection",
}
geojson["features"][0]["geometry"] = shpg.mapping(geometry)
user_input = SpatioTemporalJson(time_range[0], time_range[1], geojson)
return asdict(RunConfigInput(run_name, workflow, parameters, user_input))
def verify_disk_space(self):
"""Verifies that there is enough disk space available for the cache.
This method checks the system metrics returned by the FarmVibes.AI service
and compares the disk free space with a predefined threshold. If the disk
free space is below the threshold, a warning message is displayed to the user,
suggesting to clear the cache.
.. note::
The disk free space threshold is defined by :const:`DISK_FREE_THRESHOLD_BYTES`.
:raises: :exc:`RuntimeWarning` if the disk space is low.
"""
metrics = self.get_system_metrics()
df = cast(Optional[int], metrics.get("disk_free", None))
if df is not None and df < DISK_FREE_THRESHOLD_BYTES:
warnings.warn(
"The FarmVibes.AI cache is running low on disk space "
f"and only has {df / 1024 / 1024 / 1024} GiB left. "
"Please consider clearing the cache to free up space and "
"to avoid potential failures.",
category=RuntimeWarning,
)
def list_workflows(self) -> List[str]:
"""Lists all available workflows on the FarmVibes.AI service.
This method returns a list of workflow names that can be used to
submit workflow runs or to get more details about a specific workflow.
:return: A list of workflow names.
"""
return self._request("GET", "v0/workflows")
def describe_workflow(self, workflow_name: str) -> Dict[str, Any]:
"""Describes a workflow.
This method returns a dictionary containing the description of a
workflow, such as its inputs, outputs, parameters and tasks.
.. note::
The description is returned as a :class:`TaskDescription` object,
which is a dataclass that represents the structure and
properties of a workflow.
:param workflow_name: The name of the workflow to describe.
It must be one of the names returned by list_workflows().
:return: A dictionary containing the workflow description.
The keys are 'name', 'description', 'inputs', 'outputs' and 'parameters'.
"""
desc = self._request("GET", f"v0/workflows/{workflow_name}?return_format=description")
desc["description"] = TaskDescription(**desc["description"])
return desc
def get_system_metrics(self) -> Dict[str, Union[int, float, Tuple[float, ...]]]:
"""Gets system metrics from the FarmVibes.AI service.
This method returns a dictionary containing various system metrics,
such as CPU usage, memory usage and disk space.
:return: A dictionary containing system metrics.
"""
return self._request("GET", "v0/system-metrics")
def get_workflow_yaml(self, workflow_name: str) -> str:
"""Gets the YAML definition of a workflow.
This method returns a string containing the YAML definition of a
workflow. The YAML definition specifies the name and operations of
the workflow in a human-readable format.
:param workflow_name: The name of the workflow. It must be one
of the names returned by list_workflows().
:return: The YAML definition of the workflow.
"""
yaml_content = self._request("GET", f"v0/workflows/{workflow_name}?return_format=yaml")
return yaml.dump(yaml_content, default_flow_style=False, default_style="", sort_keys=False)
def cancel_run(self, run_id: str) -> str:
"""Cancels a workflow run.
This method sends a request to the FarmVibes.AI service to cancel
a workflow run that is in progress or pending. If the cancellation
is successful, the workflow run status will be set to 'cancelled'.
.. note::
The cancellation may take some time to take effect depending on
the state of the workflow run and the service availability.
.. warning::
A workflow run that is already completed or failed cannot be cancelled.
:param run_id: The ID of the workflow run to cancel.
:return: The message from the FarmVibes.AI service indicating whether
the cancellation was successful or not.
"""
return self._request("POST", f"v0/runs/{run_id}/cancel")["message"]
def describe_run(self, run_id: str) -> RunConfigUser:
"""Describes a workflow run.
This method returns a RunConfigUser object containing the description of a
workflow run, such as its name, status, inputs and outputs.
:param run_id: The ID of the workflow run to describe.
:return: A :class:`RunConfigUser` object containing the workflow run description.
"""
response = self._request("GET", f"v0/runs/{run_id}")
try:
run = RunConfigUser(**response)
for v in run.task_details.values():
if v.subtasks is not None:
v.subtasks = [RunDetails(**i) for i in v.subtasks]
except Exception as e:
raise RuntimeError(f"Failed to parse description for run {run_id}: {e}") from e
return run
def document_workflow(self, workflow_name: str) -> None:
"""Prints the documentation of a workflow.
This method prints a formatted documentation of a workflow,
including its name, description, inputs, outputs and parameters.
.. note::
The documentation is printed to stdout and can be redirected to
other outputs if needed.
:param workflow_name: The name of the workflow to document.
"""
wf_dict = self.describe_workflow(workflow_name)
documenter = VibeWorkflowDocumenter(
name=workflow_name,
sources=wf_dict["inputs"],
sinks=wf_dict["outputs"],
parameters=wf_dict["parameters"],
description=wf_dict["description"],
)
documenter.print_documentation()
def list_runs(
self,
ids: Optional[Union[str, List[str]]] = None,
fields: Optional[Union[str, List[str]]] = None,
):
"""Lists workflow runs on the FarmVibes.AI service.
:param ids: The IDs of the workflow runs to list.
If None, all workflow runs will be listed.
:param fields: The fields to return for each workflow run.
If None, all fields will be returned.
:return: A list of workflow runs. Each run is represented by a dictionary
with keys corresponding to the requested fields and values containing
the field values.
"""
ids = [f"ids={id}" for id in ensure_list(ids)] if ids is not None else []
fields = [f"fields={field}" for field in ensure_list(fields)] if fields is not None else []
query_str = "&".join(ids + fields)
return self._request("GET", f"v0/runs?{query_str}")
def get_run_by_id(self, id: str) -> "VibeWorkflowRun":
"""Gets a workflow run by ID.
This method returns a :class:`VibeWorkflowRun` object containing
the details of a workflow run by its ID.
:param id: The ID of the workflow run to get.
:return: A :class:`VibeWorkflowRun` object.
"""
fields = ["id", "name", "workflow", "parameters"]
run = self.list_runs(id, fields=fields)[0]
return VibeWorkflowRun(*(run[f] for f in fields), self) # type: ignore
def get_api_time_zone(self) -> tzfile:
"""Gets the time zone of the FarmVibes.AI REST-API.
This method returns a tzfile object representing the time zone of
the FarmVibes.AI REST-API. The time zone is determined by parsing
the 'date' header from the response of a GET request to the base URL
of the service. If the 'date' header is missing or invalid, a warning
is issued and the client time zone is used instead.
.. note::
The tzfile object is a subclass of datetime.tzinfo that represents
a time zone using an Olson database file.
:return: The time zone of the FarmVibes.AI REST-API as a tzfile object.
"""
tz = tzlocal()
response = self.session.request("GET", self.baseurl)
try:
dt = parse(response.headers["date"])
tz = dt.tzinfo if dt.tzinfo is not None else tzlocal()
except KeyError:
warnings.warn(
"Could not determine the time zone of the FarmVibes.AI REST-API. "
"'date' header is missing from the response. "
"Using the client time zone instead.",
category=RuntimeWarning,
)
except ParserError:
warnings.warn(
"Could not determine the time zone of the FarmVibes.AI REST-API. "
"Unable to parse the 'date' header from the response. "
"Using the client time zone instead.",
category=RuntimeWarning,
)
return cast(tzfile, tz)
@overload
def run(
self,
workflow: Union[str, Dict[str, Any]],
name: str,
*,
geometry: BaseGeometry,
time_range: Tuple[datetime, datetime],
parameters: Optional[Dict[str, Any]] = None,
) -> "VibeWorkflowRun":
...
@overload
def run(
self,
workflow: Union[str, Dict[str, Any]],
name: str,
*,
input_data: InputData[T],
parameters: Optional[Dict[str, Any]] = None,
) -> "VibeWorkflowRun":
...
def run(
self,
workflow: Union[str, Dict[str, Any]],
name: str,
*,
geometry: Optional[BaseGeometry] = None,
time_range: Optional[Tuple[datetime, datetime]] = None,
input_data: Optional[InputData[T]] = None,
parameters: Optional[Dict[str, Any]] = None,
) -> "VibeWorkflowRun":
"""Runs a workflow.
This method instantiates a workflow run using the provided data and parameters.
:param workflow: The name of the workflow to run or a dict containing
the workflow definition.
:param name: The name to assign to the workflow run.
:param geometry: The geometry to use for the input data.
It must be a valid shapely geometry object (e.g., Point or Polygon).
It will be converted to GeoJSON format internally.
Alternatively it can be None if input_data is provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `geometry` and `input_data` will result in an error.
:param time_range: The time range to use for the input data. It must be
a tuple of two datetime objects representing the start and end dates.
Alternatively it can be None if input_data is provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `time_range` and `input_data` will result in an error.
:param input_data: The input data to use for the workflow run.
It must be an instance of InputData or one of its subclasses
(e.g., SpatioTemporalJson or SpatioTemporalRaster). Alternatively
it can be None if geometry and time_range are provided instead.
.. note::
Either `geometry` and `time_range` or `input_data` must be provided.
.. warning::
Providing both `input_data` and either `geometry` or `time_range`
will result in an error.
:param parameters: A dict of optional parameters to pass to the workflow.
The keys and values depend on the specific workflow definition.
:return: A :class:`VibeWorkflowRun` object.
"""
self.verify_disk_space()
payload = dump_to_json(
self._form_payload(workflow, parameters, geometry, time_range, input_data, name),
)
response = self._request("POST", "v0/runs", data=payload)
return self.get_run_by_id(response["id"])
def resubmit_run(self, run_id: str) -> "VibeWorkflowRun":
"""
Resubmits a workflow run with the given run ID.
:param run_id: The ID of the workflow run to resubmit.
:return: The resubmitted workflow run.
"""
self.verify_disk_space()
response = self._request("POST", f"v0/runs/{run_id}/resubmit")
return self.get_run_by_id(response["id"])
class VibeWorkflowRun(WorkflowRun):
"""Represents a workflow run in FarmVibes.AI.
:param id: The ID of the workflow run.
:param name: The name of the workflow run.
:param workflow: The name of the workflow associated to the run.
:param parameters: The parameters associated to the workflow run, as a dict.
The keys and values depend on the specific workflow definition.
:param client: An instance of the :class:`FarmVibesAiClient` class.
"""
wait_s = 10
def __init__(
self,
id: str,
name: str,
workflow: str,
parameters: Dict[str, Any],
client: FarmvibesAiClient,
):
self.id = id
self.name = name
self.workflow = workflow
self.parameters = parameters
self.client = client
self._status = RunStatus.pending
self._reason = ""
self._output = None
self._task_details = None
def _convert_output(self, output: Dict[str, Any]) -> BaseVibeDict:
"""Converts the output of the workflow run to a :class:`BaseVibeDict`.
This method takes the output of the workflow run as a dictionary and
converts each value to a DataVibe object using a StacConverter object.
It returns a new dictionary with the same keys and converted values.
:param output: The output of the workflow run. It is a dictionary
with key-value pairs where each value is a STAC item in JSON format.
:return: The converted output of the workflow run. It is a dictionary
with key-value pairs where each value is a BaseVibe object that
represents a geospatial data asset.
"""
converter = StacConverter()
return {k: converter.from_stac_item(deserialize_stac(v)) for k, v in output.items()}
def _convert_task_details(self, details: Dict[str, Any]) -> Dict[str, RunDetails]:
"""Converts the task details of the workflow run to a :class:`RunDetails` dictionary.
This method takes the task details of the workflow run as a dictionary and converts
each value to a RunDetails object using keyword arguments. It returns a new dictionary
with the same keys and converted values. The keys are sorted by their corresponding
start time or end time if available.
:param details: The task details of the workflow run.
:return: The converted task details of the workflow run.
"""
return {
k: RunDetails(**v)
for k, v in sorted(
details.items(),
key=lambda x: cast(
datetime, parse(x[1][TASK_SORT_KEY]) if x[1][TASK_SORT_KEY] else datetime.now()
),
)
}
@property
def status(self) -> RunStatus:
"""Gets the status of the workflow run."""
if not RunStatus.finished(self._status):
self._status = RunStatus(self.client.list_runs(self.id)[0]["details.status"])
return self._status
@property
def task_details(self) -> Dict[str, RunDetails]:
"""Gets the task details of the workflow run."""
if self._task_details is not None:
return self._task_details
status = self.status
task_details = self._convert_task_details(
self.client.list_runs(ids=self.id, fields="task_details")[0]["task_details"]
)
if RunStatus.finished(status):
self._task_details = task_details
return task_details
@property
def task_status(self) -> Dict[str, str]:
"""Gets the task status of the workflow run."""
details = self.task_details
return {k: v.status for k, v in details.items()}
@property
def output(self) -> Optional[BaseVibeDict]:
"""Gets the output of the workflow run."""
if self._output is not None:
return self._output
run = self.client.describe_run(self.id)
self._status = run.details.status
if self._status != RunStatus.done:
return None
self._output = self._convert_output(run.output)
return self._output
@property
def reason(self) -> Optional[str]:
"""Gets the reason of the workflow run.
The reason is a string that describes the status of the workflow run.
In case of failure, it also contains the reason of the failure.
"""
status = self.status
if status == RunStatus.done:
self._reason = "Workflow run was successful."
elif status in [RunStatus.cancelled, RunStatus.cancelling]:
self._reason = f"Workflow run {status}."
elif status in [RunStatus.running, RunStatus.pending]:
self._reason = (
f"Workflow run is {status}. "
f"Check {self.__class__.__name__}.monitor() for task updates."
)
else: # RunStatus.failed
run = self.client.list_runs(self.id, "details.reason")[0]
self._reason = format_double_escaped(run["details.reason"])
return self._reason
def cancel(self) -> "VibeWorkflowRun":
"""Cancels the workflow run.
:return: The workflow run.
"""
self.client.cancel_run(self.id)
self.status
return self
def resubmit(self) -> "VibeWorkflowRun":
"""
Resubmits the current workflow run.
:return: The resubmitted workflow run instance.
"""
return self.client.resubmit_run(self.id)
def block_until_complete(self, timeout_s: Optional[int] = None) -> "VibeWorkflowRun":
"""Blocks until the workflow run execution completes or fails, with an optional
timeout in seconds.
:param timeout_s: Optional timeout in seconds to wait for the workflow to complete.
If not provided, the method will wait indefinitely.
:raises RuntimeError: If the run does not complete before timeout_s.
:return: The workflow run object.
"""
time_start = time.time()
while self.status not in (RunStatus.done, RunStatus.failed):
time.sleep(self.wait_s)
if timeout_s is not None and (time.time() - time_start) > timeout_s:
raise RuntimeError(
f"Timeout of {timeout_s}s reached while waiting for workflow completion"
)
return self
def monitor(
self,
refresh_time_s: int = 1,
refresh_warnings_time_min: int = 5,
timeout_min: Optional[int] = None,
detailed_task_info: bool = False,
):
"""Monitors the workflow run.
        This method will block and print the status of the run every refresh_time_s seconds,
until the workflow run finishes or it reaches timeout_min minutes. It will also
print warnings every refresh_warnings_time_min minutes.
:param refresh_time_s: Refresh interval in seconds (defaults to 1 second).
:param refresh_warnings_time_min: Refresh interval in minutes for updating
the warning messages (defaults to 5 minutes).
:param timeout_min: The maximum time to monitor the workflow run, in minutes.
If not provided, the method will monitor indefinitely.
:param detailed_task_info: If True, detailed information about task progress
will be included in the output (defaults to False).
"""
with warnings.catch_warnings(record=True) as monitored_warnings:
monitor = VibeWorkflowRunMonitor(
api_time_zone=self.client.get_api_time_zone(),
detailed_task_info=detailed_task_info,
)
stop_monitoring = False
time_start = last_warning_refresh = time.time()
with monitor.live_context:
while not stop_monitoring:
monitor.update_run_status(
self.workflow,
self.name,
self.id,
self.status,
self.task_details,
[w.message for w in monitored_warnings],
)
time.sleep(refresh_time_s)
                    current_time = time.time()
                    # Check for warnings every refresh_warnings_time_min minutes
                    if (current_time - last_warning_refresh) / 60.0 > refresh_warnings_time_min:
                        self.client.verify_disk_space()
                        last_warning_refresh = current_time
                    # Check for timeout
                    did_timeout = (
                        timeout_min is not None and (current_time - time_start) / 60.0 > timeout_min
)
stop_monitoring = RunStatus.finished(self.status) or did_timeout
# Update one last time to make sure we have the latest state
monitor.update_run_status(
self.workflow,
self.name,
self.id,
self.status,
self.task_details,
[w.message for w in monitored_warnings],
)
def __repr__(self):
"""Gets the string representation of the workflow run.
:return: The string representation of the workflow run.
"""
return (
f"'{self.__class__.__name__}'(id='{self.id}', name='{self.name}',"
f" workflow='{self.workflow}', status='{self.status}')"
)
def get_local_service_url() -> str:
"""Retrieves the local service URL used to submit workflow runs to the FarmVibes.AI service.
This function attempts to read the service URL from a file, and if that fails,
it will return a fallback URL.
:return: The local service URL.
"""
try:
with open(FARMVIBES_AI_SERVICE_URL_PATH, "r") as fp:
return fp.read().strip()
except FileNotFoundError:
return FALLBACK_SERVICE_URL
def get_remote_service_url() -> str:
"""Gets the remote service URL.
:return: The remote service URL.
"""
try:
with open(FARMVIBES_AI_REMOTE_SERVICE_URL_PATH, "r") as fp:
return fp.read().strip()
except FileNotFoundError as e:
print(e)
raise
def get_default_vibe_client(url: str = "", connect_remote: bool = False):
"""Gets the default vibe client.
:param url: The URL.
:param connect_remote: Whether to connect remotely.
:return: The vibe client.
"""
if not url:
url = get_remote_service_url() if connect_remote else get_local_service_url()
return FarmvibesAiClient(url)
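# Usage sketch (not part of the library API): assumes a FarmVibes.AI cluster is already
# running locally and exposes the built-in "helloworld" workflow; the run name, geometry,
# and dates below are illustrative only.
if __name__ == "__main__":
    from datetime import timezone

    client = get_default_vibe_client()
    print(client.list_workflows())
    client.document_workflow("helloworld")
    run = client.run(
        "helloworld",
        "hello_example",
        geometry=shpg.Point(-88.0, 37.05).buffer(0.01),
        time_range=(
            datetime(2021, 2, 1, tzinfo=timezone.utc),
            datetime(2021, 2, 11, tzinfo=timezone.utc),
        ),
    )
    run.monitor()  # blocks, rendering task status until the run finishes
    print(run.reason)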
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/client.py |
import logging
import mimetypes
import os
from typing import Any
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
CHUNK_SIZE = 1024 * 1024 # 1MB chunks
"""Size of the chunks to read from the server per request."""
REQUEST_RETRIES = 5
"""Number of retries to perform when a request fails."""
REQUEST_BACKOFF = 0.3
"""Back-off factor to apply between retries."""
CONNECT_TIMEOUT_S = 30
"""Time in seconds to wait for connection to the server before aborting."""
READ_TIMEOUT_S = 30
"""Time in seconds for each chunk read from the server."""
LOGGER = logging.getLogger(__name__)
def retry_session() -> requests.Session:
"""Creates a session with retry support.
This method creates a requests.Session object with retry support
configured to retry failed requests up to :const:`REQUEST_RETRIES` times
with a :const:`REQUEST_BACKOFF` time back-off factor.
:return: A configured requests.Session object
"""
session = requests.Session()
retry = Retry(
total=REQUEST_RETRIES,
read=REQUEST_RETRIES,
connect=REQUEST_RETRIES,
backoff_factor=REQUEST_BACKOFF,
)
    # Had to ignore the type as urllib is loaded dynamically
# details here (https://github.com/microsoft/pylance-release/issues/597)
adapter = HTTPAdapter(max_retries=retry) # type: ignore
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
def build_file_path(dir_name: str, file_name: str, type: str = "") -> str:
"""
Builds the full file path by combining the directory name, file name and
optional type to infer the file extension.
:param dir_name: Name of the directory.
:param file_name: Name of the file.
:param type: Type of the file (default is empty).
:return: The full file path.
"""
extension = mimetypes.guess_extension(type)
if not extension:
LOGGER.info(f"File extension could no be inferred with type {type}. Using no extension.")
extension = ""
file_path = os.path.join(dir_name, f"{file_name}{extension}")
return file_path
def download_file(
url: str,
file_path: str,
chunk_size: int = CHUNK_SIZE,
connect_timeout: float = CONNECT_TIMEOUT_S,
read_timeout: float = READ_TIMEOUT_S, # applies per chunk
**kwargs: Any,
) -> str:
"""Downloads a file from a given URL to the given file path.
The download is done using a retry session, to handle connection errors.
:param url: URL of the file to download.
:param file_path: Path where the file will be saved.
:param chunk_size: Amount of data to read from the server per request
(defaults to :const:`CHUNK_SIZE`).
:param connect_timeout: Time in seconds to wait for connection to the server before aborting
(defaults to :const:`CONNECT_TIMEOUT_S`).
:param read_timeout: Time in seconds for each chunk read from the server
(defaults to :const:`READ_TIMEOUT_S`).
:param kwargs: Additional keyword arguments to be passed to the request library call.
:return: Path of the saved file.
"""
session = retry_session()
try:
with session.get(url, stream=True, timeout=(connect_timeout, read_timeout), **kwargs) as r:
r.raise_for_status()
with open(file_path, "wb") as f:
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
return file_path
except requests.ConnectionError:
LOGGER.exception(f"Connection error when downloading remote asset {url}")
raise
except requests.HTTPError as err:
LOGGER.exception(
f"HTTP error (code {err.response.status_code}) when downloading remote asset {url}"
)
raise
def verify_url(
url: str,
connect_timeout: float = CONNECT_TIMEOUT_S,
**kwargs: Any,
) -> bool:
"""Verifies the validity of a given URL.
This method attempts to connect to the specified url and verifies
that it does not raise any HTTP or Connection errors.
:param url: The URL to check.
:param connect_timeout: Timeout when attempting to connect to the specified url.
Defaults to the value of :const:`CONNECT_TIMEOUT_S`.
:param kwargs: Additional keyword arguments to pass to the requests.get call.
:return: True if the URL is valid, False otherwise.
"""
status = True
session = retry_session()
try:
with session.get(url, stream=True, timeout=connect_timeout, **kwargs) as r:
r.raise_for_status()
except requests.ConnectionError:
LOGGER.warning(f"Connection error when verifying remote asset {url}")
status = False
except requests.HTTPError as err:
LOGGER.warning(
f"HTTP error (code {err.response.status_code}) when verifying remote asset {url}"
)
status = False
return status
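# Usage sketch for the helpers above: verify a remote asset and stream it to a temporary
# directory. The URL is a placeholder; any reachable HTTP(S) file would work.
if __name__ == "__main__":
    import tempfile

    example_url = "https://example.com/data/sample.tif"  # hypothetical asset location
    if verify_url(example_url):
        out_path = build_file_path(tempfile.mkdtemp(), "sample", "image/tiff")
        download_file(example_url, out_path)
        LOGGER.info(f"Saved asset to {out_path}")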
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/file_downloader.py |
import codecs
import json
import zlib
from dataclasses import asdict, dataclass, field
from datetime import datetime
from enum import auto
from typing import Any, Dict, Final, List, Optional, Union, cast
from uuid import UUID
from dateutil.parser import parse
from strenum import StrEnum
from .data.core_types import OpIOType
from .data.json_converter import dump_to_json
SUMMARY_DEFAULT_FIELDS: Final[List[str]] = ["id", "workflow", "name", "details.status"]
@dataclass
class Message:
"""Dataclass that represents an API message."""
message: str
"""The message."""
id: Optional[str] = None
"""The id of the message."""
location: Optional[str] = None
"""The location of the message."""
@dataclass
class Region:
"""Dataclass that represents a region."""
name: str
"""The name of the region."""
geojson: Dict[str, Any] = field(default_factory=dict)
"""The geojson of the region."""
@dataclass
class SpatioTemporalJson:
"""Dataclass that represents a spatio temporal json."""
start_date: datetime
"""The start date of the spatio temporal json."""
end_date: datetime
"""The end date of the spatio temporal json."""
geojson: Dict[str, Any]
"""The geojson of the spatio temporal json."""
def __post_init__(self):
for attr in ("start_date", "end_date"):
if isinstance(some_date := getattr(self, attr), str):
setattr(self, attr, parse(some_date))
@dataclass
class RunBase:
"""Base dataclass for a run."""
name: str
"""The name of the run."""
workflow: Union[str, Dict[str, Any]]
"""The workflow of the run."""
parameters: Optional[Dict[str, Any]]
"""The parameters of the run."""
def __post_init__(self):
if isinstance(self.workflow, str):
try:
self.workflow = json.loads(self.workflow)
except json.decoder.JSONDecodeError:
pass
@dataclass
class RunConfigInput(RunBase):
"""Dataclass that represents a run config input."""
user_input: Union[SpatioTemporalJson, Dict[str, Any], List[Any]]
"""The user input of the run config (usually a region/geometry and time range)."""
def __post_init__(self):
super().__post_init__()
if isinstance(self.user_input, dict):
try:
self.user_input = SpatioTemporalJson(**self.user_input)
except TypeError:
# We need this because of BaseVibe.
pass
class RunStatus(StrEnum):
"""Enum that represents the status of a run."""
pending = auto()
"""The run is pending"""
queued = auto()
"""The run is queued."""
running = auto()
"""The run is running."""
failed = auto()
"""The run has failed."""
done = auto()
"""The run is done."""
cancelled = auto()
"""The run is cancelled."""
cancelling = auto()
"""The run is cancelling."""
@staticmethod
def finished(status: "RunStatus"):
"""Checks if a run has finished.
This method checks if a run status is either
:attr:`vibe_core.datamodel.RunStatus.done`,
:attr:`vibe_core.datamodel.RunStatus.cancelled`, or
:attr:`vibe_core.datamodel.RunStatus.failed`.
:param status: The status to check.
:return: Whether the run has finished.
"""
return status in (RunStatus.done, RunStatus.cancelled, RunStatus.failed)
@dataclass
class RunDetails:
"""Dataclass that encapsulates the details of a run."""
start_time: Optional[datetime] = None
"""The start time of the run."""
submission_time: Optional[datetime] = None
"""The submission time of the run."""
end_time: Optional[datetime] = None
"""The end time of the run."""
reason: Optional[str] = None
"""A description of the reason for the status of the run."""
status: RunStatus = RunStatus.pending # type: ignore
"""The status of the run."""
subtasks: Optional[List[Any]] = None
"""Details about the subtasks of the run."""
def __post_init__(self):
for time_field in ("start_time", "submission_time", "end_time"):
attr = cast(Union[str, datetime, None], getattr(self, time_field))
if isinstance(attr, str):
setattr(self, time_field, parse(attr))
@dataclass
class RunConfig(RunConfigInput):
"""Dataclass that represents a run config."""
id: UUID
"""The id of the run config."""
details: RunDetails
"""The details of the run config."""
task_details: Dict[str, RunDetails]
"""The details of the tasks of the run config."""
spatio_temporal_json: Optional[SpatioTemporalJson]
"""The spatio temporal json of the run config."""
output: str = ""
"""The output of the run."""
def set_output(self, value: OpIOType): # pydantic won't let us use a property setter
"""Sets the output of the run config.
:param value: The value to set the output to.
"""
self.output = encode(dump_to_json(value))
def __post_init__(self):
if isinstance(self.details, dict):
self.details = RunDetails(**self.details)
if self.spatio_temporal_json is not None and isinstance(self.spatio_temporal_json, dict):
try:
self.spatio_temporal_json = SpatioTemporalJson(**self.spatio_temporal_json)
except TypeError:
pass
for k, v in self.task_details.items():
if isinstance(v, dict):
self.task_details[k] = RunDetails(**v)
super().__post_init__()
class RunConfigUser(RunConfig):
"""Dataclass that represents a run config for the user."""
output: OpIOType
"""The output of the run."""
@classmethod
def from_runconfig(cls, run_config: RunConfig):
"""Creates a :class:`RunConfigUser` from a :class:`RunConfig`.
:param run_config: The run config to create the user run config from.
:return: The user run config.
"""
rundict = asdict(run_config)
output = rundict.pop("output")
rcu = cls(**rundict)
rcu.output = json.loads(decode(output)) if output else {}
return rcu
@staticmethod
def finished(status: "RunStatus"):
"""Checks if a run has finished.
This method checks if a given status is either
:attr:`vibe_core.datamodel.RunStatus.done`,
:attr:`vibe_core.datamodel.RunStatus.cancelled`, or
:attr:`vibe_core.datamodel.RunStatus.failed`.
:param status: The status to check.
:return: Whether the run has finished.
"""
return status in (RunStatus.done, RunStatus.cancelled, RunStatus.failed)
@dataclass
class TaskDescription:
"""Dataclass that represents a task description."""
inputs: Dict[str, str] = field(default_factory=dict)
"""The inputs of the task."""
outputs: Dict[str, str] = field(default_factory=dict)
"""The outputs of the task."""
parameters: Dict[str, str] = field(default_factory=dict)
"""The task parameters."""
task_descriptions: Dict[str, str] = field(default_factory=dict)
"""The descriptions of subtasks."""
short_description: str = ""
"""The short description of the task."""
long_description: str = ""
"""The long description of the task."""
def encode(data: str) -> str:
"""Encodes a string using zlib and base64 encoding.
This function compresses the data string with zlib and then encodes it into a base64 string.
:param data: The string to be encoded.
:return: The encoded string.
"""
return codecs.encode(zlib.compress(data.encode("utf-8")), "base64").decode("utf-8") # JSON 😞
def decode(data: str) -> str:
"""Decodes the given data using zlib and base64 encodings.
:param data: The string to decode.
:return: The decoded string.
"""
return zlib.decompress(codecs.decode(data.encode("utf-8"), "base64")).decode("utf-8") # JSON 😞
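# Round-trip sketch for the helpers above: `encode` zlib-compresses and base64-encodes a
# JSON string (as RunConfig.set_output does) and `decode` reverses it (as
# RunConfigUser.from_runconfig does). The payload is illustrative.
if __name__ == "__main__":
    payload = json.dumps({"sink": [{"id": "abc123"}]})
    packed = encode(payload)
    assert decode(packed) == payload
    print(f"compressed payload is {len(packed)} characters long")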
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/datamodel.py |
from .client import Client, FarmvibesAiClient
__all__ = ["Client", "FarmvibesAiClient"]
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/__init__.py |
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple, TypeVar, Union
from vibe_core.data.core_types import OpIOType
T = TypeVar("T")
@dataclass
class MermaidVerticesMap:
"""
A map of vertices for a mermaid diagram extracted from a WorkflowSpec.
Each entry maps the source/sink/task name to the vertex label.
"""
sources: Dict[str, str]
"""Source map."""
sinks: Dict[str, str]
"""Sink map."""
tasks: Dict[str, str]
"""Task map."""
def ensure_list(input: Union[List[T], T]) -> List[T]:
"""Ensures that the given input is a list.
If the input is a single item, it is wrapped in a list.
:param input: List or single item to be wrapped in a list.
:return: A list containing the input item.
"""
if isinstance(input, list):
return input
return [input]
def get_input_ids(input: OpIOType) -> Dict[str, Union[str, List[str]]]:
"""Retrieve the IDs from an input OpIOType object.
This method will extract the IDs from an OpIOType object and return them as a dictionary,
where the keys are the names of the inputs and values are either strings or lists of strings.
:param input: The input object.
:return: A dictionary with the IDs of the input object.
"""
return {
k: [vv.get("id", "NO-ID") for vv in v] if isinstance(v, list) else v.get("id", "NO-ID")
for k, v in input.items()
}
def rename_keys(x: Dict[str, Any], key_dict: Dict[str, str]):
"""Renames the keys of a dictionary.
This utility function takes a dictionary `x` and a dictionary `key_dict`
mapping old keys to their new names, and returns a copy of `x` with the keys renamed.
:param x: The dictionary with the keys to be renamed.
:param key_dict: Dictionary mapping old keys to their new names.
:return: A copy of x with the keys renamed.
"""
renamed = x.copy()
for old_key, new_key in key_dict.items():
if old_key in x:
renamed[new_key] = x[old_key]
del renamed[old_key]
return renamed
def format_double_escaped(s: str):
"""Encodes and decodes a double escaped input string.
Useful for formatting status/reason strings of VibeWorkflowRun.
:param s: Input string to be processed.
:return: Formatted string.
"""
return s.encode("raw_unicode_escape").decode("unicode-escape")
def build_mermaid_edge(
origin: Tuple[str, str],
destination: Tuple[str, str],
vertices_origin: Dict[str, str],
vertices_destination: Dict[str, str],
) -> str:
"""Builds a mermaid edge from a pair of vertices.
:param origin: A pair of source/sink/task and port names.
:param destination: A pair of source/sink/task and port names.
:param vertices_origin: The vertex map to retrieve the mermaid vertex label for the origin.
:param vertices_destination: The vertex map to retrieve the mermaid vertex label
for the destination.
:return: The mermaid edge string.
"""
origin_vertex, origin_port = origin
destination_vertex, destination_port = destination
separator = "/" if origin_port and destination_port else ""
if origin_port == destination_port:
port_map = origin_port
else:
port_map = f"{origin_port}{separator}{destination_port}"
return (
f"{vertices_origin[origin_vertex]} "
f"-- {port_map} --> "
f"{vertices_destination[destination_vertex]}"
)
def draw_mermaid_diagram(vertices: MermaidVerticesMap, edges: List[str]) -> str:
"""Draws a mermaid diagram from a set of vertices and edges.
:param vertices: A map of vertices for a mermaid diagram extracted from a WorkflowSpec.
    :param edges: A list of edges already formatted with mermaid syntax.
:return: The mermaid diagram string.
"""
diagram = (
"graph TD\n"
+ "\n".join(
[f" {source}" for source in vertices.sources.values()]
+ [f" {sink}" for sink in vertices.sinks.values()]
+ [f" {task}" for task in vertices.tasks.values()]
)
+ "\n"
+ "\n".join([f" {edge}" for edge in edges])
)
return diagram
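# Minimal sketch of assembling a mermaid diagram by hand. In the service the vertex map and
# edges are derived from a WorkflowSpec; the vertex labels and port names below are made up
# for illustration.
if __name__ == "__main__":
    vertices = MermaidVerticesMap(
        sources={"user_input": "inp1>user_input]"},
        sinks={"raster": "out1>raster]"},
        tasks={"download": "tsk1{{download}}"},
    )
    edges = [
        build_mermaid_edge(
            ("user_input", "input"), ("download", "input"), vertices.sources, vertices.tasks
        ),
        build_mermaid_edge(
            ("download", "raster"), ("raster", "raster"), vertices.tasks, vertices.sinks
        ),
    ]
    print(draw_mermaid_diagram(vertices, edges))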
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/utils.py |
import json
import logging
import logging.handlers
import os
from logging import Filter, LogRecord, getLogger
from platform import node
from typing import Dict, List, Optional
LOG_FORMAT = "[%(asctime)s] [%(hostname)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s"
"""The default log format."""
JSON_FORMAT = (
'{"app_id": "%(app)s", "instance": "%(hostname)s", "level": "%(levelname)s", '
'"msg": %(json_message)s, "scope": "%(name)s", "time": "%(asctime)s", "type": "log", '
'"ver": "dev"}'
)
"""JSON log format."""
DEFAULT_LOGGER_LEVELS: Dict[str, str] = {
"gdal": "INFO",
"rasterio": "INFO",
"urllib3": "INFO",
"urllib3.connectionpool": "DEBUG",
"fiona": "INFO",
"werkzeug": "INFO",
"azure": "WARNING",
"matplotlib": "INFO",
"uvicorn": "WARNING",
}
"""The default log levels for the different loggers."""
class HostnameFilter(Filter):
"""Filter class to add hostname field to the log record."""
hostname = node()
def filter(self, record: LogRecord):
"""Adds a hostname field to the log record with the value of
the node() function from the platform module.
:param record: The log record to be filtered.
:return: True
"""
record.hostname = self.hostname
return True
class AppFilter(Filter):
"""Filter class to add app field to the log record.
:param app: The name of the application.
"""
def __init__(self, app: str):
super().__init__()
self.app = app
def filter(self, record: LogRecord):
"""Adds an app field to the log record with the value of the app attribute.
:param record: The log record to be filtered.
:return: True
"""
record.app = self.app
return True
class JsonMessageFilter(Filter):
"""Log filter to convert messages to JSON."""
def filter(self, record: LogRecord):
"""Converts the message of the log record to JSON.
:param record: The log record to be filtered.
:return: True
"""
record.json_message = json.dumps(record.getMessage())
return True
def change_logger_level(loggername: str, level: str):
"""Sets the default log level for a logger.
:param loggername: The name of the logger for which to set the log level.
:param level: The desired log level (e.g. INFO, DEBUG, WARNING).
"""
logger = getLogger(loggername)
logger.setLevel(level)
for handler in logger.handlers:
handler.setLevel(level)
def configure_logging(
default_level: Optional[str] = None,
logdir: Optional[str] = None,
logfile: str = f"{node()}.log",
json: bool = True,
appname: str = "",
):
"""Configures logging for the calling process.
This method will create a logger and set its level to the given default_level argument.
    It always adds a StreamHandler, and also a FileHandler with the given logfile name when
    the logdir argument is provided. It will add filters for the application name, hostname
and json message, and set the formatter to JSON_FORMAT if json is True,
or LOG_FORMAT otherwise.
    :param default_level: Default log level (defaults to 'INFO').
:param logdir: Path to the directory where the log file will be stored.
If not provided, no FileHandler will be added.
:param logfile: Name of the log file (defaults to '{node()}.log').
:param json: Flag to enable or disable JSON format (defaults to True).
:param appname: Application name to be filtered (defaults to "").
"""
handlers: List[logging.Handler] = [logging.StreamHandler()]
default_level = "INFO" if default_level is None else default_level
if logdir:
os.makedirs(logdir, exist_ok=True)
logfile = os.path.join(logdir, logfile)
handlers.append(logging.FileHandler(logfile))
logger = logging.getLogger()
for handler in handlers:
handler.addFilter(AppFilter(appname))
handler.addFilter(HostnameFilter())
handler.addFilter(JsonMessageFilter())
handler.setFormatter(logging.Formatter(JSON_FORMAT if json else LOG_FORMAT))
logger.addHandler(handler)
logger.setLevel(default_level)
for logger_name, level in DEFAULT_LOGGER_LEVELS.items():
change_logger_level(logger_name, level)
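# Usage sketch: plain-text logging to stderr plus a file under a temporary directory for a
# hypothetical app name, then quiet down one extra logger. The path and app name are
# illustrative.
if __name__ == "__main__":
    configure_logging(
        default_level="INFO",
        logdir=os.path.join(os.sep, "tmp", "farmvibes-logs"),
        json=False,
        appname="example-app",
    )
    change_logger_level("requests", "WARNING")
    logging.getLogger(__name__).info("logging configured")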
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/logconfig.py |
import os
from urllib.parse import unquote, urlparse
from urllib.request import url2pathname
def is_local(url: str):
"""Checks if uri refers to a local file.
:param url: The URL to check.
:return: True if the URL refers to a local file, False otherwise.
"""
url_parsed = urlparse(url)
return url_parsed.scheme in ("file", "") # Possibly a local file
def local_uri_to_path(uri: str) -> str:
"""
Maps 'file://' urls to paths.
If the input is already a path, leave it as is.
:param uri: The URI to convert.
:raises ValueError: If the URI is not local.
:return: The path corresponding to the URI.
"""
if not is_local(uri):
raise ValueError(f"Cannot convert remote URI {uri} to path")
parsed = urlparse(uri)
if parsed.scheme == "": # Assume it is a path
return uri
host = "{0}{0}{mnt}{0}".format(os.path.sep, mnt=parsed.netloc)
return os.path.normpath(os.path.join(host, url2pathname(unquote(parsed.path))))
def uri_to_filename(uri: str) -> str:
"""Parses the filename from an URI.
:param uri: The URI to convert.
:return: The filename associated with the URI.
"""
parsed_source_url = urlparse(uri)
source_path = unquote(parsed_source_url.path)
return os.path.basename(source_path)
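# Usage sketch for the helpers above; the file URI is illustrative and the exact path
# separators in the output are platform-dependent.
if __name__ == "__main__":
    uri = "file:///tmp/assets/raster.tif"
    if is_local(uri):
        print(local_uri_to_path(uri))  # e.g. /tmp/assets/raster.tif
        print(uri_to_filename(uri))  # raster.tif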
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/uri.py |
import os
from typing import Any, Dict
import yaml
def write_yaml(path: str, contents: Dict[str, Any]):
"""Writes contents to a YAML file, creating the parent directory if it doesn't exist yet.
:param path: The path of the file to write.
:param contents: The contents to write to the file.
"""
parent = os.path.dirname(path)
if not os.path.exists(parent):
os.makedirs(parent)
with open(path, "w") as fp:
yaml.dump(contents, fp) # type: ignore
def write_file(path: str, contents: str):
"""
Writes contents to a file at the given path, creating the parent
directory if it doesn't exist yet.
:param path: The file path to write to.
:param contents: The contents to write in the file.
"""
parent = os.path.dirname(path)
if not os.path.exists(parent):
os.makedirs(parent)
with open(path, "w") as fp:
fp.write(contents)
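# Usage sketch: persist a small configuration dict as YAML and a text note next to it.
# The output directory and contents are illustrative.
if __name__ == "__main__":
    out_dir = os.path.join(os.sep, "tmp", "farmvibes-example")
    write_yaml(os.path.join(out_dir, "config.yaml"), {"workflow": "helloworld", "retries": 3})
    write_file(os.path.join(out_dir, "notes.txt"), "Generated by the file_utils sketch.\n")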
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/file_utils.py |
import logging
from datetime import datetime, timezone
from shapely.geometry import Polygon
from vibe_core.client import FarmvibesAiClient, get_default_vibe_client
def main():
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
polygon_coords = [
(-88.062073563448919, 37.081397673802059),
(-88.026349330507315, 37.085463858128762),
(-88.026349330507315, 37.085463858128762),
(-88.012445388773259, 37.069230099135126),
(-88.035931592028305, 37.048441375086092),
(-88.068120429075847, 37.058833638440767),
(-88.062073563448919, 37.081397673802059),
]
polygon = Polygon(polygon_coords)
start_date = datetime(year=2021, month=2, day=1, tzinfo=timezone.utc)
end_date = datetime(year=2021, month=2, day=11, tzinfo=timezone.utc)
client: FarmvibesAiClient = get_default_vibe_client()
LOGGER.info(f"Successfully obtained a FarmVibes.AI client (addr={client.baseurl})")
LOGGER.info(f"available workflows: {client.list_workflows()}")
LOGGER.info("Running helloworld workflow...")
run = client.run(
"helloworld", "test_hello", geometry=polygon, time_range=(start_date, end_date)
)
try:
run.block_until_complete(30)
LOGGER.info(f"Successfully executed helloworld workflow. Result {run}")
except RuntimeError as e:
LOGGER.error(f"Failed to execute workflow. Reason: {e}")
raise
if __name__ == "__main__":
main()
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/farmvibes_ai_hello_world.py |
EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/testing/__init__.py |
|
from dataclasses import fields
from typing import List
import numpy as np
import rasterio
from shapely import geometry as shpg
from vibe_core.data import DataVibe
IGNORE_FIELDS = ["id", "assets"]
RTOL = 1e-5
ATOL = 1e-8
SHAPE_TOL = 1e-6
def assert_all_fields_close(
x: DataVibe, y: DataVibe, ignore_fields: List[str] = IGNORE_FIELDS
) -> None:
compare_fields = [f.name for f in fields(x) if f.name not in ignore_fields]
for f in compare_fields:
x_f = getattr(x, f)
y_f = getattr(y, f)
if f == "geometry":
# Option 1: Check if they are within each other with some tolerance
# (x.within(y.buffer(1e-6)) & x.buffer(1e-6).contains(y))
# Option 2: Check per point equivalence with a tolerance
assert shpg.shape(x_f).equals_exact(
shpg.shape(y_f), SHAPE_TOL
), f"Geometries are not equal with tolerance {SHAPE_TOL}"
else:
assert x_f == y_f, f"Field {f} is different: {x_f} != {y_f}"
def assert_all_close(x: DataVibe, y: DataVibe) -> None:
assert type(x) == type(y), f"Data types are different: {type(x)} != {type(y)}"
assert_all_fields_close(x, y)
for a1, a2 in zip(x.assets, y.assets):
assert a1.type == a2.type, f"Assets have different mimetypes: {a1.type} != {a2.type}"
if a1.type == "image/tiff":
with rasterio.open(a1.url) as src1:
with rasterio.open(a2.url) as src2:
assert src1.meta == src2.meta, "TIFF files have different metadata"
ar1 = src1.read()
ar2 = src2.read()
assert np.allclose(
ar1, ar2, rtol=RTOL, atol=ATOL, equal_nan=True
), f"Raster values are not all close with rtol={RTOL} and atol={ATOL}"
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/testing/comparison.py |
from dataclasses import dataclass, field
from typing import Dict, List, Tuple
from shapely import geometry as shpg
from .core_types import AssetVibe, DataSequence, DataVibe
from .products import DemProduct, GNATSGOProduct, LandsatProduct, NaipProduct
ChunkLimits = Tuple[int, int, int, int]
"""Type alias for chunk limits. Tuple of col_offset, row_offset, width, height."""
@dataclass
class Raster(DataVibe):
"""Represents raster data in FarmVibes.AI."""
bands: Dict[str, int]
"""A dictionary with the name of each band and its index in the raster data."""
def __post_init__(self):
super().__post_init__()
self.quantification_value = 1
@property
def raster_asset(self) -> AssetVibe:
"""Returns the raster asset from the list of assets.
:raises ValueError: If the raster asset cannot be found in the asset list.
:returns: The raster asset from the asset list.
"""
raster_asset = [a for a in self.assets if (a.type is not None) and ("image/" in a.type)]
if raster_asset:
return raster_asset[0]
raise ValueError(f"Could not find raster asset in asset list: {self.assets}")
@property
def visualization_asset(self) -> AssetVibe:
"""Returns the visualization asset from the asset list.
:raises ValueError: If the visualization asset cannot be found in the asset list.
:returns: The visualization asset from the asset list.
"""
vis_asset = [a for a in self.assets if a.type == "application/json"]
if vis_asset:
return vis_asset[0]
raise ValueError(f"Could not find visualization asset in asset list: {self.assets}")
@dataclass
class RasterSequence(DataSequence, Raster):
"""Represents a sequence of rasters"""
def add_item(self, item: Raster):
"""Adds a raster to the sequence
:param item: The raster to add to the sequence
"""
self.add_asset(item.raster_asset, item.time_range, shpg.shape(item.geometry))
@dataclass
class RasterChunk(Raster):
"""Represents a chunk of a raster."""
chunk_pos: Tuple[int, int]
"""The position of the chunk in the raster data, as a tuple of (column, row) indices."""
num_chunks: Tuple[int, int]
"""The total number of chunks in the raster data, as a
tuple of (number of columns, number of rows).
"""
limits: ChunkLimits
"""The limits of the chunk in the raster data, as a :const:`ChunkLimits` object.
These are indices, not coordinates.
"""
write_rel_limits: ChunkLimits
"""The relative limits of the chunk in the raster data asset. These are non-overlapping
indices that are used to write the chunk to the asset.
"""
@dataclass
class CategoricalRaster(Raster):
"""Represents a categorical raster."""
categories: List[str]
"""The list of categories in the raster."""
@dataclass
class CloudRaster(Raster):
"""Represents a cloud raster."""
bands: Dict[str, int] = field(init=False)
"""A dictionary with the name of each band and its index in the raster data."""
def __post_init__(self):
super().__post_init__()
self.bands = {"cloud": 0}
@dataclass
class RasterIlluminance(DataVibe):
"""Represents illuminance values for bands of a raster."""
illuminance: List[float]
"""The list of illuminance values for each band."""
@dataclass
class DemRaster(Raster, DemProduct):
"""Represents a DEM raster."""
pass
@dataclass
class NaipRaster(Raster, NaipProduct):
"""Represents a NAIP raster."""
pass
@dataclass
class LandsatRaster(LandsatProduct, Raster):
"""Represents a Landsat raster."""
pass
@dataclass
class GNATSGORaster(Raster, GNATSGOProduct):
"""Represents a gNATSGO raster of a specific variable."""
variable: str
"""The variable represented in the raster."""
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/rasters.py |
from dataclasses import dataclass
from typing import Any, Dict, List
from .core_types import BaseVibe, DataVibe
@dataclass
class ADMAgSeasonalFieldInput(BaseVibe):
"""
Represents an ADMAg Seasonal Field input.
"""
farmer_id: str
"""The ID of the farmer."""
seasonal_field_id: str
"""The ID of the seasonal field."""
boundary_id: str
"""The ID of the boundary."""
@dataclass
class TillageInformation:
"""Represents a tillage operation in a field ."""
start_date: str
"""The start date of the tillage operation."""
end_date: str
"""The end date of the tillage operation."""
implement: str
"""The implement used for the tillage operation."""
@dataclass
class FertilizerInformation:
"""Represents fertilizer practices operation."""
start_date: str
"""The start date of the practice."""
end_date: str
"""The end date of the practice."""
application_type: str
"""The type of fertilizer application."""
total_nitrogen: float
"""The total amount of nitrogen applied."""
enhanced_efficiency_phosphorus: str
"""The type of enhanced efficiency phosphorus used."""
@dataclass
class OrganicAmendmentInformation:
"""Represents an organic amendment practice operation."""
start_date: str
"""The start date of the organic amendment practice."""
end_date: str
"""The end date of the organic amendment practice."""
organic_amendment_type: str
"""The type of organic amendment applied."""
organic_amendment_amount: float
"""The amount of organic amendment applied."""
organic_amendment_percent_nitrogen: float
"""The percent nitrogen of the organic amendment."""
organic_amendment_carbon_nitrogen_ratio: float
"""The carbon to nitrogen ratio of the organic amendment."""
@dataclass
class HarvestInformation:
"""Represents a harvest operation in a field."""
is_grain: bool
"""Whether the crop is a grain (True) or not (False)."""
start_date: str
"""The start date of the harvest operation."""
end_date: str
"""The end date of the harvest operation."""
crop_yield: float
"""The yield of the crop, in kg/ha."""
stray_stover_hay_removal: float
"""The amount of stray stover or hay removed from the field after harvest, in kg/ha."""
@dataclass
class SeasonalFieldInformation(DataVibe):
"""Represents seasonal field information for a farm."""
crop_name: str
"""The name of the crop grown in the seasonal field."""
crop_type: str
"""The type of the crop grown in the seasonal field."""
properties: Dict[str, Any]
"""A dictionary of additional properties for the seasonal field."""
fertilizers: List[FertilizerInformation]
"""A list of :class:`FertilizerInformation` objects representing the
fertilizer practices in the seasonal field."""
harvests: List[HarvestInformation]
"""A list of :class:`HarvestInformation` objects representing the harvests
for the seasonal field."""
tillages: List[TillageInformation]
"""A list of :class:`TillageInformation` objects representing the tillage operations
for the seasonal field."""
organic_amendments: List[OrganicAmendmentInformation]
"""A list of :class:`OrganicAmendmentInformation` objects representing the organic
amendments for the seasonal field."""
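# Construction sketch for the field-operation records defined above; dates and amounts are
# illustrative. A full SeasonalFieldInformation additionally needs the DataVibe base fields
# (id, time range, geometry, assets), which are omitted here.
if __name__ == "__main__":
    harvest = HarvestInformation(
        is_grain=True,
        start_date="2021-09-01T00:00:00Z",
        end_date="2021-09-05T00:00:00Z",
        crop_yield=5100.0,
        stray_stover_hay_removal=0.0,
    )
    tillage = TillageInformation(
        start_date="2021-04-01T00:00:00Z",
        end_date="2021-04-02T00:00:00Z",
        implement="Chisel Plow",
    )
    print(harvest, tillage, sep="\n")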
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/farm.py |
import hashlib
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Tuple, Union
from shapely import wkt
from shapely.geometry import shape
from .core_types import DataVibe
def gen_forecast_time_hash_id(
name: str,
geometry: Dict[str, Any],
publish_time: Union[str, datetime],
time_range: Tuple[datetime, datetime],
):
"""Generates a SHA-256 hash ID for a forecast time, based on the input parameters.
:param name: The name of the forecast.
:param geometry: The geometry associated with the forecast, as a dictionary.
:param publish_time: The time when the forecast was published, as a string or a datetime object.
:param time_range: The time range of the forecast, as a tuple of two datetime objects.
:return: The SHA-256 hash ID of the forecast time.
"""
if type(publish_time) is datetime:
publish_time_str = publish_time.isoformat()
else:
publish_time_str = str(publish_time)
return hashlib.sha256(
(
name
+ wkt.dumps(shape(geometry))
+ publish_time_str
+ time_range[0].isoformat()
+ time_range[1].isoformat()
).encode()
).hexdigest()
@dataclass
class GfsForecast(DataVibe):
"""Represents a Global Forecast System (GFS) forecast."""
publish_time: str
"""The publication time of the forecast in ISO format."""
@dataclass
class WeatherVibe(DataVibe):
"""Represents weather data."""
pass
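# Usage sketch: derive a deterministic ID for a forecast slice from its name, geometry,
# publication time, and time range. The point and dates are illustrative.
if __name__ == "__main__":
    from shapely.geometry import Point, mapping

    geom = mapping(Point(-88.0, 37.0).buffer(0.1))
    forecast_id = gen_forecast_time_hash_id(
        "gfs",
        geom,
        datetime(2021, 2, 1),
        (datetime(2021, 2, 1), datetime(2021, 2, 2)),
    )
    print(forecast_id)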
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/weather.py |
from .airbus import AirbusPrice, AirbusProduct, AirbusRaster
from .core_types import (
AssetVibe,
BaseVibe,
BaseVibeDict,
BBox,
DataSummaryStatistics,
DataVibe,
ExternalReference,
ExternalReferenceList,
FoodFeatures,
FoodVibe,
GeometryCollection,
GHGFlux,
GHGProtocolVibe,
ProteinSequence,
TimeRange,
TimeSeries,
TypeDictVibe,
gen_guid,
gen_hash_id,
CarbonOffsetInfo,
)
from .farm import (
ADMAgSeasonalFieldInput,
FertilizerInformation,
HarvestInformation,
OrganicAmendmentInformation,
SeasonalFieldInformation,
TillageInformation,
)
from .products import (
ChirpsProduct,
ClimatologyLabProduct,
DemProduct,
Era5Product,
GEDIProduct,
GNATSGOProduct,
LandsatProduct,
ModisProduct,
NaipProduct,
)
from .rasters import (
CategoricalRaster,
ChunkLimits,
CloudRaster,
DemRaster,
GNATSGORaster,
NaipRaster,
Raster,
RasterChunk,
RasterIlluminance,
RasterSequence,
)
from .sentinel import (
DownloadedSentinel1Product,
DownloadedSentinel2Product,
S2ProcessingLevel,
Sentinel1Product,
Sentinel1Raster,
Sentinel1RasterOrbitGroup,
Sentinel2CloudMask,
Sentinel2CloudMaskOrbitGroup,
Sentinel2CloudProbability,
Sentinel2Product,
Sentinel2Raster,
Sentinel2RasterOrbitGroup,
SentinelProduct,
SpaceEyeRaster,
TiledSentinel1Product,
)
from .utils import StacConverter
from .weather import GfsForecast, gen_forecast_time_hash_id
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/__init__.py |
import mimetypes
from dataclasses import dataclass, field
from typing import Dict, cast
from .core_types import AssetVibe, DataVibe, gen_guid
CDL_DOWNLOAD_URL = (
"https://www.nass.usda.gov/Research_and_Science/Cropland/Release/datasets/{}_30m_cdls.zip"
)
"""The base URL for downloading CropDataLayer data.
:meta hide-value:
"""
@dataclass
class DemProduct(DataVibe):
"""Represents metadata information about a Digital Elevation Map (DEM) tile.
The :class:`DemProduct` type is the expected output of a list-like operator
and the expected input type of a download-like operator.
"""
tile_id: str
"""The tile ID of the DEM tile."""
resolution: int
"""The resolution of the DEM tile."""
@dataclass
class NaipProduct(DataVibe):
"""Represents metadata information about a National Agricultural
Imagery Program (NAIP) tile.
The :class:`NaipProduct` type is the expected output of a list-like operator
and the type of a download-like operator.
"""
tile_id: str
"""The tile ID of the NAIP tile."""
year: int
"""The year of the NAIP tile."""
resolution: float
"""The resolution of the NAIP tile."""
@dataclass
class LandsatProduct(DataVibe):
"""Represents metadata information about a Landsat tile."""
tile_id: str = ""
"""The tile ID of the Landsat tile."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping band names to asset IDs."""
def add_downloaded_band(self, band_name: str, asset_path: str):
"""Adds a downloaded band to the asset map.
:param band_name: The name of the band.
:param asset_path: The path to the downloaded asset.
"""
band_guid = gen_guid()
self.asset_map[band_name] = band_guid
self.assets.append(
AssetVibe(asset_path, cast(str, mimetypes.guess_type(asset_path)[0]), band_guid)
)
def get_downloaded_band(self, band_name: str) -> AssetVibe:
"""Retrieves the downloaded band with the given name from the asset map.
:param band_name: The name of the band to retrieve.
:return: The downloaded band with the given name.
:rtype: :class:`AssetVibe`
:raises ValueError: If the band with the given name is not found or downloaded.
"""
try:
band_guid = self.asset_map[band_name]
except KeyError:
raise ValueError(f"Band {band_name} not found or downloaded")
return next((a for a in self.assets if a.id == band_guid))
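# Illustrative usage sketch: attaching a downloaded band file to a LandsatProduct and
# retrieving it later. The identifiers, dates, and file path are made-up examples.
def _example_landsat_band_roundtrip() -> AssetVibe:
    from datetime import datetime
    product = LandsatProduct(
        id="example-landsat-tile",
        time_range=(datetime(2023, 6, 1), datetime(2023, 6, 1)),
        geometry={"type": "Point", "coordinates": [-120.0, 45.0]},
        assets=[],
        tile_id="LC08_L2SP_044033_20230601",
    )
    product.add_downloaded_band("red", "/tmp/B04.tif")  # hypothetical local file
    return product.get_downloaded_band("red")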
@dataclass
class ChirpsProduct(DataVibe):
"""Represents metadata information about a
Climate Hazards Group InfraRed Precipitation with Station data (CHIRPS) product.
"""
url: str
"""The URL of the CHIRPS product."""
@dataclass
class CDLProduct(DataVibe):
"""Represents metadata information about a Crop Data Layer (CDL) product."""
pass
@dataclass
class Era5Product(DataVibe):
"""Represents metadata information about an ERA5 product.
:var item_id: The item ID of the ERA5 product.
:var var: The variable of the ERA5 product.
:var cds_request: A dictionary with the CDS request parameters.
"""
item_id: str
var: str
cds_request: Dict[str, Dict[str, str]] = field(default_factory=dict)
@dataclass
class ModisProduct(DataVibe):
"""Represents metadata information about a
Moderate Resolution Imaging Spectroradiometer (MODIS) product.
"""
resolution: int
"""The resolution of the MODIS product."""
@dataclass
class GEDIProduct(DataVibe):
"""Represents metadata information about a
Global Ecosystem Dynamics Investigation (GEDI) product.
"""
product_name: str
"""The name of the GEDI product."""
start_orbit: int
"""The start orbit of the GEDI product."""
stop_orbit: int
"""The stop orbit of the GEDI product."""
processing_level: str
"""The processing level of the GEDI product."""
@dataclass
class GNATSGOProduct(DataVibe):
"""Represents metadata information about a
Gridded National Soil Survey Geographic Database (gNATSGO) product.
"""
pass
@dataclass
class ClimatologyLabProduct(DataVibe):
"""Represents metadata information about a Climatology Lab product."""
url: str
"""The URL of the Climatology Lab product."""
variable: str
"""The variable of the Climatology Lab product."""
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/products.py |
import json
from dataclasses import asdict, is_dataclass
from datetime import datetime
from typing import Any
from pydantic.dataclasses import dataclass as pydataclass
from pydantic.main import BaseModel
class DataclassJSONEncoder(json.JSONEncoder):
"""
A class that extends the `json.JSONEncoder` class to support
encoding of dataclasses and pydantic models.
"""
def default(self, obj: Any):
"""Encodes a dataclass or pydantic model to JSON.
:param obj: The object to encode.
:return: The JSON representation of the object.
"""
if is_dataclass(obj):
cls = pydataclass(obj.__class__).__pydantic_model__
exclude = {"hash_id"} if hasattr(obj.__class__, "hash_id") else {}
return json.loads(cls(**asdict(obj)).json(allow_nan=False, exclude=exclude))
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, BaseModel):
return json.loads(obj.json(allow_nan=False))
return super().default(obj)
def dump_to_json(data: Any, **kwargs: Any) -> str:
"""Serializes an object to JSON using :class:`DataclassJSONEncoder`.
:param data: The object to serialize to JSON.
:param **kwargs: Additional keyword arguments to pass to the `json.dumps` method.
:return: A JSON string representation of the object.
"""
return json.dumps(
data,
allow_nan=False,
cls=DataclassJSONEncoder,
**kwargs,
)
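# Illustrative usage sketch: serializing a simple dataclass with dump_to_json. The Point
# class below is a hypothetical example type, not part of the library.
def _example_dump_to_json() -> str:
    from dataclasses import dataclass
    @dataclass
    class Point:
        x: float
        y: float
        when: datetime
    # Dataclasses and datetime values are handled by DataclassJSONEncoder.
    return dump_to_json(Point(x=1.0, y=2.0, when=datetime(2023, 1, 1)), indent=2)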
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/json_converter.py |
import hashlib
import logging
import re
import uuid
from dataclasses import asdict, dataclass, field, fields, is_dataclass
from datetime import datetime, timezone
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import (
Any,
Callable,
ClassVar,
Dict,
List,
Optional,
Tuple,
Type,
Union,
cast,
get_args,
get_origin,
)
from pydantic.dataclasses import dataclass as pydataclass
from pydantic.main import BaseModel, ModelMetaclass
from shapely import geometry as shpg
from shapely import wkt
from shapely.geometry.base import BaseGeometry
from ..file_downloader import build_file_path, download_file
from ..uri import is_local
from . import data_registry
from .json_converter import dump_to_json
FARMVIBES_AI_BASE_SCHEMA = "schema"
FARMVIBES_AI_BASE_PYDANTIC_MODEL = "pydantic_model"
LOGGER = logging.getLogger(__name__)
BBox = Tuple[float, float, float, float]
"""Type alias for a bounding box, as a tuple of four floats (minx, miny, maxx, maxy)."""
TimeRange = Tuple[datetime, datetime]
"""Type alias for a time range, as a tuple of two `datetime` objects (start, end)."""
def gen_guid():
"""
Generates a random UUID as a string.
:return: A random UUID as a string.
"""
return str(uuid.uuid4())
def gen_hash_id(
name: str, geometry: Union[BaseGeometry, Dict[str, Any]], time_range: Tuple[datetime, datetime]
):
"""
Generates a hash ID based on a name, a geometry, and a time range.
:param name: The name associated with the hash ID.
:param geometry: The geometry associated with the hash ID,
either as a `BaseGeometry` object or as a dictionary.
:param time_range: The time range associated with the hash ID,
as a tuple of two `datetime` objects (start, end).
:return: A hash ID as a hexadecimal string.
"""
return hashlib.sha256(
(
name
+ wkt.dumps(shpg.shape(geometry))
+ time_range[0].isoformat()
+ time_range[1].isoformat()
).encode()
).hexdigest()
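# Illustrative usage sketch: deterministic IDs from gen_hash_id. The polygon and dates
# below are made-up example values.
def _example_gen_hash_id() -> str:
    geometry = shpg.box(-122.1, 47.5, -122.0, 47.6)
    time_range = (datetime(2023, 1, 1), datetime(2023, 12, 31))
    # Identical name, geometry, and time range always map to the same digest.
    return gen_hash_id("example-field", geometry, time_range)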
BaseVibeDict = Dict[str, Union["BaseVibe", List["BaseVibe"]]]
BaseUnion = Union["BaseVibe", List["BaseVibe"]]
DataVibeType = Union[Type["BaseVibe"], Type[List["BaseVibe"]]]
InnerIOType = Union[List[Dict[str, Any]], Dict[str, Any]]
OpIOType = Dict[str, InnerIOType]
class TypeDictVibe(Dict[str, DataVibeType]):
"""
A dictionary subclass used for type validation in FarmVibes.AI.
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v: Any) -> "BaseVibe":
"""
Validates a dictionary of values against FarmVibes.AI types.
        This method takes a dictionary of values as input and returns the validated dictionary.
It validates each value in the dictionary against FarmVibes.AI types using the
:class:`TypeParser` class. If a value is not a FarmVibes.AI type, a `ValueError` is raised.
:param v: A dictionary of values to validate.
        :return: The validated dictionary of values.
:raises ValueError: If a value in the dictionary is not a FarmVibes.AI type.
"""
try:
for key in v:
if isinstance(v[key], str):
v[key] = TypeParser.parse(v[key])
elif isinstance(get_origin(v[key]), Type):
args = get_args(v[key])[0]
origin = get_origin(args)
if origin is not None and issubclass(origin, List):
base = get_args(args)[0]
if issubclass(base, BaseVibe):
continue
elif issubclass(args, BaseVibe):
continue
else:
raise ValueError(f"Value for key {key} is not a FarmVibes.AI type")
return v
except TypeError:
raise ValueError
class TypeParser:
"""
A class that provides a method for parsing type specifications in FarmVibes.AI.
It is used to parse the type specifications of ports in :class:`BaseVibe` subclasses.
"""
logger: logging.Logger = logging.getLogger(f"{__name__}.TypeParser")
"""A logger for the class."""
type_pattern: "re.Pattern[str]" = re.compile(r"((\w+)\[)?(\w+)\]?")
"""A regular expression pattern to parse type specifications."""
inherit_pattern = re.compile(r"\s*\@INHERIT\((.*)\)\s*")
"""A regular expression pattern to parse type specifications that inherit from other ports."""
supported_container_types: List[str] = ["List"]
"""A list of supported container types."""
container_group: int = 1
"""The group in the regular expression pattern that matches thecontainer type."""
type_group: int = 2
"""The group in the regular expression pattern that matches the type."""
@classmethod
def parse(cls, typespec: str) -> DataVibeType:
"""
Parses a type specification string and returns a :class:`BaseVibe`
or a List[:class:`BaseVibe`].
It first checks if the type specification string includes inheritance, and if so,
returns an :class:`UnresolvedDataVibe` object. Otherwise, it extracts the container and
data IDs from the type specification string and retrieves the corresponding
:class:`BaseVibe` subclass from the `data_registry`. If the container or data ID is not
supported, a `ValueError` is raised.
:param typespec: A string representing the type specification.
:return: A :class:`BaseVibe` or a List[:class:`BaseVibe`] object.
:raises ValueError: If the container ID is not supported or the data ID
is not a :class:`BaseVibe` subclass.
:raises KeyError: If the data ID is not found in the `data_registry`.
"""
inherit = cls.inherit_pattern.findall(typespec)
if inherit:
# What `parse` returns needs to be a Type, and not a class instance. Because of
# that, we have to game the type system here, such that we output something that
# is a valid DataVibeType, and that has the name of the port we are inheriting from.
# So, by instantiating `UnresolvedDataVibe()`, which itself inherits from type,
# we are creating a new Type[BaseVibe]. The line below, by the way, is equivalent
            # to `type(inherit[0], (), {})`, with the additional `Type[BaseVibe]` parent.
inherit_type = UnresolvedDataVibe(inherit[0], (), {})
return cast(DataVibeType, inherit_type)
typespec = typespec.replace("typing.", "").replace("vibe_core.data.", "")
matches = cls.type_pattern.findall(typespec)
containerid = matches[0][cls.container_group]
dataid = matches[0][cls.type_group]
if containerid and containerid not in cls.supported_container_types:
raise ValueError(f"Operation uses unsupported container {containerid}")
try:
datavibe = data_registry.retrieve(dataid)
if not issubclass(datavibe, BaseVibe):
raise ValueError(
f"Operation uses unsupported type {data_registry.get_name(datavibe)}"
)
datavibe_list = List[datavibe] # type: ignore
return datavibe_list if containerid else datavibe
except KeyError:
raise KeyError(f"Unable to find type {dataid}")
@dataclass
class AssetVibe:
"""Represents an asset in FarmVibes.AI."""
type: Optional[str]
"""An optional string representing the MIME type of the asset."""
id: str
"""A string representing the ID of the asset."""
path_or_url: str
"""A string representing the path or URL of the asset."""
_is_local: bool
_local_path: Optional[str]
def __init__(self, reference: str, type: Optional[str], id: str) -> None:
self._is_local = is_local(reference)
self._local_path = reference if self._is_local else None
self.path_or_url = reference
self.type = type
self.id = id
self._tmp_dir = TemporaryDirectory()
if type is None:
LOGGER.warning(f"Asset {self} created without defined mimetype")
def __del__(self):
try:
self._tmp_dir.cleanup()
except (AttributeError, FileNotFoundError):
LOGGER.info(f"Unable to clean temporary directory related to VibeAsset {self.url}")
@property
def local_path(self) -> str:
"""
Returns the local path of the asset.
If the asset is local, this method returns the local path of the asset. If the asset
is remote, it downloads the asset to a temporary directory (if not previously downloaded)
and returns the local path of the downloaded file.
:return: The local path of the asset.
"""
if self._is_local:
return cast(str, self._local_path)
# This is a remote asset
if self._local_path:
# The download was previously done
return self._local_path
# The asset is remote and there is no previous download
file_path = build_file_path(
self._tmp_dir.name, gen_guid(), "" if self.type is None else self.type
)
self._local_path = download_file(self.url, file_path)
return self._local_path
@property
def url(self) -> str:
"""
Returns the URL of the asset.
If the asset is local, this method returns the absolute URI of the local path.
Otherwise, it returns the original path or URL of the asset.
:return: The URL of the asset.
"""
if self._is_local:
return Path(self.local_path).absolute().as_uri()
return self.path_or_url
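# Illustrative usage sketch: local references resolve directly, while remote ones are only
# fetched when `local_path` is accessed. The path and URL below are hypothetical.
def _example_asset_references() -> str:
    local = AssetVibe("/tmp/example.tif", "image/tiff", gen_guid())
    # For a local reference, local_path is the reference itself; url is its file:// form.
    remote = AssetVibe("https://example.com/data.tif", "image/tiff", gen_guid())
    # For a remote reference, local_path triggers a download into a temporary directory.
    return local.local_path + " " + remote.url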
@dataclass
class BaseVibe:
"""
Represents a base class for FarmVibes.AI types.
"""
schema: ClassVar[Callable[[], Dict[str, Any]]]
pydantic_model: ClassVar[Callable[[], ModelMetaclass]]
def __init__(self):
pass
def __post_init__(self):
if "id" not in [f.name for f in fields(self.__class__)]:
self.id = self.hash_id
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "BaseVibe":
"""
A class method that creates a :class:`BaseVibe` object from a dictionary of values.
This method takes a dictionary of values as input and returns a :class:`BaseVibe` object.
If the class schema includes a bounding box (`bbox`) property, this method calculates the
bounding box from the `geometry` property using the `shapely.geometry` library.
If the `geometry` property is missing, a `ValueError` is raised.
Otherwise, this method creates a new instance of the Pydantic model and returns it.
:param data: A dictionary of values to create the :class:`BaseVibe` object from.
:return: A :class:`BaseVibe` object.
:raises ValueError: If the `geometry` property is missing and the class schema includes
a `bbox` property.
"""
if "bbox" in cls.schema()["properties"]:
try:
data["bbox"] = shpg.shape(data["geometry"]).bounds
except KeyError as e:
raise ValueError(f"Geometry is missing from {data}") from e
return cls.pydantic_model()(**data)
@property
def hash_id(self) -> str:
"""
Returns the hash ID of the object.
If the class has an `id` attribute that is a non-empty string, this method returns it.
Otherwise, it calculates the SHA-256 hash of the JSON representation of the object
and returns the hexadecimal digest.
:return: The hash ID of the object.
"""
if (
hasattr(self.__class__, "id")
and isinstance(self.__class__.id, str) # type: ignore
and self.id # type: ignore
):
return self.id # type: ignore
return hashlib.sha256(dump_to_json(self).encode()).hexdigest()
def __init_subclass__(cls, **kwargs): # type: ignore
super().__init_subclass__(**kwargs)
@classmethod
def schema(cls, *args, **kwargs): # type: ignore
return cls.pydantic_model().schema(*args, **kwargs)
@classmethod
def pydantic_model(cls): # type: ignore
if is_dataclass(cls):
if issubclass(cls, DataVibe):
@pydataclass
class PydanticAssetVibe(AssetVibe):
pass
@pydataclass
class Tmp(cls):
assets: List[PydanticAssetVibe]
class Config:
underscore_attrs_are_private = True
arbitrary_types_allowed = True
Tmp.__name__ = cls.__name__ # Tmp in the repr would confuse users
return Tmp.__pydantic_model__ # type: ignore
return pydataclass(cls).__pydantic_model__
if issubclass(cls, BaseModel):
return cls
raise NotImplementedError(f"{cls.__name__} is not a dataclass") # type:ignore
if not hasattr(cls, FARMVIBES_AI_BASE_SCHEMA):
setattr(cls, FARMVIBES_AI_BASE_SCHEMA, schema)
if not hasattr(cls, FARMVIBES_AI_BASE_PYDANTIC_MODEL):
setattr(cls, FARMVIBES_AI_BASE_PYDANTIC_MODEL, pydantic_model)
try:
data_registry.retrieve(cls.__name__)
except KeyError:
data_registry.register_vibe_datatype(cls)
class UnresolvedDataVibe(Type[BaseVibe], BaseVibe): # type: ignore
"""
Meta type that is equivalent to Python's `type` built-in.
The output of this class is a new *type*, not a regular object. This is used
internally by FarmVibes.AI and, in general, should never be instantiated.
In fact, even if this is instantiated, there's nothing useful that could be
done with an instance of this (which, again, is a new Type).
"""
def get_filtered_init_field_names(obj: Any, filter_fun: Callable[[Any], bool]):
"""
Returns a list of filtered field names for an object's `__init__` method.
:param obj: The object to retrieve the field names from.
:param filter_fun: A function that takes a field name as input and returns a boolean indicating
whether the field should be included in the output list.
:return: A list of filtered field names for the object's `__init__` method.
"""
src_fields = get_init_field_names(obj)
return list(filter(filter_fun, src_fields))
def get_filtered_init_fields(obj: Any, filter_fun: Callable[[Any], bool]):
"""
Returns a dictionary of filtered fields for an object's `__init__` method.
:param obj: The object to retrieve the field values from.
:param filter_fun: A function that takes a field name as input and returns a boolean indicating
whether the field should be included in the output dictionary.
:return: A dictionary of filtered field names and values for the object's `__init__` method.
"""
field_names = get_filtered_init_field_names(obj, filter_fun)
obj_dict = asdict(obj)
return {f: obj_dict[f] for f in field_names}
# TODO consider if we should consolidate geometry and datetime types.
@dataclass
class DataVibe(BaseVibe):
"""
Represents a data object in FarmVibes.AI.
"""
id: str
"""A string representing the unique identifier of the data object."""
time_range: TimeRange
"""A :const:`TimeRange` representing the timestamps of to the beginning and end of sample."""
bbox: BBox = field(init=False)
"""A :const:`BBox` representing the bounding box of the data object.
This field is calculated from the `geometry` property using the `shapely.geometry` library.
"""
geometry: Dict[str, Any]
"""A dictionary representing the geometry of the data object."""
assets: List[AssetVibe]
"""A list of :class:`AssetVibe` objects of the assets associated with the data object."""
SKIP_FIELDS: ClassVar[Tuple[str, ...]] = ("id", "assets", "hash_id", "bbox")
"""A tuple containing the fields to skip when calculating the hash ID of the object."""
def __post_init__(self):
self.bbox = shpg.shape(self.geometry).bounds # type: ignore
self.time_range = (
self.time_range[0].astimezone(timezone.utc),
self.time_range[1].astimezone(timezone.utc),
)
super().__post_init__()
# Type hint with class that we are defining? https://stackoverflow.com/a/35617812
@classmethod
def clone_from(cls, src: "DataVibe", id: str, assets: List[AssetVibe], **kwargs: Any):
"""
Creates a new :class:`DataVibe` object with updated fields.
This method takes a source :class:`DataVibe` object, a new `id` string, a list of new
:class:`AssetVibe` objects, and any additional keyword arguments to update the
fields of the source object. It returns a new :class:`DataVibe` object with the
updated fields.
:param cls: The class of the new :class:`DataVibe` object.
:param src: The source :class:`DataVibe` object to clone.
:param id: The new `id` string for the cloned object.
:param assets: The new list of :class:`AssetVibe` objects for the cloned object.
:param kwargs: Additional keyword arguments to update the fields of the cloned object.
:return: A new :class:`DataVibe` object with the updated fields.
"""
valid_names = [f for f in get_init_field_names(cls) if f not in cls.SKIP_FIELDS]
copy_args = get_filtered_init_fields(src, lambda x: x in valid_names)
copy_args.update(kwargs)
return cls(id=id, assets=assets, **copy_args)
def get_init_field_names(obj: Type[BaseVibe]) -> List[str]:
"""
Returns a list of field names for an object's `__init__` method.
:param obj: The :class:`BaseVibe` class to retrieve the field names from.
:return: A list of field names for the class's `__init__` method.
"""
return [f.name for f in fields(obj) if f.init]
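# Illustrative usage sketch: deriving a new DataVibe from an existing one. The geometry
# and time range are copied from `src`; the id and assets are replaced.
def _example_clone(src: DataVibe, new_asset: AssetVibe) -> DataVibe:
    return DataVibe.clone_from(src, id=gen_guid(), assets=[new_asset])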
@dataclass
class TimeSeries(DataVibe):
"""Represents a time series data object in FarmVibes.AI."""
pass
@dataclass
class DataSummaryStatistics(DataVibe):
"""Represents a data summary statistics object in FarmVibes.AI."""
pass
@dataclass
class DataSequence(DataVibe):
"""Represents a sequence of data assets in FarmVibes.AI."""
idx: int = field(init=False)
"""Number of data objects in the sequence."""
asset_order: Dict[str, int] = field(default_factory=dict)
"""A dictionary mapping asset IDs to their order in the sequence."""
asset_time_range: Dict[str, TimeRange] = field(default_factory=dict)
"""A dictionary mapping asset IDs to their time range."""
asset_geometry: Dict[str, BaseGeometry] = field(default_factory=dict)
"""A dictionary mapping asset IDs to their geometry."""
def __post_init__(self):
super().__post_init__()
lens = [len(i) for i in (self.asset_order, self.asset_time_range, self.asset_geometry)]
self.idx = lens[0]
if not all(i == self.idx for i in lens):
raise ValueError(f"Expected all asset maps to have the same length, found {lens}")
def add_item(self, item: DataVibe):
"""
Adds an item to the sequence.
:param item: The item to be added to the sequence.
"""
asset = item.assets[0]
self.add_asset(asset, item.time_range, shpg.shape(item.geometry))
def add_asset(self, asset: AssetVibe, time_range: TimeRange, geometry: BaseGeometry):
"""
Adds an asset to the sequence.
:param asset: The asset to add to the sequence.
:param time_range: The time range of the asset.
:param geometry: The geometry of the asset.
"""
self.assets.append(asset)
self.asset_order[asset.id] = self.idx
self.asset_time_range[asset.id] = time_range
self.asset_geometry[asset.id] = geometry
self.idx += 1
def get_ordered_assets(self, order_by: Optional[Dict[str, Any]] = None) -> List[AssetVibe]:
"""
Gets a list of assets in the sequence, ordered by the provided dictionary.
:param order_by: A dictionary mapping asset IDs to their order in the sequence.
If None, the assets will be ordered by their default order in the sequence.
:return: A list of assets in the sequence, ordered by the provided dictionary.
"""
if order_by is None:
order_by = self.asset_order
return sorted(self.assets, key=lambda x: order_by[x.id])
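# Illustrative usage sketch: accumulating assets from individual DataVibe items into a
# DataSequence. Assumes each item carries at least one asset; all names are hypothetical.
def _example_build_sequence(items: List[DataVibe]) -> List[AssetVibe]:
    sequence = DataSequence(
        id="example-sequence",
        time_range=items[0].time_range,
        geometry=items[0].geometry,
        assets=[],
    )
    for item in items:
        sequence.add_item(item)  # records order, time range, and geometry per asset
    return sequence.get_ordered_assets()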
@dataclass
class ExternalReferenceList(DataVibe):
"""
Represents a list of external references in FarmVibes.AI.
"""
urls: List[str]
"""A list of URLs."""
@dataclass
class ExternalReference(DataVibe):
"""
Represents a single external reference in FarmVibes.AI.
"""
url: str
"""The URL representing the external reference."""
@dataclass
class GeometryCollection(DataVibe):
"""Represents a geometry collection in FarmVibes.AI."""
pass
@dataclass
class FoodVibe(BaseVibe):
"""
Represents a food object in FarmVibes.AI.
"""
dietary_fiber: float
"""The amount of dietary fiber in grams."""
magnesium: float
"""The amount of magnesium in milligrams."""
potassium: float
"""The amount of potassium in milligrams."""
manganese: float
"""The amount of manganese in milligrams."""
zinc: float
"""The amount of zinc in milligrams."""
iron: float
"""The amount of iron in milligrams."""
copper: float
"""The amount of copper in milligrams."""
protein: float
"""The amount of protein in grams."""
trp: float # Tryptophan content
"""The amount of tryptophan in grams."""
thr: float # Threonine content
"""The amount of threonine in grams."""
ile: float # Isoleucine content
"""The amount of isoleucine in grams."""
leu: float # Leucine content
"""The amount of leucine in grams."""
lys: float # Lysine content
"""The amount of lysine in grams."""
met: float # Methionine content
"""The amount of methionine in grams."""
cys: float # Cysteine content
"""The amount of cysteine in grams."""
phe: float # Phenylalanine content
"""The amount of phenylalanine in grams."""
tyr: float # Tyrosine content
"""The amount of tyrosine in grams."""
val: float # Valine content
"""The amount of valine in grams."""
arg: float # Arginine content
"""The amount of arginine in grams."""
his: float # Histidine content
"""The amount of histidine in grams."""
fasta_sequence: List[str]
"""A list with the amino acid sequence of the protein."""
protein_families: List[str]
"""A list with the protein families associated to the food."""
food_group: str
"""The food group the food belongs to."""
@dataclass
class FoodFeatures(DataVibe):
"""Represents the features of a food in FarmVibes.AI."""
pass
@dataclass
class ProteinSequence(DataVibe):
"""Represents a protein sequence in FarmVibes.AI."""
pass
@dataclass
class CarbonOffsetInfo(DataVibe):
"""
Represents carbon offset information.
"""
carbon: str
"""The carbon offset."""
@dataclass
class GHGFlux(DataVibe):
"""
Represents a greenhouse gas (GHG) flux in FarmVibes.AI.
"""
scope: str
"""The scope of the GHG flux."""
value: float
"""The value of the GHG flux."""
description: Optional[str]
"""An optional description of the GHG flux."""
@dataclass
class GHGProtocolVibe(DataVibe):
"""
Represents the inputs to Green House Gas fluxes estimation workflows.
This is a dataclass that has many attributes, due to the nature of the
calculations proposed by the GHG protocol methodology. Not all attributes are required.
Below we describe all of them, as well as the units they should be in.
"""
cultivation_area: float # hectares
"""The area of the field that is cultivated in hectares."""
total_yield: float # tonnes
"""The total yield of the field in tonnes."""
soil_texture_class: Optional[str] # sand / clay / silt
"""The texture class of the soil (one of the following: "sand", "clay", or "silt")."""
soil_clay_content: Optional[float]
"""The clay content of the soil in percentage."""
practice_adoption_period: Optional[int]
"""The number of years that the practice has been adopted."""
burn_area: Optional[float]
"""The area of the field that is burned in hectares."""
soil_management_area: Optional[float]
"""The area of the field that is managed in hectares."""
# fertilizer application {{{
# Synthetic fertilizers {{{
urea_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of urea applied to the field in kilograms per hectare."""
synthetic_fertilizer_amount: Optional[float] = 0.0 # kg per hectare - not urea
"""The amount of synthetic fertilizer applied to the field in kilograms per hectare."""
synthetic_fertilizer_nitrogen_ratio: Optional[float] = 0.0 # percentage
"""The nitrogen ratio of the synthetic fertilizer applied to the field in percentage."""
# }}}
# Soil correction {{{
limestone_calcite_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of limestone calcite applied to the field in kilograms per hectare."""
limestone_dolomite_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of limestone dolomite applied to the field in kilograms per hectare."""
gypsum_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of gypsum applied to the field in kilograms per hectare."""
# }}}
# Organic fertilizers {{{
organic_compound_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of organic compound applied to the field in kilograms per hectare."""
manure_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of manure applied to the field in kilograms per hectare."""
manure_birds_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of manure from birds applied to the field in kilograms per hectare."""
organic_other_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of other organic fertilizer applied to the field in kilograms per hectare."""
dry_matter_amount: Optional[float] = 0.0 # kg per hectare / Rice
"""The amount of dry matter applied to the field in kilograms per hectare."""
is_dry_matter_fermented: Optional[bool] = False # Yes/No / Rice
"""Whether the dry matter is fermented."""
vinasse_amount: Optional[float] = 0.0 # m^3 per hectare / Sugarcane
"""The amount of vinasse applied to the field in cubic meters per hectare."""
filter_cake_amount: Optional[float] = 0.0 # kg per hectare / Sugarcane
"""The amount of filter cake applied to the field in kilograms per hectare."""
filter_cake_application_area: Optional[float] = 0.0 # hectares / Sugarcane
"""The area of the field that is applied with filter cake in hectares."""
# }}}
# Green manure {{{
green_manure_amount: Optional[float] = 0.0 # kg per hectare
"""The amount of green manure applied to the field in kilograms per hectare."""
green_manure_grass_amount: Optional[float] = 0.0
"""The amount of green manure grass applied to the field in kilograms per hectare."""
green_manure_legumes_amount: Optional[float] = 0.0
"""The amount of green manure legumes applied to the field in kilograms per hectare."""
# }}}
# }}}
# Rice cultivation {{{
soil_preparation: Optional[str] = "" # early / conventional
"""Whether the soil uses "early" or "conventional" preparation."""
water_regime: Optional[str] = ""
"""The water regime of the field."""
# }}}
# Internal fuel {{{
diesel_type: Optional[str] = "DIESEL" # diesel(_b2|_b5|_b6|_b7|_b8|_b9|_b10)
"""The type of diesel used in the field."""
diesel_amount: Optional[float] = 0.0 # liters
"""The amount of diesel used in mechanical operations in the field in liters per hectare."""
gasoline_amount: Optional[float] = 0.0 # liters
"""The amount of gasoline used in mechanical operations in the field in liters per hectare."""
ethanol_amount: Optional[float] = 0.0 # liters
"""The amount of ethanol used in mechanical operations in the field in liters per hectare."""
# }}}
# Transport fuel {{{
transport_diesel_type: Optional[str] = "DIESEL" # diesel(_b2|_b5|_b6|_b7|_b8|_b9|_b10)
"""The type of diesel used in transporting produce from the farm to the market."""
transport_diesel_amount: Optional[float] = 0.0 # liters
"""Amount of diesel used in transporting produce from farm to market in liters per hectare."""
# }}}
current_land_use: str = "conventional_crops"
"""The current land use of the field (can be one of the following:
"conventional_crops", "direct_seeding", "sugarcane_with_burning", or
"sugarcane_without_burning").
"""
previous_land_use: str = "conventional_crops"
"""The previous land use of the field (can be one of the following:
"conventional_crops", "direct_seeding", "sugarcane_with_burning",
"native", "sugarcane_without_burning").
"""
biome: str = ""
"""The biome of the field (can be one of the following "US_FOREST",
"BRAZIL_AMAZON_FOREST", "BRAZIL_AMAZON_SAVANNA", "BRAZIL_CERRADO",
"BRAZIL_PANTANAL", "BRAZIL_CAATINGA", "BRAZIL_MATA_ATLANTICA", or
"BRAZIL_PAMPA").
"""
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/core_types.py |
import json
import warnings
from dataclasses import fields
from datetime import datetime
from typing import _type_repr # type: ignore
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Tuple,
Type,
TypeVar,
Union,
cast,
get_args,
get_origin,
overload,
)
from pydantic import BaseModel
from pystac.asset import Asset
from pystac.item import Item
from shapely import geometry as shpg
from . import data_registry
from .core_types import (
AssetVibe,
BaseVibe,
DataVibe,
DataVibeType,
get_filtered_init_fields,
get_init_field_names,
)
T = TypeVar("T", bound=BaseVibe, covariant=True)
V = TypeVar("V")
class FieldConverter(NamedTuple):
"""A named tuple representing a field converter."""
serializer: Callable[[Any], Any]
"""Serializes a value."""
deserializer: Callable[[Any], Any]
"""A function that deserializes a value."""
def is_json_serializable(x: Any) -> bool:
"""Checks if a field is JSON serializable by Python's default serializer.
:param x: The value to check.
:return: True if the value is JSON serializable, False otherwise.
"""
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
def to_isoformat(x: datetime) -> str:
"""Converts a datetime object to an ISO format string.
:param x: The datetime object to convert.
:return: The ISO format string.
"""
return x.isoformat()
class StacConverter:
"""A class that converts :class:`BaseVibe` objects to STAC Items."""
field_converters = {
shpg.base.BaseGeometry: FieldConverter(shpg.mapping, shpg.shape),
datetime: FieldConverter(to_isoformat, datetime.fromisoformat),
}
"""A dictionary mapping field types to field converters."""
VIBE_DATA_TYPE_FIELD = "terravibes_data_type"
"""The name of the field that contains the data type of the :class:`BaseVibe` object."""
BASEVIBE_FALLBACK_DATETIME = datetime(1970, 1, 1)
"""The fallback datetime to use for :class:`BaseVibe` objects."""
def __init__(self):
pass
def sanitize_properties(self, properties: Dict[Any, Any]) -> Dict[Any, Any]:
"""Sanitizes a dictionary of properties to ensure they are JSON serializable.
:param properties: The dictionary of properties to sanitize.
:return: The sanitized dictionary of properties.
"""
out = {}
for k, v in properties.items():
if is_json_serializable(v):
out[k] = v
else:
Warning(f"Field {k} is not JSON serializable, it will not be added to STAC item.")
return out
def _serialize_type(self, field_value: Any, field_type: Any) -> Any:
converter = self.field_converters.get(field_type)
if converter is None:
if isinstance(field_value, BaseModel):
# We have to do this, otherwise our sanitizer will filter out
# this value
return json.loads(field_value.json())
return field_value
return converter.serializer(field_value)
def _deserialize_type(self, field_value: Any, field_type: Any) -> Any:
converter = self.field_converters.get(field_type)
if converter is None:
return field_value
return converter.deserializer(field_value)
def convert_field(
self, field_value: Any, field_type: Any, converter: Callable[[Any, Any], Any]
) -> Any:
"""Converts a field value to a given type, using a converter function.
:param field_value: The value of the field to convert.
:param field_type: The type to convert the field value to.
:param converter: The converter function to use.
:return: The converted field value.
"""
t_origin = get_origin(field_type)
t_args = get_args(field_type)
if t_origin is list and len(t_args) == 1:
return [self.convert_field(f, t_args[0], converter) for f in field_value]
if t_origin is dict and t_args:
return {k: self.convert_field(v, t_args[1], converter) for k, v in field_value.items()}
if t_origin is tuple and t_args:
if len(t_args) == 2 and t_args[1] == ...:
return tuple(self.convert_field(f, t_args[0], converter) for f in field_value)
return tuple(
self.convert_field(f, ta, converter) if ta is datetime else f
for f, ta in zip(field_value, t_args)
)
return converter(field_value, field_type)
def serialize_fields(
self, field_values: Dict[str, Any], field_types: Dict[str, Any]
) -> Dict[str, Any]:
"""Serializes a dictionary of fields.
:param field_values: The dictionary of field values to serialize.
:param field_types: The dictionary of field types to serialize to.
:return: The serialized dictionary of field values.
"""
return {
k: self.convert_field(v, field_types[k], self._serialize_type)
for k, v in field_values.items()
}
def deserialize_fields(
self, field_values: Dict[str, Any], field_types: Dict[str, Any]
) -> Dict[str, Any]:
"""Deserializes a dictionary of fields.
:param field_values: The dictionary of field values to deserialize.
:param field_types: The dictionary of field types to deserialize to.
:return: The deserialized dictionary of field values.
"""
return {
k: self.convert_field(v, field_types[k], self._deserialize_type)
for k, v in field_values.items()
}
@overload
def to_stac_item(self, input: BaseVibe) -> Item:
...
@overload
def to_stac_item(self, input: List[BaseVibe]) -> List[Item]:
...
def to_stac_item(self, input: Union[List[BaseVibe], BaseVibe]):
"""Converts a :class`BaseVibe` or a list of :class`BaseVibe` to a STAC item
or a list of STAC items.
:param input: The :class`BaseVibe` or list of :class`BaseVibe` to convert.
:return: A STAC item or a list of STAC items.
"""
if isinstance(input, list):
return [
self._to_stac_impl(i) if isinstance(i, DataVibe) else self._base_vibe_to_stac(i)
for i in input
]
if isinstance(input, DataVibe):
return self._to_stac_impl(input)
return self._base_vibe_to_stac(input)
def _extract_properties(self, input: BaseVibe) -> Dict[str, Any]:
# If this object inherits from BaseVibe but not from DataVibe, then the
# base is BaseVibe. Otherwise, the base is DataVibe.
# Whatever the base is, it is the input to `get_init_field_names`
regular_fields = get_init_field_names(
BaseVibe if not isinstance(input, DataVibe) else DataVibe
)
properties = get_filtered_init_fields(input, lambda x: x not in regular_fields)
property_types = {f.name: f.type for f in fields(input) if f.name in properties}
properties = self.serialize_fields(properties, property_types)
return properties
def _base_vibe_to_stac(self, input: BaseVibe) -> Item:
properties = self._extract_properties(input)
properties = self.sanitize_properties(properties)
extra_fields = {self.VIBE_DATA_TYPE_FIELD: data_registry.get_id(type(input))}
item = Item(
id=input.id,
datetime=self.BASEVIBE_FALLBACK_DATETIME
if not hasattr(input, "datetime")
else input.datetime, # type: ignore
bbox=None,
geometry=None,
properties=properties,
extra_fields=extra_fields,
)
return item
def _to_stac_impl(self, input: DataVibe) -> Item:
properties = self._extract_properties(input)
properties["start_datetime"] = input.time_range[0].isoformat()
properties["end_datetime"] = input.time_range[1].isoformat()
extra_fields = {self.VIBE_DATA_TYPE_FIELD: data_registry.get_id(type(input))}
properties = self.sanitize_properties(properties)
item = Item(
id=input.id,
datetime=input.time_range[0],
bbox=list(input.bbox),
geometry=input.geometry,
properties=properties,
extra_fields=extra_fields,
)
for asset in input.assets:
item.add_asset(
key=asset.id,
asset=Asset(href=asset.path_or_url, media_type=asset.type),
)
return item
@overload
def from_stac_item(self, input: Item) -> BaseVibe:
...
@overload
def from_stac_item(self, input: List[Item]) -> List[BaseVibe]:
...
def from_stac_item(self, input: Union[Item, List[Item]]) -> Union[BaseVibe, List[BaseVibe]]:
"""Converts a STAC item or a list of STAC items to a :class`BaseVibe`
or a list of :class`BaseVibe`.
:param input: The STAC item or list of STAC items to convert.
:return: A :class`BaseVibe` or a list of :class`BaseVibe`.
"""
if isinstance(input, list):
return [self._from_stac_impl(i) for i in input]
return self._from_stac_impl(input)
def _from_stac_impl(self, input: Item) -> BaseVibe:
# Figuring out type to create
vibe_data_type = self.resolve_type(input)
# Need to find the necessary arguments to the constructor of the type
init_fields = list(get_init_field_names(vibe_data_type))
init_field_types = {f.name: f.type for f in fields(vibe_data_type) if f.name in init_fields}
# Read properties from item stac into init fields
in_props: Dict[str, Any] = input.properties # type: ignore
data_kw = {f: in_props[f] for f in init_fields if f in in_props}
data_kw = self.deserialize_fields(data_kw, init_field_types)
data_kw.update(self._build_extra_kwargs(input, vibe_data_type))
# Creating actual object
return vibe_data_type(**data_kw)
def _build_extra_kwargs(self, input: Item, type: Type[BaseVibe]) -> Dict[str, Any]:
# Adding DataVibe-specific fields - think of better mechanism to do this...
data_kw = {}
if issubclass(type, DataVibe):
data_kw["id"] = input.id
data_kw["time_range"] = convert_time_range(input)
data_kw["geometry"] = input.geometry # type: ignore
data_kw["assets"] = [
AssetVibe(reference=a.href, type=a.media_type, id=id)
for id, a in input.assets.items()
]
return data_kw
def resolve_type(self, input: Item) -> Type[BaseVibe]:
"""Resolves the type of a :class`BaseVibe` object from a STAC item.
:param input: The STAC item to resolve the type from.
:return: The type of :class`BaseVibe`.
"""
extra_fields: Dict[str, Any] = input.extra_fields # type: ignore
if self.VIBE_DATA_TYPE_FIELD not in extra_fields:
return BaseVibe
return cast(
Type[BaseVibe],
data_registry.retrieve(extra_fields[self.VIBE_DATA_TYPE_FIELD]),
)
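# Illustrative usage sketch: a StacConverter round trip between a DataVibe and a STAC Item.
# The DataVibe instance is assumed to be provided by the caller.
def _example_stac_roundtrip(data: DataVibe) -> BaseVibe:
    converter = StacConverter()
    item = converter.to_stac_item(data)  # DataVibe -> pystac.Item
    return converter.from_stac_item(item)  # Item -> equivalent DataVibe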
def convert_time_range(item: Item) -> Tuple[datetime, datetime]:
"""Converts the time range of a STAC item to a tuple of datetimes.
:param item: The STAC item to convert the time range for.
:return: A tuple of datetimes representing the start and end of the time range.
"""
conv_foo = datetime.fromisoformat
props: Dict[str, Any] = item.properties # type: ignore
if "start_datetime" in props and "end_datetime" in props:
return (
conv_foo(props["start_datetime"]),
conv_foo(props["end_datetime"]),
)
assert item.datetime is not None
return (item.datetime, item.datetime)
@overload
def serialize_stac(arg: Item) -> Dict[str, Any]:
...
@overload
def serialize_stac(arg: List[Item]) -> List[Dict[str, Any]]:
...
def serialize_stac(arg: Union[Item, List[Item]]):
"""Serializes a STAC item or a list of STAC items to a dictionary or a list of dictionaries.
:param arg: The STAC item or list of STAC items to serialize.
:return: A dictionary or a list of dictionaries representing the STAC item
or list of STAC items.
"""
if isinstance(arg, list):
return [item.to_dict(include_self_link=False) for item in arg]
return arg.to_dict(include_self_link=False)
@overload
def deserialize_stac(arg: Dict[str, Any]) -> Item:
...
@overload
def deserialize_stac(arg: List[Dict[str, Any]]) -> List[Item]:
...
def deserialize_stac(arg: Union[List[Dict[str, Any]], Dict[str, Any]]):
"""Deserializes a dictionary or a list of dictionaries to a STAC item
or a list of STAC items.
:param arg: The dictionary or list of dictionaries to deserialize.
:return: A STAC item or a list of STAC items.
"""
item_builder = Item.from_dict
if isinstance(arg, list):
return [item_builder(in_dict) for in_dict in arg]
return item_builder(arg)
@overload
def serialize_input(input_data: BaseVibe) -> Dict[str, Any]:
...
@overload
def serialize_input(input_data: List[T]) -> List[Dict[str, Any]]:
...
@overload
def serialize_input(
input_data: Dict[str, Union[T, List[T]]]
) -> Dict[str, Union[Dict[str, Any], List[Dict[str, Any]]]]:
...
def serialize_input(input_data: Any) -> Any:
"""Serializes a single :class`BaseVibe` object, or a list or dictionary of them,
to a STAC item or a list or dictionary of STAC items.
:param input_data: The :class`BaseVibe` object or a list or dictionary
of :class`BaseVibe` objects to serialize.
:return: A list, a dictionary or a single STAC Item representing the :class`BaseVibe`
object.
:raises NotImplementedError: If the input data is not a :class`BaseVibe` object,
or a list or dictionary of :class`BaseVibe`.
"""
# Dictionary where keys are workflow sources
if isinstance(input_data, dict):
return {k: serialize_input(v) for k, v in input_data.items()}
# Input is a list of elements
if isinstance(input_data, list):
return [serialize_input(i) for i in input_data]
if isinstance(input_data, BaseVibe):
return serialize_stac(StacConverter().to_stac_item(input_data))
raise NotImplementedError(f"Unable to serialize {input_data.__class__} objects to JSON")
def get_base_type(vibetype: DataVibeType) -> Type[BaseVibe]:
"""Determines the base type of a typing specification.
:param vibetype: The type to determine the base type of.
:return: The base type of vibetype.
:raises ValueError: If the type hierarchy contains nested container types
(e.g., List[List[:class:`DataVibe`]]).
Doctests:
>>> get_base_type(DataVibe)
vibe_core.data.DataVibe
    >>> get_base_type(List[DataVibe])
vibe_core.data.DataVibe
"""
if not (is_container_type(vibetype) or isinstance(vibetype, type)):
raise ValueError(f"Argument {vibetype} is not a type")
if isinstance(vibetype, type):
return cast(Type[T], vibetype)
levels = 1
tmp = get_args(vibetype)
while tmp is not None and is_container_type(tmp[0]):
origin = get_origin(tmp[0])
if origin is None:
raise AssertionError("Found a None type in the hierarchy")
if not issubclass(origin, list):
raise ValueError(f"Container type {origin.__name__} is not supported")
tmp = get_args(tmp[0])
levels += 1
if levels > 1:
raise ValueError("Nested container types are not supported")
return tmp[0]
def is_container_type(typeclass: Union[Type[V], List[Type[V]]]) -> bool:
"""Checks if a type is a container type.
:param typeclass: The type to check.
:return: True if the type is a container type, False otherwise.
"""
return bool(get_args(typeclass))
def is_vibe_list(typeclass: DataVibeType) -> bool:
"""Checks if a type is a list of :class`BaseVibe` objects.
:param typeclass: The type to check.
:return: True if the type is a list of :class`BaseVibe` objects, False otherwise.
"""
origin = get_origin(typeclass)
return origin is not None and issubclass(origin, list)
def get_most_specific_type(types: List[DataVibeType]) -> DataVibeType:
"""Determines the most specific type of a list of types.
:param types: The list of types to determine the most specific type of.
:return: The most specific type of types.
:raises ValueError: If the types are not compatible.
"""
t_set = set(get_base_type(t) for t in types)
for t in t_set:
if all(issubclass(t, tt) for tt in t_set):
break
else:
types_str = ", ".join([f"'{_type_repr(t)}'" for t in t_set])
raise ValueError(f"Types {types_str} are not compatible")
if all(is_container_type(tt) for tt in types):
return List[t]
return t
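# Illustrative usage sketch: resolving base and most specific types for port compatibility.
def _example_type_resolution() -> DataVibeType:
    assert get_base_type(List[DataVibe]) is DataVibe
    # With a mix of scalar and list specifications, the scalar form is returned.
    return get_most_specific_type([DataVibe, List[DataVibe]])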
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/utils.py |
from dataclasses import dataclass
from typing import Any, Dict
from .core_types import DataVibe
from .rasters import Raster
@dataclass
class AirbusProduct(DataVibe):
"""
Represents Airbus product metadata obtained from the search API.
Contains no image assets.
"""
acquisition_id: str
"""The ID of the acquisition."""
extra_info: Dict[str, Any]
"""A dictionary with extra information about the product."""
@dataclass
class AirbusPrice(DataVibe):
"""Represents the price of an Airbus product."""
price: float
"""The price of the product."""
@dataclass
class AirbusRaster(Raster, AirbusProduct):
"""
Represents an Airbus raster product, downloaded with specific product type,
radiometric processing, projection.
"""
pass
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/airbus.py |
import warnings
from typing import Any, Dict, Type, TypeVar, cast
GenericTypeVibe = TypeVar("GenericTypeVibe", bound=Type[Any])
"""Generic type for data registry."""
__DATA_REGISTRY: Dict[str, Type[Any]] = {}
def register_vibe_datatype(classdef: GenericTypeVibe) -> GenericTypeVibe:
"""Registers a class as a data type in the FarmVibes.AI data registry.
:param classdef: The class to register.
:return: The class.
"""
id = get_name(classdef)
if id in __DATA_REGISTRY:
warnings.warn(f"Class {id} already registered.", DeprecationWarning, stacklevel=2)
return cast(GenericTypeVibe, retrieve(id))
__DATA_REGISTRY[id] = classdef
return cast(GenericTypeVibe, classdef)
def retrieve(id: str) -> Type[Any]:
"""
Retrieves a registered data type from the FarmVibes.AI data registry.
:param id: The ID of the data type to retrieve.
:return: The registered data type.
"""
return __DATA_REGISTRY[id]
def get_name(classdef: Type[Any]) -> str:
"""
Gets the name of a class.
:param classdef: The class to get the name of.
:return: The name of the class.
"""
return classdef.__name__
def get_id(classdef: Type[Any]) -> str:
"""
Gets the ID of a class.
:param classdef: The class to get the ID of.
:return: The ID of the class.
"""
id = get_name(classdef)
return id
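# Illustrative usage sketch: registering a hypothetical class and retrieving it by name.
def _example_registry_roundtrip() -> Type[Any]:
    class Example:
        pass
    register_vibe_datatype(Example)
    return retrieve("Example")  # lookup is by class name; unknown names raise KeyError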
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/data_registry.py |
import mimetypes
from dataclasses import dataclass, field
from datetime import datetime
from enum import auto
from typing import Any, Dict, List, Union
from dateutil.parser import parse as parse_date
from strenum import StrEnum
from . import AssetVibe, DataVibe
from .core_types import TimeRange, gen_guid
from .rasters import CategoricalRaster, CloudRaster, Raster, RasterSequence
class S2ProcessingLevel(StrEnum):
"""Enum for Sentinel 2 processing levels."""
L1C = auto()
"""Level 1C processing level."""
L2A = auto()
"""Level 2A processing level."""
def discriminator_date(product_name: str) -> datetime:
"""A function that extracts the date from a Sentinel-2 product name.
:param product_name: The name of the Sentinel-2 product.
:return: The date of the Sentinel-2 product as a datetime object.
"""
return parse_date(product_name.split("_")[-1])
# TODO: remove the generic dictionary for a list of actual information that we want to keep.
# consider having two representations, one for Sentinel 1 another for Sentinel 2.
@dataclass
class SentinelProduct(DataVibe):
"""Represents a Sentinel product metadata (does not include the image data)."""
product_name: str
"""The name of the Sentinel product."""
orbit_number: int
"""The orbit number of the Sentinel product."""
relative_orbit_number: int
"""The relative orbit number of the Sentinel product."""
orbit_direction: str
"""The orbit direction of the Sentinel product."""
platform: str
"""The platform of the Sentinel product."""
extra_info: Dict[str, Any] # Allows for generic information to be stored.
"""A dictionary with extra information about the Sentinel product."""
@dataclass
class Sentinel1Product(SentinelProduct):
"""Represents a Sentinel-1 product metadata."""
sensor_mode: str
"""The sensor mode of the Sentinel-1 product."""
polarisation_mode: str
"""The polarisation mode of the Sentinel-1 product."""
@dataclass
class Sentinel2Product(SentinelProduct):
"""Represents a Sentinel-2 product metadata."""
tile_id: str
"""The tile ID of the Sentinel-2 product."""
processing_level: str
"""The processing level of the Sentinel-2 product."""
@dataclass
class CloudMask(CategoricalRaster, CloudRaster, Sentinel2Product):
"""Represents a cloud mask raster for a Sentinel-2 product."""
pass
@dataclass
class SentinelRaster(Raster, SentinelProduct):
"""Represents a raster for a Sentinel product."""
pass
@dataclass
class DownloadedSentinel2Product(Sentinel2Product):
"""Represents a downloaded Sentinel-2 product."""
CLOUD_MASK: str = "cloud_mask"
"""The key for the cloud mask asset in the asset map."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping the band name to the asset ID."""
def add_downloaded_band(self, band_name: str, asset_path: str):
"""A method that adds a downloaded band to the asset map of
the :class:`DownloadedSentinel2Product` object.
:param band_name: The name of the band to add.
:param asset_path: The path to the downloaded band file.
:raises ValueError: If the file type is not supported (types other than TIFF or JP2).
"""
band_guid = gen_guid()
# Double check mime type
self.asset_map[band_name] = band_guid
# Check if this is also true for L1A
asset_type = mimetypes.guess_type(asset_path)[0]
if asset_type is None or asset_type not in ["image/tiff", "image/jp2"]:
            raise ValueError(
                f"Only TIFF and JP2 files are supported for Sentinel-2 downloads. Found {asset_type}."
            )
self.assets.append(AssetVibe(asset_path, asset_type, band_guid))
def _lookup_asset(self, guid: str) -> AssetVibe:
def id_eq(x: AssetVibe):
return x.id == guid
return list(filter(id_eq, self.assets))[0]
def add_downloaded_cloudmask(self, asset_path: str):
"""A method that adds a downloaded cloud mask to the asset map of
the :class:`DownloadedSentinel2Product` object.
:param asset_path: The path to the downloaded cloud mask file.
"""
cloud_guid = gen_guid()
# Double check mime type
self.asset_map[self.CLOUD_MASK] = cloud_guid
self.assets.append(AssetVibe(asset_path, "application/gml+xml", cloud_guid))
def get_downloaded_band(self, band_name: str) -> AssetVibe:
"""A method that returns the downloaded band asset for the given band name.
:param band_name: The name of the band to return.
:return: The downloaded band asset.
"""
guid = self.asset_map[band_name]
return self._lookup_asset(guid)
def get_downloaded_cloudmask(self) -> AssetVibe:
"""A method that retrieves the downloaded cloud mask asset.
:return: The downloaded cloud mask asset.
"""
guid = self.asset_map[self.CLOUD_MASK]
return self._lookup_asset(guid)
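# Illustrative usage sketch: attaching downloaded band and cloud-mask files to a product.
# The product instance and file paths are hypothetical.
def _example_register_s2_assets(product: DownloadedSentinel2Product) -> AssetVibe:
    product.add_downloaded_band("B04", "/tmp/T10SEG_20230601_B04_10m.tif")
    product.add_downloaded_cloudmask("/tmp/MSK_CLOUDS_B00.gml")
    return product.get_downloaded_band("B04")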
@dataclass
class DownloadedSentinel1Product(Sentinel1Product):
"""Represents a downloaded Sentinel-1 product."""
ZIP_FILE = "zip"
"""The key for the zip asset in the asset map."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping the band name to the asset ID."""
def _lookup_asset(self, guid: str) -> AssetVibe:
def id_eq(x: AssetVibe):
return x.id == guid
return list(filter(id_eq, self.assets))[0]
def add_zip_asset(self, asset_path: str):
"""A method that adds a downloaded zip asset to the asset map of
the :class:`DownloadedSentinel1Product` object.
:param asset_path: The path to the downloaded zip file.
"""
zip_guid = gen_guid()
# Double check mime type
self.asset_map[self.ZIP_FILE] = zip_guid
self.assets.append(AssetVibe(asset_path, "application/zip", zip_guid))
def get_zip_asset(self) -> AssetVibe:
"""A method that retrieves the downloaded zip asset.
:return: The downloaded zip asset.
"""
guid = self.asset_map[self.ZIP_FILE]
return self._lookup_asset(guid)
@dataclass
class Sentinel1Raster(Raster, Sentinel1Product):
"""Represents a raster for a Sentinel-1 product."""
tile_id: str
"""The tile ID of the raster."""
@dataclass
class Sentinel2Raster(Raster, Sentinel2Product):
"""Represents a raster for a Sentinel-2 product."""
def __post_init__(self):
super().__post_init__()
self.quantification_value = 10000
@dataclass
class Sentinel2CloudProbability(CloudRaster, Sentinel2Product):
"""Represents a cloud probability raster for a Sentinel-2 product."""
pass
@dataclass
class Sentinel2CloudMask(CloudMask, Sentinel2Product):
"""Represents a cloud mask raster for a Sentinel-2 product."""
pass
class SpaceEyeRaster(Sentinel2Raster):
"""Represents a SpaceEye raster."""
pass
@dataclass
class TiledSentinel1Product(DownloadedSentinel1Product):
"""Represents a tiled Sentinel-1 product."""
tile_id: str = ""
"""The tile ID of the product."""
def __post_init__(self):
if not self.tile_id:
raise ValueError("tile_id is a mandatory argument even though it isn't.")
return super().__post_init__()
@dataclass
class Sentinel1RasterOrbitGroup(Sentinel1Raster):
"""Represents a group of Sentinel-1 raster orbits."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping the asset ID to the acquisition date."""
def add_raster(self, raster: Sentinel1Raster):
"""A method that adds a raster to the orbit group.
:param raster: The raster to add to the orbit group.
"""
asset = raster.raster_asset
self.asset_map[asset.id] = raster.time_range[0].isoformat()
self.assets.append(raster.raster_asset)
def get_ordered_assets(self) -> List[AssetVibe]:
"""A method that returns the assets in the orbit group in ascending
order of acquisition date.
:return: The list of sorted assets in the orbit group.
"""
return sorted(self.assets, key=lambda x: datetime.fromisoformat(self.asset_map[x.id]))
@dataclass
class Sentinel2RasterOrbitGroup(Sentinel2Raster):
"""Represents a group of Sentinel-2 raster orbits."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping the asset ID to the acquisition date."""
def add_raster(self, raster: Sentinel2Raster):
"""A method that adds a raster to the orbit group.
:param raster: The raster to add to the orbit group.
"""
asset = raster.raster_asset
self.asset_map[asset.id] = discriminator_date(raster.product_name).isoformat()
self.assets.append(raster.raster_asset)
def get_ordered_assets(self) -> List[AssetVibe]:
"""A method that returns the assets in the orbit group in ascending
order of acquisition date.
:return: The list of sorted assets in the orbit group.
"""
return sorted(
self.assets, key=lambda x: datetime.fromisoformat(self.asset_map[x.id]), reverse=True
)
@dataclass
class Sentinel2CloudMaskOrbitGroup(Sentinel2CloudMask):
"""Represents a group of Sentinel-2 cloud mask orbits."""
asset_map: Dict[str, str] = field(default_factory=dict)
"""A dictionary mapping the asset ID to the acquisition date."""
def add_raster(self, raster: Sentinel2CloudMask):
"""A method that adds a raster to the orbit group.
:param raster: The raster to add to the orbit group.
"""
asset = raster.raster_asset
self.asset_map[asset.id] = discriminator_date(raster.product_name).isoformat()
self.assets.append(raster.raster_asset)
def get_ordered_assets(self) -> List[AssetVibe]:
"""A method that returns the assets in the orbit group in ascending
order of acquisition date.
:return: The list of sorted assets in the orbit group.
"""
return sorted(
self.assets, key=lambda x: datetime.fromisoformat(self.asset_map[x.id]), reverse=True
)
@dataclass
class TileSequence(RasterSequence):
"""Represents a sequence of rasters for a tile."""
write_time_range: TimeRange = field(default_factory=tuple)
"""The time range of the sequence."""
def __post_init__(self):
super().__post_init__()
if len(self.write_time_range) != 2:
raise ValueError(
"write_time_range must be a tuple of two datetime items,"
f"found {self.write_time_range=}"
)
@dataclass
class Sentinel1RasterTileSequence(TileSequence, Sentinel1Raster):
"""Represents a sequence of Sentinel-1 rasters for a tile."""
pass
@dataclass
class Sentinel2RasterTileSequence(TileSequence, Sentinel2Raster):
"""Represents a sequence of Sentinel-2 rasters for a tile."""
pass
@dataclass
class Sentinel2CloudMaskTileSequence(TileSequence, Sentinel2CloudMask):
"""Represents a sequence of Sentinel-2 cloud masks for a tile."""
pass
@dataclass
class SpaceEyeRasterSequence(TileSequence, SpaceEyeRaster):
"""Represents a sequence of SpaceEye rasters for a tile."""
pass
TileData = Union[Sentinel1Raster, Sentinel2Raster, Sentinel2CloudMask]
"""
A type alias for any of the tile data classes (:class:`Sentinel1Raster`,
:class:`Sentinel2Raster`, and :class:`Sentinel2CloudMask`).
"""
ListTileData = List[TileData]
"""A type alias for a list of :const:`TileData`."""
TileSequenceData = Union[
Sentinel1RasterTileSequence,
Sentinel2RasterTileSequence,
Sentinel2CloudMaskTileSequence,
]
"""
A type alias for any of the tile sequence data classes (:class:`Sentinel1RasterTileSequence`,
:class:`Sentinel2RasterTileSequence`, and :class:`Sentinel2CloudMaskTileSequence`).
"""
Tile2Sequence = {
Sentinel1Raster: Sentinel1RasterTileSequence,
Sentinel2Raster: Sentinel2RasterTileSequence,
Sentinel2CloudMask: Sentinel2CloudMaskTileSequence,
}
"""A dictionary mapping the tile data classes to the tile sequence data classes."""
Sequence2Tile = {
Sentinel1RasterTileSequence: Sentinel1Raster,
Sentinel2RasterTileSequence: Sentinel2Raster,
Sentinel2CloudMaskTileSequence: Sentinel2CloudMask,
SpaceEyeRasterSequence: SpaceEyeRaster,
}
"""A dictionary mapping tile sequence data classes to tile data classes."""
| EXA-1-master | exa/libraries/farmvibes-ai-main/src/vibe_core/vibe_core/data/sentinel.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
from io import open
from setuptools import find_packages, setup
setup(
name="torchscale",
version="0.2.0",
author="TorchScale Team",
author_email="[email protected]",
description="Transformers at any scale",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="Transformers at any scale",
license="MIT",
url="https://github.com/microsoft/torchscale",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=["torch>=1.8", "fairscale==0.4.0", "timm==0.4.12"],
python_requires=">=3.8.0",
classifiers=[
"Programming Language :: Python :: 3",
],
)
| EXA-1-master | exa/libraries/torchscale/setup.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/torchscale/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import numpy as np
import torch
import torch.nn as nn
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
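# Worked example (illustrative): duplicate_interleave(torch.tensor([[1., 2.]]))
# returns tensor([[1., 1., 2., 2.]]) -- each column is duplicated and interleaved,
# which lets the sin/cos terms line up with the rotate_every_two() pairing below.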
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
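# Illustrative usage (shapes assumed): apply the extrapolatable rotary encoding to
# attention queries/keys of shape (batch * heads, seq_len, head_dim).
# xpos = XPOS(head_dim=64)
# q = xpos(q, offset=0, downscale=False)
# k = xpos(k, offset=0, downscale=True)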
| EXA-1-master | exa/libraries/torchscale/torchscale/component/xpos_relative_position.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
import torch
import torch.nn as nn
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
class MultiwayNetwork(nn.Module):
def __init__(self, module, dim=1):
super().__init__()
self.dim = dim
self.A = module
self.B = copy.deepcopy(module)
self.B.reset_parameters()
self.split_position = -1
def forward(self, x, **kwargs):
if self.split_position == -1:
return self.A(x, **kwargs)
if self.split_position == 0:
return self.B(x, **kwargs)
x1, x2 = torch.split(
x,
[self.split_position, x.size(self.dim) - self.split_position],
dim=self.dim,
)
# x1, x2 = x[:self.split_position], x[self.split_position:]
y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
return torch.cat([y1, y2], dim=self.dim)
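# Illustrative usage: route the first `n_text` positions through branch A and the
# rest through branch B (e.g. text vs. vision tokens); `n_text` is a placeholder.
# net = MultiwayNetwork(nn.Linear(8, 8))
# net.apply(set_split_position(n_text))
# y = net(x)   # x: (batch, seq_len, 8), split along dim=1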
class MutliwayEmbedding(MultiwayNetwork):
def __init__(self, modules, dim=1):
super(MultiwayNetwork, self).__init__()
self.dim = dim
assert len(modules) == 2
self.A = modules[0]
self.B = modules[1]
self.split_position = -1 | EXA-1-master | exa/libraries/torchscale/torchscale/component/multiway_network.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .multiway_network import MultiwayWrapper
from .xpos_relative_position import XPOS
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights
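# Illustrative usage (the `args` namespace is assumed to provide at least
# `multiway` and `xpos_rel_pos`):
# attn = MultiheadAttention(args, embed_dim=512, num_heads=8, self_attention=True)
# out, weights = attn(x, x, x)   # x: (batch, seq_len, 512)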
| EXA-1-master | exa/libraries/torchscale/torchscale/component/multihead_attention.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn as nn
class RelativePositionBias(nn.Module):
def __init__(
self, bidirectional=True, num_buckets=32, max_distance=128, n_heads=12
):
super().__init__()
self.bidirectional = bidirectional
self.num_buckets = num_buckets
self.max_distance = max_distance
self.n_heads = n_heads
self.relative_attention_bias = nn.Embedding(self.num_buckets, self.n_heads)
@staticmethod
def _relative_position_bucket(
relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
return ret
def compute_bias(self, qlen, klen, step=None):
step = 0 if step is None else step
context_position = torch.arange(
step,
step + qlen,
dtype=torch.long,
device=self.relative_attention_bias.weight.device,
)[:, None]
memory_position = torch.arange(
klen, dtype=torch.long, device=self.relative_attention_bias.weight.device
)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.bidirectional,
num_buckets=self.num_buckets,
max_distance=self.max_distance,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(
rp_bucket
) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(
0
) # shape (1, num_heads, qlen, klen)
return values
def forward(self, batch_size, qlen, klen, step=None):
# shape (batch * num_heads, qlen, klen)
return (
self.compute_bias(qlen, klen, step)
.repeat(batch_size, 1, 1, 1)
.view(-1, qlen, klen)
)
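# Illustrative usage: additive bias for attention logits.
# rel_pos = RelativePositionBias(n_heads=12)
# bias = rel_pos(batch_size=2, qlen=16, klen=16)   # shape: (2 * 12, 16, 16)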
| EXA-1-master | exa/libraries/torchscale/torchscale/component/relative_position_bias.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tokens is None:
return self.vision_embed(visual_tokens)
if visual_tokens is None:
return self.text_embed(textual_tokens)
x1 = self.vision_embed(visual_tokens)
x2 = self.text_embed(textual_tokens)
return torch.cat([x1, x2], dim=1)
class VisionEmbedding(nn.Module):
"""Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
contain_mask_token=False,
prepend_cls_token=False,
):
super().__init__()
img_size = (img_size, img_size)
patch_size = (patch_size, patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
if contain_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.mask_token = None
if prepend_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
def num_position_embeddings(self):
if self.cls_token is None:
return self.num_patches
else:
return self.num_patches + 1
def forward(self, x, masked_position=None, **kwargs):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
batch_size, seq_len, _ = x.size()
if masked_position is not None:
assert self.mask_token is not None
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
w = masked_position.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
return x
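# Illustrative usage: patchify a batch of RGB images into a token sequence.
# embed = VisionEmbedding(img_size=224, patch_size=16, in_chans=3, embed_dim=768,
#                         prepend_cls_token=True)
# tokens = embed(torch.randn(2, 3, 224, 224))   # shape: (2, 197, 768) incl. CLS token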
class TextEmbedding(nn.Embedding):
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0, std=self.embedding_dim**-0.5)
self._fill_padding_idx_with_zero()
class PositionalEmbedding(nn.Embedding):
def forward(
self,
x,
positions=None,
**kwargs,
):
if positions is None:
# being consistent with Fairseq, which starts from 2.
positions = (
torch.arange(2, x.size(1) + 2, device=x.device).long().unsqueeze(0)
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
| EXA-1-master | exa/libraries/torchscale/torchscale/component/embedding.py |
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.functional import scaled_dot_product_attention as flash_attention
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .multiway_network import MultiwayWrapper
from .xpos_relative_position import XPOS
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
use_flash=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
self.use_flash = use_flash
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
    def flash_attn(self, q, k, v, mask=None):
        # q, k, v: (bsz * num_heads, seq_len, head_dim); derive sizes locally
        # rather than relying on variables from the caller's scope.
        bsz, tgt_len = q.size(0) // self.num_heads, q.size(1)
        # q is pre-scaled by head_dim**-0.5 in forward(), so pass scale=1.0 (needs a
        # PyTorch version with the `scale` kwarg); the fused kernel returns only the
        # attention output, so no attention weights are available on this path.
        attn_output = flash_attention(q, k, v, attn_mask=mask, scale=1.0)
        attn_output = attn_output.transpose(0, 1).reshape(tgt_len, bsz, self.embed_dim).transpose(0, 1)
        return attn_output, None
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
if self.use_flash:
attn_output, attn_weights = self.flash_attn(q, k, v, mask=attn_mask)
else:
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn_output = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn_output, attn_weights | EXA-1-master | exa/libraries/torchscale/torchscale/component/new_multi.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from timm.models.layers import drop_path
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self):
return "p={}".format(self.drop_prob)
| EXA-1-master | exa/libraries/torchscale/torchscale/component/droppath.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/torchscale/component/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class set_torch_seed(object):
def __init__(self, seed):
assert isinstance(seed, int)
self.rng_state = self.get_rng_state()
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def get_rng_state(self):
state = {"torch_rng_state": torch.get_rng_state()}
if torch.cuda.is_available():
state["cuda_rng_state"] = torch.cuda.get_rng_state()
return state
def set_rng_state(self, state):
torch.set_rng_state(state["torch_rng_state"])
if torch.cuda.is_available():
torch.cuda.set_rng_state(state["cuda_rng_state"])
def __enter__(self):
return self
def __exit__(self, *exc):
self.set_rng_state(self.rng_state)
def make_experts(args, embed_dim, expert_ffn_dim):
world_size = (
1
if not torch.distributed.is_initialized()
else torch.distributed.get_world_size()
)
expert_list = []
ddp_rank = args.ddp_rank
start_seed = torch.randint(1000000, (1,)).item()
# at least as many experts than gpus
if args.moe_expert_count >= world_size:
assert (
args.moe_expert_count % world_size == 0
), f"{args.moe_expert_count}, {world_size}"
local_moe_expert_count = args.moe_expert_count // world_size
for i in range(local_moe_expert_count):
with set_torch_seed(start_seed + ddp_rank * local_moe_expert_count + i):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
else:
assert (
world_size % args.moe_expert_count == 0
), f"{world_size}, {args.moe_expert_count}"
with set_torch_seed(start_seed + ddp_rank % args.moe_expert_count):
expert_list.append(
FeedForwardNetwork(
embed_dim,
expert_ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
)
experts = nn.ModuleList(expert_list)
return experts
def get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
else:
raise NotImplementedError
class FeedForwardNetwork(nn.Module):
def __init__(
self,
embed_dim,
ffn_dim,
activation_fn,
dropout,
activation_dropout,
layernorm_eps,
subln=False,
):
super().__init__()
self.embed_dim = embed_dim
self.activation_fn = get_activation_fn(activation=str(activation_fn))
self.activation_dropout_module = torch.nn.Dropout(activation_dropout)
self.dropout_module = torch.nn.Dropout(dropout)
self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
self.ffn_layernorm = LayerNorm(ffn_dim, eps=layernorm_eps) if subln else None
def reset_parameters(self):
self.fc1.reset_parameters()
self.fc2.reset_parameters()
if self.ffn_layernorm is not None:
self.ffn_layernorm.reset_parameters()
def forward(self, x):
x_shape = x.shape
x = x.reshape(-1, x.size(-1))
x = self.fc1(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = x.view(x_shape)
x = self.dropout_module(x)
return x
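# Illustrative usage (hyperparameters below are assumptions, not repo defaults):
# ffn = FeedForwardNetwork(embed_dim=512, ffn_dim=2048, activation_fn="gelu",
#                          dropout=0.1, activation_dropout=0.0, layernorm_eps=1e-5,
#                          subln=True)
# y = ffn(x)   # x: (batch, seq_len, 512) -> y keeps the same shape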
| EXA-1-master | exa/libraries/torchscale/torchscale/component/feedforward_network.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/torchscale/component/xmoe/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import logging
import time
from typing import Any, Tuple, cast
import torch
import torch.distributed as dist
from torch import Tensor
from torch.nn import Module, ModuleList
try:
from fairseq.modules.moe import MOELayer
has_fairseq = True
Base = MOELayer
except ModuleNotFoundError:
Base = Module
has_fairseq = False
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected]
from tutel import moe as tutel_moe
has_tutel, fused_cumsum_sub_one = True, tutel_moe.fast_cumsum_sub_one
except ModuleNotFoundError:
has_tutel, fused_cumsum_sub_one = False, lambda mask: torch.cumsum(mask, dim=0) - 1
logger = logging.getLogger(__name__)
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
if torch.distributed.is_initialized():
dist.all_to_all_single(output, input, group=group)
else:
assert group is None
output = input
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
def _find_my_group_index(grouped_ranks):
my_rank = dist.get_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError
def get_moe_group(moe_expert_count):
if dist.is_initialized():
if not hasattr(get_moe_group, "_moe_groups"):
world_size = dist.get_world_size()
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
moe_groups = [[i] for i in range(world_size)]
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
moe_groups = [
[i + j * moe_expert_count for j in range(ranks_per_group)]
for i in range(moe_expert_count)
]
get_moe_group._moe_group_idx = moe_groups
get_moe_group._moe_groups = [dist.new_group(g) for g in moe_groups]
my_group_idx = _find_my_group_index(get_moe_group._moe_group_idx)
return get_moe_group._moe_groups[my_group_idx]
def get_all2all_group(moe_expert_count):
if dist.is_initialized():
if not hasattr(get_all2all_group, "_all2all_groups"):
world_size = dist.get_world_size()
# more experts than world size
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
all2all_groups = [[i for i in range(world_size)]]
# larger world than num experts
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
all2all_groups = [
[i * moe_expert_count + j for j in range(moe_expert_count)]
for i in range(ranks_per_group)
]
get_all2all_group._all2all_group_idx = all2all_groups
get_all2all_group._all2all_groups = [
dist.new_group(g) for g in all2all_groups
]
my_group_idx = _find_my_group_index(get_all2all_group._all2all_group_idx)
return get_all2all_group._all2all_groups[my_group_idx]
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output = moe(input)
l_aux = moe.l_aux
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
expert (torch.nn.Module):
expert network
"""
def __init__(self, gate, experts, args):
if has_fairseq:
super(Base, self).__init__()
else:
super().__init__()
self.gate = gate
if type(experts) == ModuleList:
self.experts = cast(ModuleList, experts)
else:
self.experts = ModuleList([experts])
self.expert_group = get_moe_group(args.moe_expert_count)
self.all2all_group = get_all2all_group(args.moe_expert_count)
self.world_size = dist.get_world_size(group=self.expert_group)
self.all2all_size = dist.get_world_size(group=self.all2all_group)
for p in experts.parameters():
p.expert = True # type: ignore
self.num_local_experts = len(self.experts)
self.args = args
self.in_generation = False
self.a2a_cuda_event_intervals = []
self.a2a_cpu_time_ms = 0.0
def forward(self, *input: Tensor, input_padding_mask=None, **kwargs: Any) -> Tensor:
assert len(input) == 1, "only single input Tensor supported"
input = input[0]
assert (
len(input.shape) == 3
), "input Tensor must have dimensions: (s)equence, (t)oken, (m)odel"
if input_padding_mask is not None:
assert (
len(input_padding_mask.shape) == 2
), "input Tensor must have dimensions: (s)equence, (t)oken"
assert input_padding_mask.shape[0] == input.shape[0]
assert input_padding_mask.shape[1] == input.shape[1]
# assert input.shape[0] % len(self.experts) == 0, "num tokens must be order of number of local experts"
# Implement Algorithm 2 from GShard paper.
d_model = input.shape[2]
# Pad to expected batch size
input_shape = list(input.shape)
expected_bsz = (
getattr(self.args, "batch_size", 0)
if self.training
else getattr(self.args, "batch_size_valid", 0)
)
# This indicates that --batch-size or --max-sentences is not specified
if expected_bsz is None:
expected_bsz = 0
# Note: Padding is not necessary at generation time at present
# because all DDP workers process the same batch. Also, batch size at generation time
# can be different from that present in the checkpoint state
if (
not self.in_generation
and expected_bsz != 0
and input_shape[0] != expected_bsz
):
logger.warning(
f"padding batch with unexpected size {input_shape[0]} (expected: {expected_bsz})"
)
assert input_shape[0] < expected_bsz, f"{input_shape[0]} < {expected_bsz}"
padded_input = torch.zeros(
(expected_bsz, input_shape[1], input_shape[2]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: input_shape[0], :, :] = input
input = padded_input
padded_input_padding_mask = torch.ones(
(
expected_bsz,
input_shape[1],
),
dtype=torch.bool,
device=input.device,
)
if input_padding_mask is not None:
padded_input_padding_mask[: input_shape[0], :] = input_padding_mask
else:
padded_input_padding_mask[: input_shape[0], :] = False
input_padding_mask = padded_input_padding_mask
# Reshape into S tokens by dropping sequence dimension.
reshaped_input = input.reshape(-1, d_model)
reshaped_input_shape = reshaped_input.shape
reshaped_input_padding_mask = (
input_padding_mask.reshape(-1) if input_padding_mask is not None else None
)
# Doing padding here when --max-tokens is specified and not --batch-size or --max-sentences
# Pro of --max-tokens: more flexible for MT variable sequence lengths
# Con of --max-tokens: extra all-reduce needed to figure out optimal padding without running OOM
if expected_bsz == 0:
expected_dim = reshaped_input_shape[0] * torch.ones(
(1,), dtype=torch.long, device=input.device
)
dist.all_reduce(expected_dim, group=dist.group.WORLD, op=dist.ReduceOp.MAX)
expected_dim = int(expected_dim.item())
padded_input = torch.zeros(
(expected_dim, reshaped_input_shape[1]),
dtype=input.dtype,
layout=input.layout,
device=input.device,
)
padded_input[: reshaped_input_shape[0], :] = reshaped_input
reshaped_input = padded_input
padded_input_padding_mask = torch.ones(
(expected_dim,), dtype=torch.bool, device=padded_input.device
)
if reshaped_input_padding_mask is not None:
padded_input_padding_mask[
: reshaped_input_shape[0]
] = reshaped_input_padding_mask
else:
padded_input_padding_mask[: reshaped_input_shape[0]] = False
reshaped_input_padding_mask = padded_input_padding_mask
if has_tutel:
l_aux, self.metadata, C, E, indices_, locations_, gates_ = self.gate(
reshaped_input, reshaped_input_padding_mask
)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, "_tutel_dispatcher"):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(
E, C, M, dispatch_dtype=reshaped_input.dtype
)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
l_aux, combine_weights, dispatch_mask, self.metadata = self.gate(
reshaped_input, reshaped_input_padding_mask
)
dispatch_mask = dispatch_mask.to(input.dtype).permute(
1, 2, 0
) # S,E,C -> E,C,S
E, C, S = dispatch_mask.size()
M = reshaped_input.size(1)
assert reshaped_input.size() == (S, M)
# einsum("sec,sm->ecm")
dispatched_input = torch.mm(
dispatch_mask.view(E * C, S), reshaped_input
) # -> (E*C),M
if self.all2all_size > 1:
dispatched_input = self.all_to_all_wrapper(dispatched_input)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(
self.all2all_size, self.num_local_experts, -1, d_model
)
chunks = dispatched_input.chunk(self.num_local_experts, dim=1)
expert_outputs = []
for chunk, expert in zip(chunks, self.experts):
expert_outputs += [expert(chunk)]
expert_output = torch.cat(expert_outputs, dim=1)
if self.all2all_size > 1:
expert_output = self.all_to_all_wrapper(expert_output)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(
self.all2all_size * self.num_local_experts, -1, d_model
)
if has_tutel:
combined_output = self._tutel_dispatcher.decode(
expert_output.view(E * C, M)
)
else:
# einsum("sec,ecm->sm")
combined_output = combine_weights.view(S, E * C).mm(
expert_output.view(E * C, M)
)
# Remove padding here when --max-tokens is specified and not --batch-size or --max-sentences
combined_output = combined_output[: reshaped_input_shape[0], :]
combined_output = combined_output.reshape(input.shape)
combined_output = combined_output[: input_shape[0], :, :]
self.record_all_to_all_stats()
return combined_output, l_aux
def prepare_for_inference_(self):
self.in_generation = True
def all_to_all_wrapper(self, input: Tensor):
dummy_a2a = getattr(self.args, "dummy_a2a", False)
if dummy_a2a:
input = input.contiguous()
output = input.detach().clone()
return input
# always record times, since it is not a lot of overhead
# if we do not log it we simply clear it off in record_all_to_all_stats
cuda_start = torch.cuda.Event(enable_timing=True)
cuda_end = torch.cuda.Event(enable_timing=True)
cpu_start = time.time() * 1000
cuda_start.record()
output = _AllToAll.apply(self.all2all_group, input)
cuda_end.record()
cpu_end = time.time() * 1000
self.a2a_cpu_time_ms += cpu_end - cpu_start
self.a2a_cuda_event_intervals.append((cuda_start, cuda_end))
return output
def record_all_to_all_stats(self):
# controlled via an argument as we want to minimize any impact from torch.cuda.synchronize()
record_a2a_perf_stats = getattr(self.args, "record_a2a_perf_stats", False)
if record_a2a_perf_stats:
torch.cuda.synchronize()
self.metadata["all_to_all_cpu_time_ms"] = self.a2a_cpu_time_ms
a2a_cuda_time_ms = 0.0
for ev_start, ev_end in self.a2a_cuda_event_intervals:
a2a_cuda_time_ms += ev_start.elapsed_time(ev_end)
self.metadata["all_to_all_cuda_time_ms"] = a2a_cuda_time_ms
# reset stats
self.a2a_cpu_time_ms = 0.0
self.a2a_cuda_event_intervals = []
| EXA-1-master | exa/libraries/torchscale/torchscale/component/xmoe/moe_layer.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf
# Code is inspired by Top2GatingOnLogits from lingvo:
# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477
# NOTE: This is a mirror of the code in
# https://github.com/facebookresearch/fairscale/tree/master/fairscale/nn/moe
import math
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
from .moe_layer import fused_cumsum_sub_one, has_tutel
# use a fixed temperature to compute balance loss
TEMPERATURE_FOR_L_UAX = 0.07
# maximum capacity of 1 expert as a fraction of number of tokens in the batch
# Note: setting this to 1.0 causes inference to significantly slow down
EVAL_CAPACITY_TOKEN_FRACTION = 0.25
# logging
SAMPLE_FRACTION = 0.2
def top1gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
capacity_factor=1.0,
eval_mode=False,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
gate_obj=None,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = capacity_factor * S/E
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
gates1_s = (gates * mask1).sum(dim=1)
# Compute locations in capacity buffer
locations1 = fused_cumsum_sub_one(mask1)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
if has_tutel:
locations1_s = torch.sum(locations1 * mask1, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[
indices1_s,
],
[
locations1_s,
],
[
gates1_s,
],
)
# Remove locations outside capacity from mask
mask1 = mask1 * torch.lt(locations1, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
# locations1_sc = num_tokens * capacity
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
dispatch_mask = combine1_sec.bool()
if use_fp32:
return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine1_sec, dispatch_mask, metadata
class Top1Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
input_noise_type=None,
capacity_factor=1.0,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
) -> None:
# TODO: merge this to top2gate.py
#
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_xmoe = use_xmoe
self.use_fp32 = use_fp32
self.input_noise_type = input_noise_type
self.capacity_factor = capacity_factor
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top1gating(
logits,
mask,
use_fp32=self.use_fp32,
capacity_factor=self.capacity_factor,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
use_xmoe=self.use_xmoe,
gate_obj=self,
)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
def _get_gating_temperature(self, eps=1e-4):
if self.gating_t.data.item() < eps:
return eps
return self.gating_t
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
def one_hot(indices: torch.Tensor, num_classes: int, unsqueeze_indices=False) -> Tensor:
if unsqueeze_indices:
indices = indices.unsqueeze(-1)
assert indices.shape[-1] == 1, "last dimension of indices must be have size 1"
output = torch.zeros(
indices.shape[:-1] + (num_classes,), device=indices.device, dtype=indices.dtype
)
output.scatter_(len(output.shape) - 1, indices, 1)
return output
def entropy(probs):
logits = torch.distributions.utils.probs_to_logits(probs)
p_log_p = probs * logits
return -p_log_p.sum(-1)
def top2gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
eval_mode=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = 2S/E
capacity = 2 * math.ceil(num_tokens / num_experts)
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1, keepdim=True)
mask1 = one_hot(indices1_s, num_experts)
if second_expert_policy == "sampling":
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
else:
logits_w_noise = logits
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True)
mask2 = one_hot(indices2_s, num_experts)
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
if normalize_gate_prob_before_dropping:
# Normalize gate probabilities
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s = gates1_s / denom_s
gates2_s = gates2_s / denom_s
if second_expert_policy == "random":
sampled = (2 * gates2_s) > torch.rand_like(gates2_s)
mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0)
# Compute locations in capacity buffer
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype)
if batch_prioritized_routing:
# if batch_prioritized_routing:
importance_scores = -1 * gates.max(dim=1)[0]
sorted_mask1 = mask1[importance_scores.argsort(dim=0)]
sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1
importance_sorted_locations1 = sorted_cumsum1[
importance_scores.argsort(dim=0).argsort(dim=0)
]
sorted_mask2 = mask2[importance_scores.argsort(dim=0)]
sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2
importance_sorted_locations2 = sorted_cumsum2[
importance_scores.argsort(dim=0).argsort(dim=0)
]
importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True)
locations1, locations2 = (
importance_sorted_locations1,
importance_sorted_locations2,
)
else:
locations1 = fused_cumsum_sub_one(mask1)
locations2 = fused_cumsum_sub_one(mask2)
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
# for logging purposes
metadata["overflow_expert1"] = (
100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1)
)
metadata["overflow_expert2"] = (
100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2)
)
# Remove locations outside capacity from mask
mask1_, mask2_ = mask1, mask2
mask1 = mask1 * torch.lt(locations1, capacity)
mask2 = mask2 * torch.lt(locations2, capacity)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
expert2_hist = (
100
* torch.histc(
(indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert2_count"] = (expert2_hist == 0).sum()
expert2_hist = (
torch.sort(expert2_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum()
metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum()
if not normalize_gate_prob_before_dropping:
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
if has_tutel:
locations1_s = torch.sum(locations1 * mask1_, dim=1)
locations2_s = torch.sum(locations2 * mask2_, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[indices1_s, indices2_s],
[locations1_s, locations2_s],
[gates1_s, gates2_s],
)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
combine2_sec = torch.bmm(
# einsum("se,sc->sec")
gates2.unsqueeze(-1),
locations2_sc.to(gates2.dtype).unsqueeze(1),
)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
if use_fp32:
return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine_weights, dispatch_mask, metadata
class Top2Gate(torch.nn.Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = Top2Gate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
num_experts (ints):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(
self,
model_dim: int,
num_experts: int,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
use_xmoe=False,
) -> None:
super().__init__()
if not use_xmoe:
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
else:
self.wg_reduction = torch.nn.Linear(model_dim, 16, bias=False)
wg = torch.empty(num_experts, 16)
torch.nn.init.orthogonal_(wg, gain=0.32)
self.register_parameter("wg", torch.nn.Parameter(wg))
self.use_fp32 = use_fp32
self.second_expert_policy = second_expert_policy
self.normalize_gate_prob_before_dropping = normalize_gate_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.batch_prioritized_routing = batch_prioritized_routing
self.use_xmoe = use_xmoe
def forward(self, input, mask=None): # type: ignore
if self.use_xmoe:
input = self.wg_reduction(input)
with torch.no_grad():
wg_norm = self.wg.norm(p=2.0, dim=1, keepdim=True)
self.wg.mul_(1.5 / wg_norm)
logits = self._cosine(input, self.wg)
logits = self._make_finite(logits)
else:
logits = self.wg(input)
return top2gating(
logits,
mask,
use_fp32=self.use_fp32,
second_expert_policy=self.second_expert_policy,
normalize_gate_prob_before_dropping=self.normalize_gate_prob_before_dropping,
eval_mode=not self.training,
moe_eval_capacity_token_fraction=self.moe_eval_capacity_token_fraction,
batch_prioritized_routing=self.batch_prioritized_routing,
)
def _cosine(self, mat1, mat2, eps=1e-4):
assert mat1.dim() == 2
assert mat2.dim() == 2
# mat1 = F.normalize(mat1, p=2.0, dim=1, eps=eps)
mat2 = F.normalize(mat2.float(), p=2.0, dim=1, eps=eps)
return mat1.float().matmul(mat2.transpose(0, 1)).type_as(mat1)
def _make_finite(self, scores):
ok = scores.isfinite()
if not ok.all():
# NaNs here can break the assignment algorithm
scores[~ok] = scores[ok].min()
return scores
| EXA-1-master | exa/libraries/torchscale/torchscale/component/xmoe/routing.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
class DecoderLayer(nn.Module):
def __init__(
self,
args,
depth,
is_moe_layer=False,
is_encoder_decoder=False,
flash_attention=False
):
super().__init__()
self.args = args
self.embed_dim = args.decoder_embed_dim
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.decoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
if not is_encoder_decoder:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.decoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = self.build_ffn(
self.embed_dim,
self.args,
)
else:
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = LayerNorm(self.embed_dim, eps=args.layernorm_eps)
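        # DeepNorm residual scaling (DeepNet): alpha = (3N)^(1/4) for encoder-decoder
        # models and (2N)^(1/4) for decoder-only models, where N = args.decoder_layers.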
if args.deepnorm:
if is_encoder_decoder:
self.alpha = math.pow(3.0 * args.decoder_layers, 0.25)
else:
self.alpha = math.pow(2.0 * args.decoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=False,
encoder_decoder_attention=True,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
self_attn_rel_pos=None,
cross_attn_rel_pos=None,
):
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask,
rel_pos=self_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=None,
rel_pos=cross_attn_rel_pos,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x, l_aux = self.moe_layer(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, attn, None, l_aux
class Decoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
super().__init__(**kwargs)
self.args = args
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.decoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_decoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim, eps=args.layernorm_eps)
else:
self.layer_norm = None
self.self_attn_relative_position = None
self.cross_attn_relative_position = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.self_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if is_encoder_decoder:
self.cross_attn_relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.decoder_attention_heads,
)
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = math.pow(12.0 * args.decoder_layers, 0.25)
else:
init_scale = math.pow(8.0 * args.decoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(math.log(args.decoder_layers * 3))
else:
init_scale = math.sqrt(math.log(args.decoder_layers * 2))
for name, p in self.named_parameters():
if "encoder_attn" in name:
continue
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
return output_projection
def build_decoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = DecoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
tokens,
token_embedding=None,
incremental_state=None,
):
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
tokens, incremental_state=incremental_state
)
if incremental_state is not None:
tokens = tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
if token_embedding is None:
token_embedding = self.embed_tokens(tokens)
x = embed = self.embed_scale * token_embedding
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
prev_output_tokens,
self_attn_padding_mask=None,
encoder_out=None,
incremental_state=None,
features_only=False,
return_all_hiddens=False,
token_embeddings=None,
**kwargs
):
# embed tokens and positions
x, _ = self.forward_embedding(
prev_output_tokens, token_embeddings, incremental_state
)
# relative position
self_attn_rel_pos_bias = None
slen = prev_output_tokens.size(1)
if self.self_attn_relative_position is not None:
self_attn_rel_pos_bias = self.self_attn_relative_position(
batch_size=x.size(0), qlen=slen, klen=slen
)
if incremental_state is not None:
self_attn_rel_pos_bias = self_attn_rel_pos_bias[-1:, :, :]
cross_attn_rel_pos_bias = None
if self.cross_attn_relative_position is not None:
cross_attn_rel_pos_bias = self.cross_attn_relative_position(
batch_size=x.size(0),
qlen=slen,
klen=encoder_out["encoder_out"].size(1),
)
if incremental_state is not None:
cross_attn_rel_pos_bias = cross_attn_rel_pos_bias[-1:, :, :]
# decoder layers
inner_states = [x]
if encoder_out is None:
l_aux = []
else:
l_aux = encoder_out["l_aux"] if "l_aux" in encoder_out else []
for idx, layer in enumerate(self.layers):
if incremental_state is None:
self_attn_mask = torch.triu(
torch.zeros([x.size(1), x.size(1)])
.float()
.fill_(float("-inf"))
.type_as(x),
1,
)
else:
self_attn_mask = None
if idx not in incremental_state:
incremental_state[idx] = {}
x, layer_attn, _, l_aux_i = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state[idx] if incremental_state is not None else None,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
self_attn_rel_pos=self_attn_rel_pos_bias,
cross_attn_rel_pos=cross_attn_rel_pos_bias,
)
l_aux.append(l_aux_i)
inner_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only:
x = self.output_layer(x)
return x, {
"inner_states": inner_states,
"l_aux": l_aux,
"attn": None,
}
def output_layer(self, features):
return self.output_projection(features)
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/decoder.py |
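A minimal usage sketch for the decoder-only path. The embedding modules, layer count and shapes below are illustrative assumptions patterned on the unit tests later in this repo, not a prescribed setup:

import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding

config = DecoderConfig(vocab_size=64000, decoder_layers=2)
decoder = Decoder(
    config,
    embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
    embed_positions=PositionalEmbedding(config.max_target_positions, config.decoder_embed_dim),
)

prev_output_tokens = torch.randint(0, 64000, (2, 10))
logits, extra = decoder(prev_output_tokens)
print(logits.shape)  # torch.Size([2, 10, 64000])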
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
class EncoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.normalize_output = kwargs.pop("normalize_output", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_encoder_input_output_embed = kwargs.pop(
"share_encoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Vision
self.img_size = kwargs.pop("img_size", 224)
self.patch_size = kwargs.pop("patch_size", 16)
self.in_chans = kwargs.pop("in_chans", 3)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
class DecoderConfig(object):
def __init__(self, **kwargs):
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
class EncoderDecoderConfig(object):
def __init__(self, **kwargs):
self.encoder_embed_dim = kwargs.pop("encoder_embed_dim", 768)
self.encoder_attention_heads = kwargs.pop("encoder_attention_heads", 12)
self.encoder_ffn_embed_dim = kwargs.pop("encoder_ffn_embed_dim", 3072)
self.encoder_layers = kwargs.pop("encoder_layers", 12)
self.encoder_normalize_before = kwargs.pop("encoder_normalize_before", True)
self.decoder_embed_dim = kwargs.pop("decoder_embed_dim", 768)
self.decoder_attention_heads = kwargs.pop("decoder_attention_heads", 12)
self.decoder_ffn_embed_dim = kwargs.pop("decoder_ffn_embed_dim", 3072)
self.decoder_layers = kwargs.pop("decoder_layers", 12)
self.decoder_normalize_before = kwargs.pop("decoder_normalize_before", True)
self.activation_fn = kwargs.pop("activation_fn", "gelu")
self.dropout = kwargs.pop("dropout", 0.0)
self.drop_path_rate = kwargs.pop("drop_path_rate", 0.0)
self.attention_dropout = kwargs.pop("attention_dropout", 0.0)
self.activation_dropout = kwargs.pop("activation_dropout", 0.0)
self.no_scale_embedding = kwargs.pop("no_scale_embedding", True)
self.layernorm_embedding = kwargs.pop("layernorm_embedding", False)
self.moe_freq = kwargs.pop("moe_freq", 0)
self.moe_top1_expert = kwargs.pop("moe_top1_expert", False)
self.moe_expert_count = kwargs.pop("moe_expert_count", 0)
self.moe_gating_use_fp32 = kwargs.pop("moe_gating_use_fp32", True)
self.moe_eval_capacity_token_fraction = kwargs.pop(
"moe_eval_capacity_token_fraction", 0.25
)
self.moe_second_expert_policy = kwargs.pop("moe_second_expert_policy", "random")
self.moe_normalize_gate_prob_before_dropping = kwargs.pop(
"moe_normalize_gate_prob_before_dropping", False
)
self.use_xmoe = kwargs.pop("use_xmoe", False)
self.rel_pos_buckets = kwargs.pop("rel_pos_buckets", 0)
self.max_rel_pos = kwargs.pop("max_rel_pos", 0)
self.deepnorm = kwargs.pop("deepnorm", False)
self.subln = kwargs.pop("subln", True)
self.bert_init = kwargs.pop("bert_init", False)
self.multiway = kwargs.pop("multiway", False)
self.share_all_embeddings = kwargs.pop("share_all_embeddings", False)
self.share_decoder_input_output_embed = kwargs.pop(
"share_decoder_input_output_embed", False
)
self.max_source_positions = kwargs.pop("max_source_positions", 1024)
self.max_target_positions = kwargs.pop("max_target_positions", 1024)
self.no_output_layer = kwargs.pop("no_output_layer", False)
self.layernorm_eps = kwargs.pop("layernorm_eps", 1e-5)
# Text
self.vocab_size = kwargs.pop("vocab_size", -1)
# Fairscale
self.checkpoint_activations = kwargs.pop("checkpoint_activations", False)
self.fsdp = kwargs.pop("fsdp", False)
self.ddp_rank = kwargs.pop("ddp_rank", 0)
self.xpos_rel_pos = kwargs.pop("xpos_rel_pos", False)
self.xpos_scale_base = kwargs.pop("xpos_scale_base", 512)
if self.deepnorm:
self.encoder_normalize_before = False
self.decoder_normalize_before = False
self.subln = False
if self.subln:
self.encoder_normalize_before = True
self.decoder_normalize_before = True
self.deepnorm = False
if self.use_xmoe:
self.moe_normalize_gate_prob_before_dropping = True
self.moe_second_expert_policy = "random"
assert self.moe_freq > 0 and self.moe_expert_count > 0
def override(self, args):
for hp in self.__dict__.keys():
if getattr(args, hp, None) is not None:
self.__dict__[hp] = getattr(args, hp, None)
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/config.py |
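A small sketch of how the normalization flags above resolve and how `override` works; the values are arbitrary:

from argparse import Namespace
from torchscale.architecture.config import EncoderConfig

# deepnorm switches the encoder to post-LN and disables subln
cfg = EncoderConfig(deepnorm=True)
print(cfg.encoder_normalize_before, cfg.subln)  # False False

# override() copies every matching, non-None attribute from an args namespace
cfg.override(Namespace(encoder_layers=24, dropout=0.1))
print(cfg.encoder_layers, cfg.dropout)  # 24 0.1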
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from torchscale.architecture.decoder import Decoder
from torchscale.architecture.encoder import Encoder
class EncoderDecoder(nn.Module):
def __init__(
self,
args,
encoder_embed_tokens=None,
encoder_embed_positions=None,
decoder_embed_tokens=None,
decoder_embed_positions=None,
output_projection=None,
**kwargs
):
super().__init__()
self.args = args
if args.share_all_embeddings:
args.share_decoder_input_output_embed = True
self.encoder = Encoder(
args,
encoder_embed_tokens,
encoder_embed_positions,
is_encoder_decoder=True,
**kwargs
)
if args.share_all_embeddings and decoder_embed_tokens is None:
decoder_embed_tokens = self.encoder.embed_tokens
self.decoder = Decoder(
args,
decoder_embed_tokens,
decoder_embed_positions,
output_projection,
is_encoder_decoder=True,
**kwargs
)
def forward(
self,
src_tokens,
prev_output_tokens,
return_all_hiddens=False,
features_only=False,
**kwargs
):
encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/encoder_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import numpy as np
import torch
import torch.nn as nn
from fairscale.nn import checkpoint_wrapper, wrap
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from torchscale.architecture.utils import init_bert_params
from torchscale.component.droppath import DropPath
from torchscale.component.feedforward_network import FeedForwardNetwork, make_experts
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayWrapper, set_split_position
from torchscale.component.relative_position_bias import RelativePositionBias
from torchscale.component.xmoe.moe_layer import MOELayer
from torchscale.component.xmoe.routing import Top1Gate, Top2Gate
class EncoderLayer(nn.Module):
def __init__(self, args, depth, is_moe_layer=False, is_encoder_decoder=False):
super().__init__()
self.args = args
self.embed_dim = args.encoder_embed_dim
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
self.dropout_module = torch.nn.Dropout(args.dropout)
if args.drop_path_rate > 0:
drop_path_prob = np.linspace(0, args.drop_path_rate, args.encoder_layers)[
depth
]
self.drop_path = DropPath(drop_path_prob)
else:
self.drop_path = None
self.normalize_before = args.encoder_normalize_before
self.is_moe_layer = is_moe_layer
self.ffn_dim = args.encoder_ffn_embed_dim
if not self.is_moe_layer:
self.ffn = MultiwayWrapper(
args,
self.build_ffn(
self.embed_dim,
self.args,
),
)
else:
assert not self.args.multiway
if args.moe_top1_expert:
gate = Top1Gate(
self.embed_dim,
args.moe_expert_count,
use_fp32=args.moe_gating_use_fp32,
moe_eval_capacity_token_fraction=args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
else:
gate = Top2Gate(
self.embed_dim,
args.moe_expert_count,
args.moe_gating_use_fp32,
args.moe_second_expert_policy,
args.moe_normalize_gate_prob_before_dropping,
args.moe_eval_capacity_token_fraction,
use_xmoe=args.use_xmoe,
)
experts = make_experts(args, self.embed_dim, self.ffn_dim)
self.moe_layer = MOELayer(gate, experts, args)
self.final_layer_norm = MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if args.deepnorm:
if is_encoder_decoder:
self.alpha = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
* 0.81
)
else:
self.alpha = math.pow(2.0 * args.encoder_layers, 0.25)
else:
self.alpha = 1.0
def build_ffn(self, embed_dim, args):
return FeedForwardNetwork(
embed_dim,
self.ffn_dim,
args.activation_fn,
args.dropout,
args.activation_dropout,
args.layernorm_eps,
args.subln,
)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
args,
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
encoder_decoder_attention=False,
subln=args.subln,
)
def residual_connection(self, x, residual):
return residual * self.alpha + x
def forward(self, x, encoder_padding_mask, attn_mask=None, rel_pos=None, multiway_split_position=None, incremental_state=None):
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
rel_pos=rel_pos,
incremental_state=incremental_state,
)
x = self.dropout_module(x)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
if not self.is_moe_layer:
x = self.ffn(x)
l_aux = None
else:
x = x.transpose(0, 1)
x, l_aux = self.moe_layer(x)
x = x.transpose(0, 1)
if self.drop_path is not None:
x = self.drop_path(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x, l_aux
class Encoder(nn.Module):
def __init__(
self,
args,
embed_tokens=None,
embed_positions=None,
output_projection=None,
is_encoder_decoder=False,
**kwargs
):
self.args = args
super().__init__(**kwargs)
self.dropout_module = torch.nn.Dropout(args.dropout)
embed_dim = args.encoder_embed_dim
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_tokens = embed_tokens
self.embed_positions = embed_positions
if (
output_projection is None
and not is_encoder_decoder
and not args.no_output_layer
and args.vocab_size > 0
):
self.output_projection = self.build_output_projection(args)
else:
self.output_projection = output_projection
if args.layernorm_embedding:
self.layernorm_embedding = MultiwayWrapper(
args, LayerNorm(embed_dim, eps=args.layernorm_eps), dim=1
)
else:
self.layernorm_embedding = None
self.layers = nn.ModuleList([])
moe_freq = args.moe_freq
for i in range(args.encoder_layers):
is_moe_layer = moe_freq != 0 and (i + 1) % moe_freq == 0
self.layers.append(
self.build_encoder_layer(
args,
depth=i,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before and args.normalize_output:
self.layer_norm = MultiwayWrapper(args, LayerNorm(embed_dim, eps=args.layernorm_eps))
else:
self.layer_norm = None
if args.rel_pos_buckets > 0 and args.max_rel_pos > 0:
self.relative_position = RelativePositionBias(
num_buckets=args.rel_pos_buckets,
max_distance=args.max_rel_pos,
n_heads=args.encoder_attention_heads,
)
else:
self.relative_position = None
if args.bert_init:
self.apply(init_bert_params)
if args.deepnorm:
if is_encoder_decoder:
init_scale = (
math.pow(
math.pow(args.encoder_layers, 4) * args.decoder_layers, 0.0625
)
/ 1.15
)
else:
init_scale = math.pow(8.0 * args.encoder_layers, 0.25)
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.div_(init_scale)
if args.subln:
if is_encoder_decoder:
init_scale = math.sqrt(
math.log(3 * args.decoder_layers)
* math.log(2 * args.encoder_layers)
/ 3
)
else:
init_scale = math.sqrt(math.log(args.encoder_layers * 2))
for name, p in self.named_parameters():
if (
"fc1" in name
or "fc2" in name
or "out_proj" in name
or "v_proj" in name
):
p.data.mul_(init_scale)
def build_output_projection(
self,
args,
):
if args.share_encoder_input_output_embed:
assert args.encoder_embedding_type == "language"
output_projection = torch.nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = self.embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.encoder_embed_dim, args.vocab_size, bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.encoder_embed_dim**-0.5
)
return output_projection
def build_encoder_layer(
self, args, depth, is_moe_layer=False, is_encoder_decoder=False
):
layer = EncoderLayer(
args,
depth,
is_moe_layer=is_moe_layer,
is_encoder_decoder=is_encoder_decoder,
)
if args.checkpoint_activations:
layer = checkpoint_wrapper(layer)
if args.fsdp:
layer = wrap(layer)
return layer
def forward_embedding(
self,
src_tokens,
token_embedding=None,
positions=None,
):
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
if src_tokens is not None:
x = embed + self.embed_positions(src_tokens, positions=positions)
else:
x = embed + self.embed_positions(x, positions=positions)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, embed
def forward(
self,
src_tokens,
encoder_padding_mask=None,
attn_mask=None,
return_all_hiddens=False,
token_embeddings=None,
multiway_split_position=None,
features_only=False,
incremental_state=None,
positions=None,
**kwargs
):
assert src_tokens is not None or token_embeddings is not None
if encoder_padding_mask is None:
if src_tokens is not None:
encoder_padding_mask = torch.zeros_like(
src_tokens, device=src_tokens.device
).bool()
else:
encoder_padding_mask = torch.zeros(
[token_embeddings.size(0), token_embeddings.size(1)],
device=token_embeddings.device,
).bool()
if multiway_split_position is not None:
assert self.args.multiway
self.apply(set_split_position(multiway_split_position))
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings, positions)
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
rel_pos_bias = None
if self.relative_position is not None:
rel_pos_bias = self.relative_position(
batch_size=x.size(0), qlen=x.size(1), klen=x.size(1)
)
# incremental_state is not None during inference if we use the bidirectional encoder as a generator as in s2s-ft (https://arxiv.org/abs/2110.13640)
l_aux = []
for idx, layer in enumerate(self.layers):
x, l_aux_i = layer(
x,
encoder_padding_mask=encoder_padding_mask if incremental_state is None else None,
attn_mask=attn_mask,
rel_pos=rel_pos_bias,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state[idx] if incremental_state is not None else None,
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
l_aux.append(l_aux_i)
if self.layer_norm is not None:
x = self.layer_norm(x)
if not features_only and self.output_projection is not None:
x = self.output_projection(x)
return {
"encoder_out": x,
"encoder_embedding": encoder_embedding,
"encoder_padding_mask": encoder_padding_mask,
"encoder_states": encoder_states,
"l_aux": l_aux,
}
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/encoder.py |
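A minimal encoder-only sketch with pre-computed token embeddings and an explicit padding mask; the layer count, shapes and mask pattern are illustrative assumptions:

import torch
from torchscale.architecture.config import EncoderConfig
from torchscale.architecture.encoder import Encoder

config = EncoderConfig(encoder_layers=2)
encoder = Encoder(config)

token_embeddings = torch.rand(2, 10, config.encoder_embed_dim)
padding_mask = torch.zeros(2, 10, dtype=torch.bool)
padding_mask[:, -2:] = True  # treat the last two positions of each row as padding

out = encoder(
    src_tokens=None,
    token_embeddings=token_embeddings,
    encoder_padding_mask=padding_mask,
)
print(out["encoder_out"].shape)  # torch.Size([2, 10, 768])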
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
from torchscale.component.multihead_attention import MultiheadAttention
from torchscale.component.multiway_network import MultiwayNetwork
def init_bert_params(module):
def normal_(data):
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
if isinstance(module.q_proj, MultiwayNetwork):
normal_(module.q_proj.A.weight.data)
normal_(module.q_proj.B.weight.data)
normal_(module.k_proj.A.weight.data)
normal_(module.k_proj.B.weight.data)
normal_(module.v_proj.A.weight.data)
normal_(module.v_proj.B.weight.data)
else:
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
| EXA-1-master | exa/libraries/torchscale/torchscale/architecture/utils.py |
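A tiny sketch of applying `init_bert_params` to an ordinary module tree; the toy module here is arbitrary:

import torch.nn as nn
from torchscale.architecture.utils import init_bert_params

# Linear and Embedding weights are re-drawn from N(0, 0.02); biases and the
# padding row of the Embedding are zeroed; other modules are left untouched.
toy = nn.Sequential(nn.Embedding(100, 16, padding_idx=0), nn.Linear(16, 4))
toy.apply(init_bert_params)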
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
from torchscale.architecture.encoder import Encoder
from torchscale.component.embedding import (
PositionalEmbedding,
TextEmbedding,
VisionEmbedding,
)
from torchscale.component.multiway_network import MutliwayEmbedding
class BEiT3(nn.Module):
def __init__(self, args, **kwargs):
super().__init__()
self.args = args
assert args.multiway
assert args.vocab_size > 0
assert not args.share_encoder_input_output_embed
self.text_embed = TextEmbedding(args.vocab_size, args.encoder_embed_dim)
self.vision_embed = VisionEmbedding(
args.img_size,
args.patch_size,
args.in_chans,
args.encoder_embed_dim,
contain_mask_token=True,
prepend_cls_token=True,
)
# being consistent with Fairseq, which starts from 2 for position embedding
embed_positions = MutliwayEmbedding(
modules=[
PositionalEmbedding(self.vision_embed.num_position_embeddings() + 2, args.encoder_embed_dim),
PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim),
],
dim=1,
)
self.encoder = Encoder(
args,
embed_tokens=None,
embed_positions=embed_positions,
output_projection=None,
is_encoder_decoder=False,
)
def forward(
self,
textual_tokens=None,
visual_tokens=None,
text_padding_position=None,
attn_mask=None,
vision_masked_position=None,
incremental_state=None,
positions=None,
):
assert textual_tokens is not None or visual_tokens is not None
if textual_tokens is None:
x = self.vision_embed(visual_tokens, vision_masked_position)
encoder_padding_mask = None
multiway_split_position = -1
elif visual_tokens is None:
x = self.text_embed(textual_tokens)
encoder_padding_mask = text_padding_position
multiway_split_position = 0
else:
x1 = self.vision_embed(visual_tokens, vision_masked_position)
multiway_split_position = x1.size(1)
x2 = self.text_embed(textual_tokens)
x = torch.cat([x1, x2], dim=1)
if text_padding_position is not None:
encoder_padding_mask = torch.cat(
[
torch.zeros(x1.shape[:-1]).to(x1.device).bool(),
text_padding_position,
],
dim=1,
)
else:
encoder_padding_mask = None
encoder_out = self.encoder(
src_tokens=None,
encoder_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
token_embeddings=x,
multiway_split_position=multiway_split_position,
incremental_state=incremental_state,
positions=positions,
)
encoder_out["multiway_split_position"] = multiway_split_position
return encoder_out
| EXA-1-master | exa/libraries/torchscale/torchscale/model/BEiT3.py |
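A minimal sketch of running BEiT3 on a paired image/text batch; the config values follow the EncoderConfig defaults and the batch shapes are assumptions:

import torch
from torchscale.architecture.config import EncoderConfig
from torchscale.model.BEiT3 import BEiT3

config = EncoderConfig(
    multiway=True,
    vocab_size=64000,
    img_size=224,
    patch_size=16,
    encoder_layers=2,
)
model = BEiT3(config)

images = torch.randn(2, 3, 224, 224)
text = torch.randint(0, 64000, (2, 16))
out = model(textual_tokens=text, visual_tokens=images)
print(out["encoder_out"].shape)
# torch.Size([2, 213, 768]) -- 196 patches + 1 CLS token, then 16 text tokens
print(out["multiway_split_position"])  # 197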
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/torchscale/model/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"decoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{"deepnorm": True, "subln": False, "decoder_normalize_before": False},
{"bert_init": True},
{"multiway": True},
{"share_decoder_input_output_embed": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
config = DecoderConfig(**args)
model = Decoder(config)
prev_output_tokens = torch.ones(2, 10)
token_embeddings = torch.rand(2, 10, config.decoder_embed_dim)
model(
prev_output_tokens=prev_output_tokens,
token_embeddings=token_embeddings,
features_only=True,
)
| EXA-1-master | exa/libraries/torchscale/tests/test_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import EncoderConfig
from torchscale.architecture.encoder import Encoder
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"encoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{"deepnorm": True, "subln": False, "encoder_normalize_before": False},
{"bert_init": True},
{"multiway": True},
{"share_encoder_input_output_embed": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_encoder(args):
config = EncoderConfig(**args)
model = Encoder(config)
token_embeddings = torch.rand(2, 10, config.encoder_embed_dim)
model(src_tokens=None, token_embeddings=token_embeddings)
| EXA-1-master | exa/libraries/torchscale/tests/test_encoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/tests/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import pytest
import torch
from torchscale.architecture.config import EncoderDecoderConfig
from torchscale.architecture.encoder_decoder import EncoderDecoder
from torchscale.component.embedding import PositionalEmbedding, TextEmbedding
testcases = [
{},
{"vocab_size": 64000},
{"activation_fn": "relu"},
{"drop_path_rate": 0.1},
{"encoder_normalize_before": False, "decoder_normalize_before": False},
{"no_scale_embedding": False},
{"layernorm_embedding": True},
{"rel_pos_buckets": 32, "max_rel_pos": 256},
{
"deepnorm": True,
"subln": False,
"encoder_normalize_before": False,
"decoder_normalize_before": False,
},
{"bert_init": True},
{"multiway": True},
{"share_decoder_input_output_embed": True},
{"share_all_embeddings": True},
{"checkpoint_activations": True},
{"fsdp": True},
]
@pytest.mark.parametrize("args", testcases)
def test_decoder(args):
config = EncoderDecoderConfig(**args)
model = EncoderDecoder(
config,
encoder_embed_tokens=TextEmbedding(64000, config.encoder_embed_dim),
decoder_embed_tokens=TextEmbedding(64000, config.decoder_embed_dim),
encoder_embed_positions=PositionalEmbedding(
config.max_source_positions, config.encoder_embed_dim
),
decoder_embed_positions=PositionalEmbedding(
config.max_target_positions, config.decoder_embed_dim
),
)
src_tokens = torch.ones(2, 20).long()
prev_output_tokens = torch.ones(2, 10).long()
model(
src_tokens=src_tokens,
prev_output_tokens=prev_output_tokens,
features_only=True,
)
| EXA-1-master | exa/libraries/torchscale/tests/test_encoder_decoder.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/examples/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.generate import cli_main
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/generate.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.interactive import cli_main
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/interactive.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/train.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import json
import logging
import os
from argparse import Namespace
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import sentencepiece as spm
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
from .data.mlm_loader import MLMLoader
logger = logging.getLogger(__name__)
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
@dataclass
class PretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING,
metadata={
"help": "colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner"
},
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="complete",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
mask_prob: float = field(
default=0.15,
metadata={"help": "probability of replacing a token with mask"},
)
leave_unmasked_prob: float = field(
default=0.1,
metadata={"help": "probability that a masked token is unmasked"},
)
random_token_prob: float = field(
default=0.1,
metadata={"help": "probability of replacing a token with a random token"},
)
freq_weighted_replacement: bool = field(
default=False,
metadata={"help": "sample random replacement words based on word frequencies"},
)
mask_whole_words: bool = field(
default=False,
metadata={"help": "mask whole words; you may also want to set --bpe"},
)
mask_multiple_length: int = field(
default=1,
metadata={"help": "repeat the mask indices multiple times"},
)
mask_stdev: float = field(
default=0.0,
metadata={"help": "stdev of the mask length"},
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
seed: int = II("common.seed")
span_length: float = field(
default=3.0,
metadata={"help": "average span length for masking"},
)
remove_source_sentinel: bool = field(
default=False,
metadata={"help": "remove the source sentinel for the span corruption task"},
)
remove_target_sentinel: bool = field(
default=False,
metadata={"help": "remove the target sentinel for the span corruption task"},
)
batch_read_ahead: int = field(
default=100000,
metadata={"help": "batch read ahead size for infinibatch"},
)
required_batch_size_multiple: int = II("dataset.required_batch_size_multiple")
spm_model: str = field(
default="",
metadata={"help": "sentencepice model to tokenize the data"},
)
dict_file: str = field(
default="",
metadata={"help": ""},
)
pad_to_max_length: bool = field(
default=False,
)
@register_task("pretraining", dataclass=PretrainingConfig)
class PLMTask(FairseqTask):
def __init__(self, cfg, dictionary, tokenizer):
super().__init__(cfg)
self.cfg = cfg
self.dictionary = dictionary
self.tokenizer = tokenizer
self.seed = cfg.seed
self.mask_idx = dictionary.index("<mask>")
@classmethod
def setup_task(cls, cfg, **kwargs):
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
if cfg.dict_file != "":
dictionary = Dictionary.load(cfg.dict_file)
else:
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
# add mask token
dictionary.add_symbol("<mask>")
for i in range(100):
dictionary.add_symbol(f"<mask_{i}>")
dictionary.pad_to_multiple_(cfg.required_batch_size_multiple)
logger.info("dictionary: {} types".format(len(dictionary)))
# tokenizer = SentencepieceBPE(Namespace(sentencepiece_model=cfg.spm_model))
tokenizer = spm.SentencePieceProcessor()
tokenizer.Load(cfg.spm_model)
return cls(cfg, dictionary, tokenizer)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
self.datasets[split] = {
"data": json.load(open(f"{self.cfg.data}/json/{split}.json")),
"data_dir": self.cfg.data,
"shuffle": True if split == "train" else False,
}
self.datasets[split] = Namespace(**self.datasets[split])
def dataset(self, split):
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
**kwargs,
):
return MLMLoader(
self.cfg,
dataset,
self.dictionary,
self.tokenizer,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/pretraining.py |
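A sketch of instantiating the task directly from Python; the paths are placeholders and the import assumes the working directory is examples/fairseq (normally the task is selected with --task pretraining through fairseq-train):

from tasks.pretraining import PLMTask, PretrainingConfig

# Placeholder paths: a real SentencePiece model, a fairseq dictionary and a
# data directory containing json/{train,valid}.json are required to run this.
cfg = PretrainingConfig(
    data="/path/to/data",
    spm_model="/path/to/sentencepiece.model",
    dict_file="/path/to/dict.txt",
    tokens_per_sample=1024,
    seed=1,                          # II() interpolations need concrete values here
    required_batch_size_multiple=8,
)
task = PLMTask.setup_task(cfg)
task.load_dataset("train")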
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("tasks." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group("Additional command-line arguments")
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
from infinibatch.iterators import CheckpointableIterator
from . import utils
class BaseBatchGen(CheckpointableIterator):
"""
This is a base class for batch generators that use infinibatch
"""
def __init__(self):
self._iter = None
self.epoch = 1
self.next_epoch_idx = 1
self.sharded_checkpoint = True
self.should_close_after_finished = True
def _build_iter(self):
"""
Build infinibatch iterator and assign to self._iter
"""
raise NotImplementedError()
def _move_to_tensor(self, batch):
def to_tensor(x):
return torch.tensor(x)
return utils.apply_to_sample(to_tensor, batch)
@property
def iterator(self):
if self._iter is None:
raise NotImplementedError("_build_iter() must called first")
return self._iter
def __iter__(self):
if self._iter is None:
raise NotImplementedError("_build_iter() must called first")
return self._iter
def __next__(self):
return next(self._iter)
def setstate(self, value):
self._iter.setstate(value)
def getstate(self):
return self._iter.getstate()
def close(self):
self._iter.close()
def __len__(self) -> int:
        return 819200000  # arbitrarily large: infinibatch pipelines are effectively unbounded
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
return self
def end_of_epoch(self) -> bool:
return False
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return self.getstate()
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.setstate(state_dict)
@property
def first_batch(self):
return "DUMMY"
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/data/basic_loader.py |
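A toy subclass to show the contract BaseBatchGen expects: `_build_iter` assembles an infinibatch pipeline and assigns it to `self._iter`. The sample data and pipeline are illustrative, and the import assumes examples/fairseq is on the path:

import numpy as np
from infinibatch import iterators
from tasks.data.basic_loader import BaseBatchGen

class ToyBatchGen(BaseBatchGen):
    def __init__(self, samples, batch_size):
        super().__init__()
        self.samples = samples
        self.batch_size = batch_size
        self._build_iter()

    def _build_iter(self):
        source = iterators.ChunkedSourceIterator(self.samples)
        batches = iterators.FixedBatchIterator(source, self.batch_size)
        # numpy arrays inside each batch are converted to torch tensors
        self._iter = iterators.MapIterator(batches, self._move_to_tensor)

gen = ToyBatchGen([np.array([1, 2]), np.array([3, 4]), np.array([5, 6])], batch_size=2)
print(next(iter(gen)))  # [tensor([1, 2]), tensor([3, 4])]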
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/data/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import collections
from random import Random
from typing import Dict, Iterable, Optional
import numpy as np
from infinibatch import iterators
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if isinstance(x, np.ndarray):
return f(x)
elif isinstance(x, collections.OrderedDict):
# OrderedDict has attributes that needs to be preserved
od = collections.OrderedDict(
(key, _apply(value)) for key, value in x.items()
)
od.__dict__ = x.__dict__
return od
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
class NativeCheckpointableIterator(iterators.CheckpointableIterator):
def __init__(self, iterable: Iterable):
self._input_iterable = iterable
self.setstate(None)
def getstate(self) -> Dict:
return {"num_items_yielded": self._num_items_yielded}
def setstate(self, checkpoint: Optional[Dict]):
self._iterator = iter(self._input_iterable)
self._num_items_yielded = (
iterators._advance_iterator(self._iterator, checkpoint["num_items_yielded"])
if checkpoint is not None
else 0
)
def __next__(self):
item = next(self._iterator)
self._num_items_yielded += 1
return item
def close(self):
pass
class WeightIterator(object):
def __init__(self, weights, seed):
self.weights = weights
self.seed = seed
self.control_index = list(range(len(weights)))
self.setstate(None)
def __iter__(self):
return self
def getstate(self):
return {"random_state": self._random_state}
def setstate(self, checkpoint):
self._random_state = checkpoint["random_state"] if checkpoint else None
self._random = (
None # this will trigger the lazy initialization in self.__next__
)
def __next__(self):
if self._random is None:
self._random = Random(self.seed)
if self._random_state is not None:
self._random.setstate(self._random_state)
idx = self._random.choices(self.control_index, self.weights)[0]
self._random_state = self._random.getstate()
return idx
def close(self):
pass
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/data/utils.py |
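A short sketch of the two helpers above as they are combined in MLMLoader: WeightIterator samples dataset indices according to the given weights, and NativeCheckpointableIterator wraps it so the sampling stream can be checkpointed. The import assumes examples/fairseq is on the path:

from tasks.data.utils import NativeCheckpointableIterator, WeightIterator

control = NativeCheckpointableIterator(WeightIterator([0.7, 0.3], seed=1))
picks = [next(control) for _ in range(5)]
print(picks)               # e.g. [0, 0, 1, 0, 0] -- index 0 is drawn more often
print(control.getstate())  # {'num_items_yielded': 5}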
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
import itertools
import os
import numpy as np
from infinibatch import iterators
from .basic_loader import BaseBatchGen
from .utils import NativeCheckpointableIterator, WeightIterator
class MLMLoader(BaseBatchGen):
def __init__(
self,
args,
dataset,
dictionary,
tokenizer,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
):
super().__init__()
self.args = args
self.data = dataset.data
self.data_dir = dataset.data_dir
self.shuffle = dataset.shuffle
self.dictionary = dictionary
self.tokenizer = tokenizer
self.max_tokens = max_tokens
self.max_sentences = max_sentences
self.max_positions = max_positions
self.tokens_per_sample = args.tokens_per_sample
self.sample_break_mode = args.sample_break_mode
self.ignore_invalid_inputs = ignore_invalid_inputs
self.required_batch_size_multiple = required_batch_size_multiple
self.seed = str(seed)
self.num_shards = num_shards
self.shard_id = shard_id
self.batch_read_ahead = args.batch_read_ahead
self._build_iter()
def _build_iter(self):
tokenized_lines = self._multilingual_tokenize()
self.padded_batches = self._batchify(tokenized_lines)
prefetch_batches = iterators.PrefetchIterator(
self.padded_batches,
buffer_size=10000,
buffer_in_main_process=True,
log_empty_buffer_warning=True and self.shard_id == 0,
)
prefetch_batches = iterators.MapIterator(prefetch_batches, self._move_to_tensor)
self._iter = prefetch_batches
def _multilingual_tokenize(self):
multilingual_iters = []
weights = []
for data in self.data:
multilingual_iters.append(self._tokenize(data))
if "weight" in data:
weights.append(float(data["weight"]))
else:
weights.append(int(data["count"]))
if len(multilingual_iters) == 1:
return multilingual_iters[0]
        sampling_iterator = WeightIterator(weights, self.seed)  # WeightIterator requires a seed
control_iterator = NativeCheckpointableIterator(sampling_iterator)
tokenized_lines = iterators.MultiplexIterator(
control_iterator, multilingual_iters
)
return tokenized_lines
def _tokenize(self, data):
"""
data:
{
'source': list[Path],
'source_lang': str,
'count': int,
'weight': float,
'name': str,
}
"""
dataset = list(
zip(
data["source"],
itertools.repeat(data["source_lang"]),
)
)
if self.shuffle:
chunk_files = iterators.InfinitePermutationSourceIterator(
dataset,
seed=self.seed,
shuffle=self.shuffle,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
else:
chunk_files = iterators.ChunkedSourceIterator(
dataset,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
tokenized_lines = iterators.SelectManyIterator(
chunk_files, lambda files: self._read_from_files(*files)
)
tokenized_lines = iterators.SamplingRandomMapIterator(
tokenized_lines, self._prepare, self.seed
)
return tokenized_lines
def _batchify(self, lines):
if self.max_sentences is not None:
if self.batch_read_ahead > 0:
lines = iterators.BlockwiseShuffleIterator(
lines, self.batch_read_ahead, self.seed
)
batches = iterators.FixedBatchIterator(lines, self.max_sentences)
else:
def dynamic_batch_size(sample):
lengths = [len(x) for x in sample]
batch_size = self.max_tokens // max(lengths)
batch_size = (
batch_size
// self.required_batch_size_multiple
* self.required_batch_size_multiple
)
return max(1, batch_size)
batches = iterators.BucketedReadaheadBatchIterator(
lines,
read_ahead=self.batch_read_ahead,
key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,
batch_size=dynamic_batch_size,
shuffle=self.shuffle,
seed=self.seed,
)
def collate(batch):
batch_size = len(batch)
mlm_source_max_length = max([len(x[0]) for x in batch])
mlm_target_max_length = max([len(x[1]) for x in batch])
s2s_source_max_length = max([len(x[2]) for x in batch])
s2s_target_max_length = max([len(x[3]) for x in batch])
if self.args.pad_to_max_length:
mlm_source_max_length = self.args.tokens_per_sample
mlm_target_max_length = self.args.tokens_per_sample
mlm_source_ids = np.full(
shape=(batch_size, mlm_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
mlm_target_ids = np.full(
shape=(batch_size, mlm_target_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_source_ids = np.full(
shape=(batch_size, s2s_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_target_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_prev_input_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
for i, (
mlm_input_ids,
mlm_label_ids,
s2s_input_ids,
s2s_label_ids,
) in enumerate(batch):
mlm_source_ids[i, : len(mlm_input_ids)] = mlm_input_ids
mlm_target_ids[i, : len(mlm_label_ids)] = mlm_label_ids
s2s_source_ids[i, : len(s2s_input_ids)] = s2s_input_ids
s2s_target_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[1:]
s2s_prev_input_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[:-1]
ret_batch = {
"net_input": {
"src_tokens": mlm_source_ids.astype(np.int64),
},
"target": mlm_target_ids.astype(np.int64),
"nsentences": batch_size,
"ntokens": sum([len(x[0]) for x in batch]),
}
return ret_batch
padded_batches = iterators.MapIterator(batches, collate)
return padded_batches
def _prepare(self, _random, doc):
nonmasked_tokens, masked_tokens = self._mask_lm(_random, doc)
nonnoise_spans, noise_spans = self._span_corruption(_random, doc)
return nonmasked_tokens, masked_tokens, nonnoise_spans, noise_spans
def _mask_lm(self, _random, doc):
def mask_tokens():
return "<mask>"
length = len(doc)
mask_tokens_num = int(length * self.args.mask_prob)
mask_tokens_num = min(max(mask_tokens_num, 1), length - 1)
possible_mask_positions = _random.sample(range(length), k=mask_tokens_num)
possible_mask_positions = sorted(possible_mask_positions)
nonmasked_tokens = copy.deepcopy(doc)
masked_tokens = [self.dictionary.pad() for _ in range(len(doc))]
for position in possible_mask_positions:
# masked_tokens.append(nonmasked_tokens[position])
masked_tokens[position] = nonmasked_tokens[position]
nonmasked_tokens[position] = self.dictionary.indices[mask_tokens()]
return nonmasked_tokens, masked_tokens
def _span_corruption(self, _random, doc):
def mask_tokens(i):
return f"<mask_{i}>"
length = len(doc)
noise_tokens_num = int(length * self.args.mask_prob)
noise_tokens_num = min(max(noise_tokens_num, 1), length - 1)
noise_spans_num = int(noise_tokens_num / self.args.span_length)
noise_spans_num = max(noise_spans_num, 1)
nonnoise_tokens_num = length - noise_tokens_num
if noise_spans_num == 1:
noise_split_positions = [0, noise_tokens_num]
else:
possible_split_positions = list(range(1, noise_tokens_num))
_random.shuffle(possible_split_positions)
noise_split_positions = sorted(
possible_split_positions[: noise_spans_num - 1]
)
noise_split_positions = [0] + noise_split_positions + [noise_tokens_num]
possible_insert_positions = list(range(nonnoise_tokens_num))
_random.shuffle(possible_insert_positions)
noise_insert_positions = sorted(possible_insert_positions[:noise_spans_num])
nonnoise_spans, noise_spans = [], []
last_end = 0
for i in range(noise_spans_num):
start_pos = noise_insert_positions[i] + noise_split_positions[i]
end_pos = noise_insert_positions[i] + noise_split_positions[i + 1]
mask_id = self.dictionary.indices[mask_tokens(i)]
if getattr(self.args, "remove_target_sentinel", False):
noise_spans.append(doc[start_pos:end_pos])
else:
noise_spans.append([mask_id] + doc[start_pos:end_pos])
if getattr(self.args, "remove_source_sentinel", False):
nonnoise_spans.extend(doc[last_end:start_pos])
else:
nonnoise_spans.extend(doc[last_end:start_pos] + [mask_id])
last_end = end_pos
nonnoise_spans.extend(doc[last_end:])
noise_spans = sum(noise_spans, [])
return nonnoise_spans, noise_spans
def _read_from_files(self, source_file, source_lang):
# data = []
file_path = os.path.join(self.data_dir, source_file)
if not os.path.exists(file_path):
print("| file {} not exists".format(file_path), flush=True)
return iter([]) # skip bad file
with open(file_path, "r", encoding="utf8") as f:
lines = f.read().strip().split("\n")
doc = [self.dictionary.bos()]
for line in lines:
if line == "":
if self.sample_break_mode == "complete_doc":
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
continue
tokenized_line = self.tokenizer.EncodeAsPieces(line)
tokenized_id = [
self.dictionary.index(token) for token in tokenized_line
] + [self.dictionary.eos_index]
if len(tokenized_id) > self.tokens_per_sample:
continue
if len(doc) + len(tokenized_id) > self.tokens_per_sample:
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
doc.extend(tokenized_id)
if len(doc) > 1 and len(doc) <= self.tokens_per_sample:
# data.append(doc)
yield doc
# return data
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/tasks/data/mlm_loader.py |
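A toy illustration (strings instead of token ids) of the layout `_span_corruption` produces when both sentinels are kept: each selected span is replaced by a sentinel in the source, and the target lists each sentinel followed by the span it stands for. The example document and span choice below are made up:

doc = ["the", "cat", "sat", "on", "the", "mat"]
# suppose ["cat"] and ["the", "mat"] are chosen as the two noise spans
source = ["the", "<mask_0>", "sat", "on", "<mask_1>"]
target = ["<mask_0>", "cat", "<mask_1>", "the", "mat"]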
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import warnings
import torch
import torch.distributed as dist
from fairseq.utils import multi_tensor_l2norm_available, multi_tensor_total_norm
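# Gradient-norm clipping that is aware of MoE and sharded (FSDP) training:
# dense, expert, and sharded gradients are normed separately, the expert and
# sharded norms are all-reduced across workers, and the combined total norm is
# used to rescale all gradients in place.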
@torch.no_grad()
def clip_grad_norm_(
params, max_norm, moe_expert_count, aggregate_norm_fn=None
) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
params = list(filter(grad_exists, params))
grads, expert_grads, base_expert_grads, sharded_grads = [], [], [], []
denom = math.sqrt(max(dist.get_global_world_size(), moe_expert_count))
for p in params:
if hasattr(p, "expert"):
expert_grads.append(p.grad.detach() / denom)
elif hasattr(p, "base_expert"):
base_expert_grads.append(p.grad.detach())
elif hasattr(p, "_is_sharded"):
sharded_grads.append(p.grad.detach())
else:
grads.append(p.grad.detach())
if len(grads) == 0:
if len(params) > 0:
total_norm = params[0].new_tensor(0.0)
else:
total_norm = torch.tensor(0.0)
elif len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
# calculate split_norm and all_reduce with other workers
norms = [total_norm]
for split_grads in [expert_grads, sharded_grads]:
if len(split_grads) == 0:
continue
split_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in split_grads])
)
if dist.is_initialized():
split_norm.pow_(2)
dist.all_reduce(split_norm)
split_norm.sqrt_()
norms.append(split_norm)
if len(norms) > 1:
total_norm = torch.norm(torch.stack(norms))
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
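    # Rescale every gradient in place when the total norm exceeds max_norm;
    # clip_coef is clamped at 1 so gradients are never scaled up.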
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads + sharded_grads + base_expert_grads:
g.mul_(clip_coef)
return total_norm
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/utils/sparse_clip.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/utils/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from fairseq import distributed_utils, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class LanguageConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
relu_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
use_xmoe: Optional[bool] = field(
default=False,
)
# options from other parts of the config
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
tpu: bool = II("common.tpu")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
fp16: bool = II("common.fp16")
fp16_no_flatten_grads: bool = II("common.fp16_no_flatten_grads")
ddp_backend: str = II("distributed_training.ddp_backend")
world_size: int = II("distributed_training.distributed_world_size")
distributed_rank: int = II("distributed_training.distributed_rank")
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
rel_pos_buckets: Optional[int] = field(
default=0,
)
max_rel_pos: Optional[int] = field(
default=0,
)
xpos_rel_pos: Optional[bool] = field(
default=False,
)
xpos_scale_base: Optional[int] = field(
default=512,
)
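# Decoder-only language model: wraps the torchscale Decoder behind fairseq's
# FairseqLanguageModel interface.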
@register_model("lm", dataclass=LanguageConfig)
class LanguageModel(FairseqLanguageModel):
def __init__(self, args, decoder):
self.args = args
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
task.dictionary.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
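        # Tie the output projection to the input embedding when sharing is
        # enabled; otherwise create a separate projection initialized with a
        # scaled normal distribution.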
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(task.dictionary), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
if getattr(args, "moe_freq", 0) > 0 and (
getattr(args, "fp16", False)
and not getattr(args, "memory_efficient_fp16", False)
and getattr(args, "ddp_backend", None) != "fully_sharded"
):
assert (
args.fp16_no_flatten_grads
), "If training moe models, set --fp16-no-flatten-grads to calculate correct gradnorm"
args.ddp_rank = distributed_utils.get_data_parallel_rank()
config = DecoderConfig()
config.override(args)
decoder = LMDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
return Embedding(len(dictionary), embed_dim, dictionary.pad())
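# Adapter that exposes the torchscale Decoder through fairseq's incremental
# decoding interface (padding-mask construction, max positions, state reorder).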
class LMDecoder(Decoder, FairseqIncrementalDecoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(src_tokens, self_attn_padding_mask, **kwargs)
def max_positions(self):
return self.embed_positions.max_positions
def reorder_incremental_state_scripting(
self,
incremental_state,
new_order,
):
for module in incremental_state:
for key in incremental_state[module]:
result = incremental_state[module][key].index_select(0, new_order)
incremental_state[module][key] = result
@register_model_architecture("lm", "lm_base")
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.base_layers = getattr(args, "base_layers", 0)
args.base_sublayers = getattr(args, "base_sublayers", 1)
args.base_shuffle = getattr(args, "base_shuffle", False)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/models/language_modeling.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("models." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group("Additional command-line arguments")
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/models/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from fairseq import distributed_utils, utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding
from fairseq.modules import PositionalEmbedding
from torch import Tensor
from torchscale.architecture.config import DecoderConfig, EncoderConfig
from torchscale.architecture.encoder import Encoder
from .language_modeling import LMDecoder as MTDecoder
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
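# Encoder-decoder translation model that wires the torchscale Encoder/Decoder
# into fairseq's FairseqEncoderDecoderModel.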
@register_model("mt")
class TranslationModel(FairseqEncoderDecoderModel):
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                            help='decoder output dimension (extra linear layer '
                                 'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
help='checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
'minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'
)
)
# args for mixture-of-expert layers
parser.add_argument('--moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer layers')
parser.add_argument('--encoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer encoder layers')
parser.add_argument('--decoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer decoder layers')
parser.add_argument('--moe-expert-count', type=int, metavar='D', default=0,
help='Number of experts in each MoE Layer')
parser.add_argument('--moe-gating-use-fp32', default=False, action='store_true',
help="Use FP32 computations in MoE top2 gating function")
parser.add_argument('--moe-second-expert-policy', type=str, default='sampling',
help="policy for second expert, options: all/sampling/random")
parser.add_argument(
'--moe-normalize-gate-prob-before-dropping', default=False, action='store_true',
help=(
"whether to normalize gate probs before or after dropping experts "
"for capacity and randomization"
)
)
parser.add_argument('--moe-expert-ffn-dim', type=int, default=0,
help="MoE Expert FFN dimension")
parser.add_argument('--moe-top1-expert', default=False, action='store_true',
help="Use top1 gate instead of top2")
parser.add_argument(
'--moe-eval-capacity-token-fraction', type=float, default=0.25,
help=(
"Fraction of tokens as capacity during validation"
"if set to negative, use same as training. range: (0.0, 1.0]."
)
)
parser.add_argument('--moe-normalize-expert-grad', type=str, default='world_size',
help="Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'")
parser.add_argument('--use-moe-pad-mask', default=False, action='store_true',
help="Don't route padding tokens to any expert")
parser.add_argument('--use-xmoe', default=False, action='store_true',
help="Enable X-Moe")
parser.add_argument('--freeze-moe', default=False, action='store_true',
help="Freeze MoE Params")
parser.add_argument('--deepnorm', default=False, action='store_true',
help="Enable DeepNorm")
parser.add_argument('--subln', default=False, action='store_true',
help="Enable SubLN")
parser.add_argument('--pretrained-dense-mt-model-path', type=str, default='')
# args for pseudo-MoE layers
parser.add_argument('--alternate-ffn-embed-dim', type=int, default=0,
help="FFN embed dim of alternate pseudo-MoE blocks")
parser.add_argument('--rel-pos-buckets', type=int, default=0,
help='')
parser.add_argument('--max-rel-pos', type=int, default=0,
help='')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
args.ddp_rank = distributed_utils.get_data_parallel_rank()
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder_embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
src_dict.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
decoder_embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
tgt_dict.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
decoder_embed_tokens.weight.shape[1],
decoder_embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = decoder_embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(tgt_dict), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
encoder = cls.build_encoder(
args,
encoder_embed_tokens,
encoder_embed_positions,
src_dict,
)
decoder = cls.build_decoder(
args,
decoder_embed_tokens,
decoder_embed_positions,
output_projection,
tgt_dict,
)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, embed_tokens, embed_positions, dictionary):
config = EncoderConfig()
config.override(args)
return MTEncoder(
config,
embed_tokens,
embed_positions,
is_encoder_decoder=True,
dictionary=dictionary,
)
@classmethod
def build_decoder(
cls, args, embed_tokens, embed_positions, output_projection, dictionary
):
config = DecoderConfig()
config.override(args)
return MTDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=True,
dictionary=dictionary,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = False,
features_only: bool = False,
**kwargs
):
encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
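# Adapter that exposes the torchscale Encoder through fairseq's FairseqEncoder
# interface (padding-mask construction and beam-search reordering).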
class MTEncoder(Encoder, FairseqEncoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(
src_tokens=src_tokens, encoder_padding_mask=self_attn_padding_mask, **kwargs
)
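    # Reorder the cached encoder outputs along the batch dimension; fairseq's
    # beam search calls this when hypotheses are reordered.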
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = encoder_out["encoder_out"].index_select(0, new_order)
new_encoder_embedding = encoder_out["encoder_embedding"].index_select(
0, new_order
)
new_encoder_padding_mask = encoder_out["encoder_padding_mask"].index_select(
0, new_order
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(0, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
}
def max_positions(self):
return self.embed_positions.max_positions
@register_model_architecture("mt", "mt_base")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.is_moe = getattr(args, "is_moe", False)
args.selected_expert_count = getattr(args, "selected_expert_count", 2)
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/models/machine_translation.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model, register_model_architecture
from fairseq.models.squad import SQuADHead
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from torchscale.architecture.config import EncoderConfig
from .machine_translation import MTEncoder as Encoder
DEFAULT_MAX_SOURCE_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class BertConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
encoder_embed_dim: int = field(
default=512, metadata={"help": "encoder embedding dimension"}
)
encoder_output_dim: int = field(
default=512, metadata={"help": "encoder output dimension"}
)
encoder_input_dim: int = field(
default=512, metadata={"help": "encoder input dimension"}
)
encoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_layers: int = field(default=6, metadata={"help": "num encoder layers"})
encoder_attention_heads: int = field(
default=8, metadata={"help": "num encoder attention heads"}
)
encoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each encoder block"}
)
no_encoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last encoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_encoder_input_output_embed: bool = field(
default=False, metadata={"help": "share encoder input and output embeddings"}
)
encoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the encoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "LayerDrop probability for encoder"}
)
encoder_layers_to_keep: Optional[str] = field(
default=None,
metadata={
"help": "which layers to *keep* when pruning as a comma-separated list"
},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
max_source_positions: int = field(
default=1024, metadata={"help": "max source positions"}
)
pooler_activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use for pooler layer"}
)
pooler_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability in the masked_lm pooler layers"},
)
# options from other parts of the config
# add_bos_token: bool = II("task.add_bos_token")
# tokens_per_sample: int = II("task.tokens_per_sample")
tpu: bool = II("common.tpu")
rel_pos_buckets: int = field(default=0, metadata={"help": ""})
max_rel_pos: int = field(default=0, metadata={"help": ""})
use_xmoe: Optional[bool] = field(
default=False,
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
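# Encoder-only masked language model ("BERT-style") built on the torchscale
# Encoder, with optional sentence-classification and question-answering heads.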
@register_model("mlm", dataclass=BertConfig)
class BertModel(BaseFairseqModel):
def __init__(self, args, encoder):
super().__init__()
self.args = args
self.encoder = encoder
self.padding_idx = self.encoder.embed_tokens.padding_idx
self.classification_heads = nn.ModuleDict()
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
args.max_source_positions = getattr(
args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.dictionary, args.encoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
task.dictionary.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
lm_head = cls.build_lm_head(
args,
args.encoder_embed_dim,
len(task.dictionary),
args.activation_fn,
weight=embed_tokens.weight,
)
config = EncoderConfig()
config.override(args)
encoder = Encoder(
config,
embed_tokens=embed_tokens,
embed_positions=embed_positions,
output_projection=lm_head,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, encoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
return embed_tokens
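    # The LM head reuses the token embedding matrix as its output projection
    # weight (weight tying); see build_model above.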
@classmethod
def build_lm_head(cls, args, embed_dim, output_dim, activation_fn, weight):
return LMHead(embed_dim, output_dim, activation_fn, weight)
def output_layer(self, features, masked_tokens=None):
return self.encoder.output_projection(features, masked_tokens=masked_tokens)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def register_question_answering_head(self, name, num_classes=None):
self.classification_heads[name] = SQuADHead(
self.args.encoder_embed_dim,
)
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0] # noqa: E203
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
def get_normalized_probs_scriptable(
self,
net_output,
log_probs,
        sample=None,
):
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1)
else:
return utils.softmax(logits, dim=-1)
def forward(
self,
src_tokens=None,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
masked_tokens=None,
**kwargs
):
encoder_out = self.encoder(
src_tokens, features_only=True, return_all_hiddens=return_all_hiddens
)
x, extra = encoder_out["encoder_out"], encoder_out
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
elif not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
class ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x.float()).type_as(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
@register_model_architecture("mlm", "mlm_base")
def base_unilm_architecture(args):
if hasattr(args, "encoder_final_norm"):
args.no_encoder_final_norm = not args.encoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.encoder_output_dim = getattr(
args, "encoder_output_dim", args.encoder_embed_dim
)
args.encoder_input_dim = getattr(args, "encoder_input_dim", args.encoder_embed_dim)
# Model training is not stable without this
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.no_encoder_final_norm = getattr(args, "no_encoder_final_norm", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| EXA-1-master | exa/libraries/torchscale/examples/fairseq/models/bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import MoECriterion, register_criterion, MoECriterionConfig
@register_criterion("masked_lm_moe_cross_entropy", dataclass=MoECriterionConfig)
class MaskedLMMoECrossEntropyCriterion(MoECriterion):
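    # Task loss for masked-LM training with MoE layers: NLL over masked
    # positions only; the MoE gating/load-balancing loss is handled by the
    # MoECriterion base class.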
def compute_inner_loss(self, model, sample, reduce=True):
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
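        # If no position is masked in this batch, fall back to selecting every
        # position so the indexing below never sees an all-False mask.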
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
net_output = model(**sample["net_input"], masked_tokens=masked_tokens)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output)
if masked_tokens is not None:
target = target[masked_tokens]
nll_loss = F.nll_loss(
lprobs,
target.view(-1),
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
logging_output = {
"inner_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return net_output, nll_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
MaskedLMMoECrossEntropyCriterion.reduce_moe_metrics(logging_outputs)
loss_sum = sum(log.get("inner_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"inner_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["inner_loss"].avg)
) | EXA-1-master | exa/libraries/torchscale/examples/fairseq/criterions/masked_lm_moe.py |
import importlib
import os
# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("criterions." + file_name) | EXA-1-master | exa/libraries/torchscale/examples/fairseq/criterions/__init__.py |
| EXA-1-master | exa/libraries/CMU-MultimodalSDK-master/mmsdk/__init__.py
import fusion as fusion
import modules as modules
| EXA-1-master | exa/libraries/CMU-MultimodalSDK-master/mmsdk/mmmodelsdk/__init__.py |