python_code | repo_name | file_path
---|---|---
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# flake8: noqa
import models
import tasks
import criterions
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main()
| torchscale-flash-master | examples/fairseq/train.py |
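The entry point above only imports the local packages (so their registrations run) and defers to fairseq's trainer. A hedged sketch of a programmatic launch; the data path and hyper-parameters are placeholders, and it assumes the same working directory as train.py so that models/tasks/criterions are importable:

import sys

import models      # noqa: F401  -- registers the custom architectures, as in train.py above
import tasks       # noqa: F401  -- registers the "pretraining" task
import criterions  # noqa: F401
from fairseq_cli.train import cli_main

sys.argv = [
    "train.py",
    "/path/to/data",           # placeholder data directory
    "--task", "pretraining",   # defined in tasks/pretraining.py below
    "--arch", "lm_base",       # defined in models/language_modeling.py below
    "--max-update", "10",
]
# cli_main()  # uncomment to actually launch training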
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import json
import logging
import os
from argparse import Namespace
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import sentencepiece as spm
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
from .data.mlm_loader import MLMLoader
logger = logging.getLogger(__name__)
SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"])
SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"])
@dataclass
class PretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING,
metadata={
"help": "colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner"
},
)
sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(
default="complete",
metadata={
"help": 'If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.'
},
)
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sample for LM dataset"},
)
mask_prob: float = field(
default=0.15,
metadata={"help": "probability of replacing a token with mask"},
)
leave_unmasked_prob: float = field(
default=0.1,
metadata={"help": "probability that a masked token is unmasked"},
)
random_token_prob: float = field(
default=0.1,
metadata={"help": "probability of replacing a token with a random token"},
)
freq_weighted_replacement: bool = field(
default=False,
metadata={"help": "sample random replacement words based on word frequencies"},
)
mask_whole_words: bool = field(
default=False,
metadata={"help": "mask whole words; you may also want to set --bpe"},
)
mask_multiple_length: int = field(
default=1,
metadata={"help": "repeat the mask indices multiple times"},
)
mask_stdev: float = field(
default=0.0,
metadata={"help": "stdev of the mask length"},
)
shorten_method: SHORTEN_METHOD_CHOICES = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed --tokens-per-sample"
},
)
shorten_data_split_list: str = field(
default="",
metadata={
"help": "comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)'
},
)
seed: int = II("common.seed")
span_length: float = field(
default=3.0,
metadata={"help": "average span length for masking"},
)
remove_source_sentinel: bool = field(
default=False,
metadata={"help": "remove the source sentinel for the span corruption task"},
)
remove_target_sentinel: bool = field(
default=False,
metadata={"help": "remove the target sentinel for the span corruption task"},
)
batch_read_ahead: int = field(
default=100000,
metadata={"help": "batch read ahead size for infinibatch"},
)
required_batch_size_multiple: int = II("dataset.required_batch_size_multiple")
spm_model: str = field(
default="",
metadata={"help": "sentencepice model to tokenize the data"},
)
dict_file: str = field(
default="",
metadata={"help": ""},
)
pad_to_max_length: bool = field(
default=False,
)
@register_task("pretraining", dataclass=PretrainingConfig)
class PLMTask(FairseqTask):
def __init__(self, cfg, dictionary, tokenizer):
super().__init__(cfg)
self.cfg = cfg
self.dictionary = dictionary
self.tokenizer = tokenizer
self.seed = cfg.seed
self.mask_idx = dictionary.index("<mask>")
@classmethod
def setup_task(cls, cfg, **kwargs):
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
if cfg.dict_file != "":
dictionary = Dictionary.load(cfg.dict_file)
else:
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
# add mask token
dictionary.add_symbol("<mask>")
for i in range(100):
dictionary.add_symbol(f"<mask_{i}>")
dictionary.pad_to_multiple_(cfg.required_batch_size_multiple)
logger.info("dictionary: {} types".format(len(dictionary)))
# tokenizer = SentencepieceBPE(Namespace(sentencepiece_model=cfg.spm_model))
tokenizer = spm.SentencePieceProcessor()
tokenizer.Load(cfg.spm_model)
return cls(cfg, dictionary, tokenizer)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
self.datasets[split] = {
"data": json.load(open(f"{self.cfg.data}/json/{split}.json")),
"data_dir": self.cfg.data,
"shuffle": True if split == "train" else False,
}
self.datasets[split] = Namespace(**self.datasets[split])
def dataset(self, split):
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
**kwargs,
):
return MLMLoader(
self.cfg,
dataset,
self.dictionary,
self.tokenizer,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| torchscale-flash-master | examples/fairseq/tasks/pretraining.py |
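As a quick illustration of what setup_task() does to the vocabulary, this standalone sketch builds a fairseq Dictionary with the same mask and sentinel symbols; no data files or SentencePiece model are needed, and the multiple of 8 merely stands in for cfg.required_batch_size_multiple:

from fairseq.data import Dictionary

d = Dictionary()                 # starts with the standard special symbols
d.add_symbol("<mask>")           # MLM mask token
for i in range(100):
    d.add_symbol(f"<mask_{i}>")  # sentinel tokens used by the span-corruption task
d.pad_to_multiple_(8)            # stands in for cfg.required_batch_size_multiple
print(len(d), d.index("<mask>"))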
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("tasks." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group("Additional command-line arguments")
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
| torchscale-flash-master | examples/fairseq/tasks/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import torch
from infinibatch.iterators import CheckpointableIterator
from . import utils
class BaseBatchGen(CheckpointableIterator):
"""
This is a base class for batch generators that use infinibatch
"""
def __init__(self):
self._iter = None
self.epoch = 1
self.next_epoch_idx = 1
self.sharded_checkpoint = True
self.should_close_after_finished = True
def _build_iter(self):
"""
Build infinibatch iterator and assign to self._iter
"""
raise NotImplementedError()
def _move_to_tensor(self, batch):
def to_tensor(x):
return torch.tensor(x)
return utils.apply_to_sample(to_tensor, batch)
@property
def iterator(self):
if self._iter is None:
raise NotImplementedError("_build_iter() must called first")
return self._iter
def __iter__(self):
if self._iter is None:
raise NotImplementedError("_build_iter() must called first")
return self._iter
def __next__(self):
return next(self._iter)
def setstate(self, value):
self._iter.setstate(value)
def getstate(self):
return self._iter.getstate()
def close(self):
self._iter.close()
def __len__(self) -> int:
return 819200000  # large placeholder; infinibatch pipelines are effectively unbounded
def next_epoch_itr(
self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True
):
return self
def end_of_epoch(self) -> bool:
return False
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
return self.getstate()
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.setstate(state_dict)
@property
def first_batch(self):
return "DUMMY"
| torchscale-flash-master | examples/fairseq/tasks/data/basic_loader.py |
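BaseBatchGen leaves _build_iter() to subclasses (MLMLoader below is the real implementation). A toy sketch of the contract, assuming BaseBatchGen is importable from tasks.data.basic_loader: build an infinibatch pipeline and assign it to self._iter.

from infinibatch import iterators

class ToyBatchGen(BaseBatchGen):
    """Illustrative only: batches a plain Python list of samples."""

    def __init__(self, samples, batch_size):
        super().__init__()
        self.samples = samples
        self.batch_size = batch_size
        self._build_iter()

    def _build_iter(self):
        source = iterators.ChunkedSourceIterator(self.samples)
        batches = iterators.FixedBatchIterator(source, self.batch_size)
        # _move_to_tensor converts any numpy arrays inside each batch to torch tensors
        self._iter = iterators.MapIterator(batches, self._move_to_tensor)

# e.g.: gen = ToyBatchGen([np.array([1, 2, 3]), np.array([4, 5, 6])], batch_size=2)
#       first_batch = next(gen)   # a list of two torch tensors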
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| torchscale-flash-master | examples/fairseq/tasks/data/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import collections
from random import Random
from typing import Dict, Iterable, Optional
import numpy as np
from infinibatch import iterators
def apply_to_sample(f, sample):
if hasattr(sample, "__len__") and len(sample) == 0:
return {}
def _apply(x):
if isinstance(x, np.ndarray):
return f(x)
elif isinstance(x, collections.OrderedDict):
# OrderedDict has attributes that need to be preserved
od = collections.OrderedDict(
(key, _apply(value)) for key, value in x.items()
)
od.__dict__ = x.__dict__
return od
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
class NativeCheckpointableIterator(iterators.CheckpointableIterator):
def __init__(self, iterable: Iterable):
self._input_iterable = iterable
self.setstate(None)
def getstate(self) -> Dict:
return {"num_items_yielded": self._num_items_yielded}
def setstate(self, checkpoint: Optional[Dict]):
self._iterator = iter(self._input_iterable)
self._num_items_yielded = (
iterators._advance_iterator(self._iterator, checkpoint["num_items_yielded"])
if checkpoint is not None
else 0
)
def __next__(self):
item = next(self._iterator)
self._num_items_yielded += 1
return item
def close(self):
pass
class WeightIterator(object):
def __init__(self, weights, seed):
self.weights = weights
self.seed = seed
self.control_index = list(range(len(weights)))
self.setstate(None)
def __iter__(self):
return self
def getstate(self):
return {"random_state": self._random_state}
def setstate(self, checkpoint):
self._random_state = checkpoint["random_state"] if checkpoint else None
self._random = (
None # this will trigger the lazy initialization in self.__next__
)
def __next__(self):
if self._random is None:
self._random = Random(self.seed)
if self._random_state is not None:
self._random.setstate(self._random_state)
idx = self._random.choices(self.control_index, self.weights)[0]
self._random_state = self._random.getstate()
return idx
def close(self):
pass
| torchscale-flash-master | examples/fairseq/tasks/data/utils.py |
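Two short usage sketches for the helpers above, assuming they are in scope (e.g. imported from tasks.data.utils): apply_to_sample transforms every numpy array inside a nested batch structure, and WeightIterator draws dataset indices in proportion to the given weights, deterministically for a fixed seed.

import numpy as np
import torch

# apply_to_sample: only numpy arrays are transformed, everything else passes through
batch = {"net_input": {"src_tokens": np.array([[1, 2, 3]])}, "ntokens": 3}
tensor_batch = apply_to_sample(torch.tensor, batch)
print(type(tensor_batch["net_input"]["src_tokens"]))  # <class 'torch.Tensor'>

# WeightIterator: index 0 should dominate with these weights
it = WeightIterator(weights=[0.7, 0.2, 0.1], seed=1)
picks = [next(it) for _ in range(10)]
state = it.getstate()   # checkpoint the RNG state ...
it.setstate(state)      # ... and resume from exactly the same point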
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import copy
import itertools
import os
import numpy as np
from infinibatch import iterators
from .basic_loader import BaseBatchGen
from .utils import NativeCheckpointableIterator, WeightIterator
class MLMLoader(BaseBatchGen):
def __init__(
self,
args,
dataset,
dictionary,
tokenizer,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
):
super().__init__()
self.args = args
self.data = dataset.data
self.data_dir = dataset.data_dir
self.shuffle = dataset.shuffle
self.dictionary = dictionary
self.tokenizer = tokenizer
self.max_tokens = max_tokens
self.max_sentences = max_sentences
self.max_positions = max_positions
self.tokens_per_sample = args.tokens_per_sample
self.sample_break_mode = args.sample_break_mode
self.ignore_invalid_inputs = ignore_invalid_inputs
self.required_batch_size_multiple = required_batch_size_multiple
self.seed = str(seed)
self.num_shards = num_shards
self.shard_id = shard_id
self.batch_read_ahead = args.batch_read_ahead
self._build_iter()
def _build_iter(self):
tokenized_lines = self._multilingual_tokenize()
self.padded_batches = self._batchify(tokenized_lines)
prefetch_batches = iterators.PrefetchIterator(
self.padded_batches,
buffer_size=10000,
buffer_in_main_process=True,
log_empty_buffer_warning=True and self.shard_id == 0,
)
prefetch_batches = iterators.MapIterator(prefetch_batches, self._move_to_tensor)
self._iter = prefetch_batches
def _multilingual_tokenize(self):
multilingual_iters = []
weights = []
for data in self.data:
multilingual_iters.append(self._tokenize(data))
if "weight" in data:
weights.append(float(data["weight"]))
else:
weights.append(int(data["count"]))
if len(multilingual_iters) == 1:
return multilingual_iters[0]
sampling_iterator = WeightIterator(weights, self.seed)  # seed is a required argument of WeightIterator
control_iterator = NativeCheckpointableIterator(sampling_iterator)
tokenized_lines = iterators.MultiplexIterator(
control_iterator, multilingual_iters
)
return tokenized_lines
def _tokenize(self, data):
"""
data:
{
'source': list[Path],
'source_lang': str,
'count': int,
'weight': float,
'name': str,
}
"""
dataset = list(
zip(
data["source"],
itertools.repeat(data["source_lang"]),
)
)
if self.shuffle:
chunk_files = iterators.InfinitePermutationSourceIterator(
dataset,
seed=self.seed,
shuffle=self.shuffle,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
else:
chunk_files = iterators.ChunkedSourceIterator(
dataset,
num_instances=self.num_shards,
instance_rank=self.shard_id,
)
tokenized_lines = iterators.SelectManyIterator(
chunk_files, lambda files: self._read_from_files(*files)
)
tokenized_lines = iterators.SamplingRandomMapIterator(
tokenized_lines, self._prepare, self.seed
)
return tokenized_lines
def _batchify(self, lines):
if self.max_sentences is not None:
if self.batch_read_ahead > 0:
lines = iterators.BlockwiseShuffleIterator(
lines, self.batch_read_ahead, self.seed
)
batches = iterators.FixedBatchIterator(lines, self.max_sentences)
else:
def dynamic_batch_size(sample):
lengths = [len(x) for x in sample]
batch_size = self.max_tokens // max(lengths)
batch_size = (
batch_size
// self.required_batch_size_multiple
* self.required_batch_size_multiple
)
return max(1, batch_size)
batches = iterators.BucketedReadaheadBatchIterator(
lines,
read_ahead=self.batch_read_ahead,
key=(lambda x: max(len(x[0]), len(x[1]))) if self.shuffle else None,
batch_size=dynamic_batch_size,
shuffle=self.shuffle,
seed=self.seed,
)
def collate(batch):
batch_size = len(batch)
mlm_source_max_length = max([len(x[0]) for x in batch])
mlm_target_max_length = max([len(x[1]) for x in batch])
s2s_source_max_length = max([len(x[2]) for x in batch])
s2s_target_max_length = max([len(x[3]) for x in batch])
if self.args.pad_to_max_length:
mlm_source_max_length = self.args.tokens_per_sample
mlm_target_max_length = self.args.tokens_per_sample
mlm_source_ids = np.full(
shape=(batch_size, mlm_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
mlm_target_ids = np.full(
shape=(batch_size, mlm_target_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_source_ids = np.full(
shape=(batch_size, s2s_source_max_length),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_target_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
s2s_prev_input_ids = np.full(
shape=(batch_size, s2s_target_max_length - 1),
dtype=np.int32,
fill_value=self.dictionary.pad(),
)
for i, (
mlm_input_ids,
mlm_label_ids,
s2s_input_ids,
s2s_label_ids,
) in enumerate(batch):
mlm_source_ids[i, : len(mlm_input_ids)] = mlm_input_ids
mlm_target_ids[i, : len(mlm_label_ids)] = mlm_label_ids
s2s_source_ids[i, : len(s2s_input_ids)] = s2s_input_ids
s2s_target_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[1:]
s2s_prev_input_ids[i, : len(s2s_label_ids) - 1] = s2s_label_ids[:-1]
ret_batch = {
"net_input": {
"src_tokens": mlm_source_ids.astype(np.int64),
},
"target": mlm_target_ids.astype(np.int64),
"nsentences": batch_size,
"ntokens": sum([len(x[0]) for x in batch]),
}
return ret_batch
padded_batches = iterators.MapIterator(batches, collate)
return padded_batches
def _prepare(self, _random, doc):
nonmasked_tokens, masked_tokens = self._mask_lm(_random, doc)
nonnoise_spans, noise_spans = self._span_corruption(_random, doc)
return nonmasked_tokens, masked_tokens, nonnoise_spans, noise_spans
def _mask_lm(self, _random, doc):
def mask_tokens():
return "<mask>"
length = len(doc)
mask_tokens_num = int(length * self.args.mask_prob)
mask_tokens_num = min(max(mask_tokens_num, 1), length - 1)
possible_mask_positions = _random.sample(range(length), k=mask_tokens_num)
possible_mask_positions = sorted(possible_mask_positions)
nonmasked_tokens = copy.deepcopy(doc)
masked_tokens = [self.dictionary.pad() for _ in range(len(doc))]
for position in possible_mask_positions:
# masked_tokens.append(nonmasked_tokens[position])
masked_tokens[position] = nonmasked_tokens[position]
nonmasked_tokens[position] = self.dictionary.indices[mask_tokens()]
return nonmasked_tokens, masked_tokens
def _span_corruption(self, _random, doc):
def mask_tokens(i):
return f"<mask_{i}>"
length = len(doc)
noise_tokens_num = int(length * self.args.mask_prob)
noise_tokens_num = min(max(noise_tokens_num, 1), length - 1)
noise_spans_num = int(noise_tokens_num / self.args.span_length)
noise_spans_num = max(noise_spans_num, 1)
nonnoise_tokens_num = length - noise_tokens_num
if noise_spans_num == 1:
noise_split_positions = [0, noise_tokens_num]
else:
possible_split_positions = list(range(1, noise_tokens_num))
_random.shuffle(possible_split_positions)
noise_split_positions = sorted(
possible_split_positions[: noise_spans_num - 1]
)
noise_split_positions = [0] + noise_split_positions + [noise_tokens_num]
possible_insert_positions = list(range(nonnoise_tokens_num))
_random.shuffle(possible_insert_positions)
noise_insert_positions = sorted(possible_insert_positions[:noise_spans_num])
nonnoise_spans, noise_spans = [], []
last_end = 0
for i in range(noise_spans_num):
start_pos = noise_insert_positions[i] + noise_split_positions[i]
end_pos = noise_insert_positions[i] + noise_split_positions[i + 1]
mask_id = self.dictionary.indices[mask_tokens(i)]
if getattr(self.args, "remove_target_sentinel", False):
noise_spans.append(doc[start_pos:end_pos])
else:
noise_spans.append([mask_id] + doc[start_pos:end_pos])
if getattr(self.args, "remove_source_sentinel", False):
nonnoise_spans.extend(doc[last_end:start_pos])
else:
nonnoise_spans.extend(doc[last_end:start_pos] + [mask_id])
last_end = end_pos
nonnoise_spans.extend(doc[last_end:])
noise_spans = sum(noise_spans, [])
return nonnoise_spans, noise_spans
def _read_from_files(self, source_file, source_lang):
# data = []
file_path = os.path.join(self.data_dir, source_file)
if not os.path.exists(file_path):
print("| file {} not exists".format(file_path), flush=True)
return iter([]) # skip bad file
with open(file_path, "r", encoding="utf8") as f:
lines = f.read().strip().split("\n")
doc = [self.dictionary.bos()]
for line in lines:
if line == "":
if self.sample_break_mode == "complete_doc":
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
continue
tokenized_line = self.tokenizer.EncodeAsPieces(line)
tokenized_id = [
self.dictionary.index(token) for token in tokenized_line
] + [self.dictionary.eos_index]
if len(tokenized_id) > self.tokens_per_sample:
continue
if len(doc) + len(tokenized_id) > self.tokens_per_sample:
# data.append(doc)
yield doc
doc = [self.dictionary.bos()]
doc.extend(tokenized_id)
if len(doc) > 1 and len(doc) <= self.tokens_per_sample:
# data.append(doc)
yield doc
# return data
| torchscale-flash-master | examples/fairseq/tasks/data/mlm_loader.py |
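A small worked example of the dynamic batch sizing used in _batchify() above when max_sentences is not set: the batch size is derived from max_tokens and the longest sample in the read-ahead bucket, then rounded down to the required multiple (the numbers are illustrative).

max_tokens, required_multiple, longest_sample = 4096, 8, 300

batch_size = max_tokens // longest_sample                          # 13
batch_size = batch_size // required_multiple * required_multiple   # rounded down to 8
batch_size = max(1, batch_size)
print(batch_size)  # -> 8, so a batch holds at most 8 * 300 = 2400 tokens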
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import warnings
import torch
import torch.distributed as dist
from fairseq.utils import multi_tensor_l2norm_available, multi_tensor_total_norm
@torch.no_grad()
def clip_grad_norm_(
params, max_norm, moe_expert_count, aggregate_norm_fn=None
) -> torch.Tensor:
def grad_exists(p):
return p is not None and getattr(p, "grad", None) is not None
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
params = list(filter(grad_exists, params))
grads, expert_grads, base_expert_grads, sharded_grads = [], [], [], []
denom = math.sqrt(max(dist.get_world_size(), moe_expert_count))  # torch.distributed exposes get_world_size(), not get_global_world_size()
for p in params:
if hasattr(p, "expert"):
expert_grads.append(p.grad.detach() / denom)
elif hasattr(p, "base_expert"):
base_expert_grads.append(p.grad.detach())
elif hasattr(p, "_is_sharded"):
sharded_grads.append(p.grad.detach())
else:
grads.append(p.grad.detach())
if len(grads) == 0:
if len(params) > 0:
total_norm = params[0].new_tensor(0.0)
else:
total_norm = torch.tensor(0.0)
elif len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
elif grads[0].device.type == "xla":
device = grads[0].device
else:
device = torch.device("cpu")
total_norm = torch.norm(
torch.stack(
[torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
)
)
# calculate split_norm and all_reduce with other workers
norms = [total_norm]
for split_grads in [expert_grads, sharded_grads]:
if len(split_grads) == 0:
continue
split_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in split_grads])
)
if dist.is_initialized():
split_norm.pow_(2)
dist.all_reduce(split_norm)
split_norm.sqrt_()
norms.append(split_norm)
if len(norms) > 1:
total_norm = torch.norm(torch.stack(norms))
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads + expert_grads + sharded_grads + base_expert_grads:
g.mul_(clip_coef)
return total_norm
| torchscale-flash-master | examples/fairseq/utils/sparse_clip.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
| torchscale-flash-master | examples/fairseq/utils/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
from fairseq import distributed_utils, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
DEFAULT_MAX_TARGET_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class LanguageConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
relu_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
use_xmoe: Optional[bool] = field(
default=False,
)
# options from other parts of the config
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
tpu: bool = II("common.tpu")
memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
fp16: bool = II("common.fp16")
fp16_no_flatten_grads: bool = II("common.fp16_no_flatten_grads")
ddp_backend: str = II("distributed_training.ddp_backend")
world_size: int = II("distributed_training.distributed_world_size")
distributed_rank: int = II("distributed_training.distributed_rank")
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
rel_pos_buckets: Optional[int] = field(
default=0,
)
max_rel_pos: Optional[int] = field(
default=0,
)
xpos_rel_pos: Optional[bool] = field(
default=False,
)
xpos_scale_base: Optional[int] = field(
default=512,
)
@register_model("lm", dataclass=LanguageConfig)
class LanguageModel(FairseqLanguageModel):
def __init__(self, args, decoder):
self.args = args
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
task.dictionary.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(task.dictionary), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
if getattr(args, "moe_freq", 0) > 0 and (
getattr(args, "fp16", False)
and not getattr(args, "memory_efficient_fp16", False)
and getattr(args, "ddp_backend", None) != "fully_sharded"
):
assert (
args.fp16_no_flatten_grads
), "If training moe models, set --fp16-no-flatten-grads to calculate correct gradnorm"
args.ddp_rank = distributed_utils.get_data_parallel_rank()
config = DecoderConfig()
config.override(args)
decoder = LMDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
return Embedding(len(dictionary), embed_dim, dictionary.pad())
class LMDecoder(Decoder, FairseqIncrementalDecoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(src_tokens, self_attn_padding_mask, **kwargs)
def max_positions(self):
return self.embed_positions.max_positions
def reorder_incremental_state_scripting(
self,
incremental_state,
new_order,
):
for module in incremental_state:
for key in incremental_state[module]:
result = incremental_state[module][key].index_select(0, new_order)
incremental_state[module][key] = result
@register_model_architecture("lm", "lm_base")
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.base_layers = getattr(args, "base_layers", 0)
args.base_sublayers = getattr(args, "base_sublayers", 1)
args.base_shuffle = getattr(args, "base_shuffle", False)
args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| torchscale-flash-master | examples/fairseq/models/language_modeling.py |
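build_model() above funnels the fairseq args into a torchscale DecoderConfig via override(). A minimal sketch of that hand-off; the field names follow torchscale.architecture.config and the values are arbitrary:

from argparse import Namespace
from torchscale.architecture.config import DecoderConfig

config = DecoderConfig()
# override() copies matching, non-None attributes from the args object
config.override(Namespace(decoder_embed_dim=768, decoder_layers=12,
                          decoder_attention_heads=12))
print(config.decoder_embed_dim, config.decoder_layers)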
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import argparse
import importlib
import os
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("models." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group("Additional command-line arguments")
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
| torchscale-flash-master | examples/fairseq/models/__init__.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from fairseq import distributed_utils, utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding
from fairseq.modules import PositionalEmbedding
from torch import Tensor
from torchscale.architecture.config import DecoderConfig, EncoderConfig
from torchscale.architecture.encoder import Encoder
from .language_modeling import LMDecoder as MTDecoder
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
@register_model("mt")
class TranslationModel(FairseqEncoderDecoderModel):
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
'if different from decoder embed dim)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
parser.add_argument('--offload-activations', action='store_true',
help='checkpoint activations at each layer, then offload them to CPU. Sets --checkpoint-activations.')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
'--min-params-to-wrap', type=int, metavar='D', default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
'minimum number of params for a layer to be wrapped with FSDP() when '
'training with --ddp-backend=fully_sharded. Smaller values will '
'improve memory efficiency, but may make torch.distributed '
'communication less efficient due to smaller input sizes. This option '
'is set to 0 (i.e., always wrap) when --checkpoint-activations or '
'--offload-activations are passed.'
)
)
# args for mixture-of-expert layers
parser.add_argument('--moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer layers')
parser.add_argument('--encoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer encoder layers')
parser.add_argument('--decoder-moe-freq', type=int, metavar='D', default=0,
help='Frequency at which we insert MoE Transformer decoder layers')
parser.add_argument('--moe-expert-count', type=int, metavar='D', default=0,
help='Number of experts in each MoE Layer')
parser.add_argument('--moe-gating-use-fp32', default=False, action='store_true',
help="Use FP32 computations in MoE top2 gating function")
parser.add_argument('--moe-second-expert-policy', type=str, default='sampling',
help="policy for second expert, options: all/sampling/random")
parser.add_argument(
'--moe-normalize-gate-prob-before-dropping', default=False, action='store_true',
help=(
"whether to normalize gate probs before or after dropping experts "
"for capacity and randomization"
)
)
parser.add_argument('--moe-expert-ffn-dim', type=int, default=0,
help="MoE Expert FFN dimension")
parser.add_argument('--moe-top1-expert', default=False, action='store_true',
help="Use top1 gate instead of top2")
parser.add_argument(
'--moe-eval-capacity-token-fraction', type=float, default=0.25,
help=(
"Fraction of tokens as capacity during validation"
"if set to negative, use same as training. range: (0.0, 1.0]."
)
)
parser.add_argument('--moe-normalize-expert-grad', type=str, default='world_size',
help="Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'")
parser.add_argument('--use-moe-pad-mask', default=False, action='store_true',
help="Don't route padding tokens to any expert")
parser.add_argument('--use-xmoe', default=False, action='store_true',
help="Enable X-Moe")
parser.add_argument('--freeze-moe', default=False, action='store_true',
help="Freeze MoE Params")
parser.add_argument('--deepnorm', default=False, action='store_true',
help="Enable DeepNorm")
parser.add_argument('--subln', default=False, action='store_true',
help="Enable SubLN")
parser.add_argument('--pretrained-dense-mt-model-path', type=str, default='')
# args for pseudo-MoE layers
parser.add_argument('--alternate-ffn-embed-dim', type=int, default=0,
help="FFN embed dim of alternate pseudo-MoE blocks")
parser.add_argument('--rel-pos-buckets', type=int, default=0,
help='number of buckets for relative position embeddings')
parser.add_argument('--max-rel-pos', type=int, default=0,
help='maximum distance for relative position embeddings')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
args.ddp_rank = distributed_utils.get_data_parallel_rank()
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
encoder_embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
src_dict.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
decoder_embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim,
tgt_dict.pad(),
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if args.share_decoder_input_output_embed:
output_projection = torch.nn.Linear(
decoder_embed_tokens.weight.shape[1],
decoder_embed_tokens.weight.shape[0],
bias=False,
)
output_projection.weight = decoder_embed_tokens.weight
else:
output_projection = torch.nn.Linear(
args.decoder_embed_dim, len(tgt_dict), bias=False
)
torch.nn.init.normal_(
output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5
)
encoder = cls.build_encoder(
args,
encoder_embed_tokens,
encoder_embed_positions,
src_dict,
)
decoder = cls.build_decoder(
args,
decoder_embed_tokens,
decoder_embed_positions,
output_projection,
tgt_dict,
)
if not args.share_all_embeddings:
min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, embed_tokens, embed_positions, dictionary):
config = EncoderConfig()
config.override(args)
return MTEncoder(
config,
embed_tokens,
embed_positions,
is_encoder_decoder=True,
dictionary=dictionary,
)
@classmethod
def build_decoder(
cls, args, embed_tokens, embed_positions, output_projection, dictionary
):
config = DecoderConfig()
config.override(args)
return MTDecoder(
config,
embed_tokens,
embed_positions,
output_projection,
is_encoder_decoder=True,
dictionary=dictionary,
)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = False,
features_only: bool = False,
**kwargs
):
encoder_out = self.encoder(src_tokens, return_all_hiddens=return_all_hiddens)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
class MTEncoder(Encoder, FairseqEncoder):
def forward(self, src_tokens, **kwargs):
self_attn_padding_mask = src_tokens.eq(self.dictionary.pad())
return super().forward(
src_tokens=src_tokens, encoder_padding_mask=self_attn_padding_mask, **kwargs
)
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = encoder_out["encoder_out"].index_select(0, new_order)
new_encoder_embedding = encoder_out["encoder_embedding"].index_select(
0, new_order
)
new_encoder_padding_mask = encoder_out["encoder_padding_mask"].index_select(
0, new_order
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(0, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
}
def max_positions(self):
return self.embed_positions.max_positions
@register_model_architecture("mt", "mt_base")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
args.is_moe = getattr(args, "is_moe", False)
args.selected_expert_count = getattr(args, "selected_expert_count", 2)
| torchscale-flash-master | examples/fairseq/models/machine_translation.py |
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import logging
from dataclasses import dataclass, field
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model, register_model_architecture
from fairseq.models.squad import SQuADHead
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, Embedding
from fairseq.modules import PositionalEmbedding
from omegaconf import II
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from torchscale.architecture.config import EncoderConfig
from .machine_translation import MTEncoder as Encoder
DEFAULT_MAX_SOURCE_POSITIONS = 1024
logger = logging.getLogger(__name__)
@dataclass
class BertConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
encoder_embed_dim: int = field(
default=512, metadata={"help": "encoder embedding dimension"}
)
encoder_output_dim: int = field(
default=512, metadata={"help": "encoder output dimension"}
)
encoder_input_dim: int = field(
default=512, metadata={"help": "encoder input dimension"}
)
encoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_layers: int = field(default=6, metadata={"help": "num encoder layers"})
encoder_attention_heads: int = field(
default=8, metadata={"help": "num encoder attention heads"}
)
encoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each encoder block"}
)
no_encoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last encoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_encoder_input_output_embed: bool = field(
default=False, metadata={"help": "share encoder input and output embeddings"}
)
encoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the encoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "LayerDrop probability for encoder"}
)
encoder_layers_to_keep: Optional[str] = field(
default=None,
metadata={
"help": "which layers to *keep* when pruning as a comma-separated list"
},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
max_source_positions: int = field(
default=1024, metadata={"help": "max source positions"}
)
pooler_activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use for pooler layer"}
)
pooler_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability in the masked_lm pooler layers"},
)
# options from other parts of the config
# add_bos_token: bool = II("task.add_bos_token")
# tokens_per_sample: int = II("task.tokens_per_sample")
tpu: bool = II("common.tpu")
rel_pos_buckets: int = field(default=0, metadata={"help": ""})
max_rel_pos: int = field(default=0, metadata={"help": ""})
use_xmoe: Optional[bool] = field(
default=False,
)
moe_freq: int = field(
default=0,
metadata={"help": "Frequency at which we insert MoE Transformer layers"},
)
moe_expert_count: int = field(
default=0, metadata={"help": "Number of experts in each MoE Layer"}
)
moe_gating_use_fp32: bool = field(
default=False,
metadata={"help": "Use FP32 computations in MoE top2 gating function"},
)
moe_second_expert_policy: str = field(
default="sampling",
metadata={"help": "policy for second expert, options: all/sampling/random"},
)
moe_normalize_gate_prob_before_dropping: bool = field(
default=False,
metadata={
"help": "whether to normalize gate probs before or after dropping experts for capacity and randomization"
},
)
moe_expert_ffn_dim: Optional[int] = field(
default=None, metadata={"help": "MoE expert FFN dimension"}
)
moe_top1_expert: Optional[bool] = field(
default=False, metadata={"help": "Use top1 gate instead of top2"}
)
moe_eval_capacity_token_fraction: Optional[float] = field(
default=0.25,
metadata={
"help": (
"Default: 0.25, Fraction of tokens as capacity during validation, "
"if set to negative, use same as training. range: (0.0, 1.0]."
)
},
)
moe_normalize_expert_grad: Optional[str] = field(
default="world_size",
metadata={
"help": "Divide expert gradients by (1) 'world_size' (2) 'sqrt_world_size'"
},
)
record_a2a_perf_stats: Optional[bool] = field(
default=False,
metadata={"help": "records all to all perf stats during distributed training"},
)
dummy_a2a: Optional[bool] = field(
default=False,
metadata={
"help": "By passes all to all during distributed training by returning the input buffer as output"
},
)
moe_batch_prioritized_routing: Optional[bool] = field(
default=False,
metadata={
"help": "if true orders token by the gate prob before capacity dropping."
},
)
ddp_rank: int = II("distributed_training.distributed_rank")
deepnorm: Optional[bool] = field(
default=False,
)
subln: Optional[bool] = field(
default=False,
)
@register_model("mlm", dataclass=BertConfig)
class BertModel(BaseFairseqModel):
def __init__(self, args, encoder):
super().__init__()
self.args = args
self.encoder = encoder
self.padding_idx = self.encoder.embed_tokens.padding_idx
self.classification_heads = nn.ModuleDict()
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
args.max_source_positions = getattr(
args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
)
embed_tokens = cls.build_embedding(
args, task.dictionary, args.encoder_embed_dim
)
embed_positions = (
PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
task.dictionary.pad(),
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
lm_head = cls.build_lm_head(
args,
args.encoder_embed_dim,
len(task.dictionary),
args.activation_fn,
weight=embed_tokens.weight,
)
config = EncoderConfig()
config.override(args)
encoder = Encoder(
config,
embed_tokens=embed_tokens,
embed_positions=embed_positions,
output_projection=lm_head,
is_encoder_decoder=False,
dictionary=task.dictionary,
)
return cls(args, encoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
return embed_tokens
@classmethod
def build_lm_head(cls, args, embed_dim, output_dim, activation_fn, weight):
return LMHead(embed_dim, output_dim, activation_fn, weight)
def output_layer(self, features, masked_tokens=None):
return self.encoder.output_projection(features, masked_tokens=masked_tokens)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def register_question_answering_head(self, name, num_classes=None):
self.classification_heads[name] = SQuADHead(
self.args.encoder_embed_dim,
)
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0] # noqa: E203
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
def get_normalized_probs_scriptable(
self,
net_output,
log_probs,
sample = None,
):
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1)
else:
return utils.softmax(logits, dim=-1)
def forward(
self,
src_tokens=None,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
masked_tokens=None,
**kwargs
):
encoder_out = self.encoder(
src_tokens, features_only=True, return_all_hiddens=return_all_hiddens
)
x, extra = encoder_out["encoder_out"], encoder_out
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
elif not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
class ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x.float()).type_as(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class LMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the masked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x.float()).type_as(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
@register_model_architecture("mlm", "mlm_base")
def base_unilm_architecture(args):
if hasattr(args, "encoder_final_norm"):
args.no_encoder_final_norm = not args.encoder_final_norm
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
# args.add_bos_token = getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.encoder_output_dim = getattr(
args, "encoder_output_dim", args.encoder_embed_dim
)
args.encoder_input_dim = getattr(args, "encoder_input_dim", args.encoder_embed_dim)
# Model training is not stable without this
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.no_encoder_final_norm = getattr(args, "no_encoder_final_norm", False)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
| torchscale-flash-master | examples/fairseq/models/bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import MoECriterion, register_criterion, MoECriterionConfig
@register_criterion("masked_lm_moe_cross_entropy", dataclass=MoECriterionConfig)
class MaskedLMMoECrossEntropyCriterion(MoECriterion):
def compute_inner_loss(self, model, sample, reduce=True):
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
net_output = model(**sample["net_input"], masked_tokens=masked_tokens)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output)
if masked_tokens is not None:
target = target[masked_tokens]
nll_loss = F.nll_loss(
lprobs,
target.view(-1),
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
logging_output = {
"inner_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return net_output, nll_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
MaskedLMMoECrossEntropyCriterion.reduce_moe_metrics(logging_outputs)
loss_sum = sum(log.get("inner_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"inner_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["inner_loss"].avg)
) | torchscale-flash-master | examples/fairseq/criterions/masked_lm_moe.py |
import importlib
import os
# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("criterions." + file_name) | torchscale-flash-master | examples/fairseq/criterions/__init__.py |
from setuptools import setup, find_packages
with open("README.md", "r", encoding = "utf-8") as readme:
long_description = readme.read()
setup(
name="The Distiller",
version="0.0.2",
description="Generate textual and conversational datasets with LLMs.",
long_description = long_description,
long_description_content_type = "text/markdown",
author="Kye Gomez",
author_email="[email protected]",
url="https://github.com/kyegomez/The-Distiller",
keywords=["dataset", "llm", "langchain", "openai"],
package_dir={"": "src"},
packages = find_packages(where="src"),
install_requires=[
"langchain>=0.0.113",
"click>=8.1"
],
entry_points={
"console_scripts": [
"distiller=distiller:distiller"
],
},
)
| The-Distiller-master | setup.py |
from .cli import distiller
from .conversations import *
from .texts import *
from .outputs import *
| The-Distiller-master | src/distiller/__init__.py |
import click
from typing import List, Tuple
from .conversations import ConversationsGeneratorConfig, ConversationsGenerator
from .texts import TextsGeneratorConfig, TextsGenerator
from .outputs import DatasetWriter
@click.group()
def distiller() -> None:
"""Command line interface that generates datasets with LLMs."""
pass
click_options = click.option("--option",
"-o",
"options",
type=(str, str),
multiple=True,
help="Values for additional options denoted in your prompts by {OPTION_NAME}.")
click_path = click.option("--path",
"-f",
"path",
type=click.Path(),
help="Where to save the dataset. Either a file or a directory (folder).")
click_single_file = click.option("--single-file",
"-s",
"single_file",
type=bool,
is_flag=True,
help="Either save the whole dataset to a single file or create multiple files.")
click_num_samples = click.option("--num-samples",
"-n",
"num_samples",
type=int,
default=1,
help="Number of conversations for each configuration.")
click_temperatures = click.option("--temperature",
"-t",
"temperatures",
type=float,
multiple=True,
default=[0.5],
help="Possible temperature values for the backend language model.")
@click.command()
@click.option("--hf-api-token",
"-hf",
type=str,
envvar="HF_API_KEY",
help="Hugging Face API key.")
@click.option("--model-agent",
"-m",
type=click.Choice(["openai", "hf"]),
default="openai",
help="Backend to use for the AI agents. Can be 'openai' for OpenAI's gpt-3.5-turbo or 'hf' for a Hugging Face model. Defaults to 'openai'.")
@click.option("--repo-id",
"-r",
type=str,
help="Hugging Face model repo id, required if 'hf' is chosen as model-agent.")
@click.option("--openai-api-key",
"-k",
"openai_api_key",
type=str,
envvar="OPENAI_API_KEY",
help="OpenAI API key.")
@click.option("--agent1",
"-a",
"agent1",
type=str,
required=True,
help="Agent role description.")
@click.option("--agent2",
"-b",
"agent2",
type=str,
required=True,
help="Agent role description.")
@click.option("--initial-utterance",
"-u",
"initial_utterances",
type=str,
default=["Hello."],
multiple=True,
help="Utterance to be provisioned to the first agent. For many use cases a \"Hello\" is enough.")
@click.option("--interruption",
"-i",
"interruption",
type=click.Choice(["length", "end_phrase"]),
default="length",
help="Interruption mode.")
@click.option("--end-phrase",
"-e",
"end_phrase",
type=str,
default="Goodbye",
help="Interrupt after this phrase is outputted by one of the agents.")
@click.option("--end-agent",
"-d",
"end_agent",
type=click.Choice(["agent1", "agent2", "both"]),
default="both",
help="In which agent's messages to look for the end phrase.")
@click.option("--length",
"-l",
"lengths",
type=int,
multiple=True,
default=[5],
help="Maximum number of utterances for each agent. A conversation sample will be generated for each length.")
@click_temperatures
@click_num_samples
@click_options
@click_path
@click_single_file
def conversations(
    hf_api_token: str,
    model_agent: str,
    repo_id: str,
    openai_api_key: str,
    agent1: str,
    agent2: str,
    initial_utterances: List[str],
    num_samples: int,
    interruption: str,
    end_phrase: str,
    end_agent: str,
    lengths: List[int],
    temperatures: List[float],
    options: List[Tuple[str, str]],
    path: str,
    single_file: bool
) -> None:
    """Produce conversations between two LLM agents (OpenAI or Hugging Face) with given roles."""
dataset_writer = DatasetWriter(path, single_file)
    generator_config = ConversationsGeneratorConfig(agent_type=model_agent,
                                                    hf_id=repo_id,
                                                    openai_api_key=openai_api_key,
                                                    agent1=agent1,
                                                    agent2=agent2,
                                                    initial_utterances=initial_utterances,
                                                    num_samples=num_samples,
                                                    interruption=interruption,
                                                    end_phrase=end_phrase,
                                                    end_agent=end_agent,
                                                    lengths=lengths,
                                                    temperatures=temperatures,
                                                    options=options)
conversations_generator = ConversationsGenerator(generator_config)
for conversation in conversations_generator:
dataset_writer.save_intermediate_result(conversation)
@click.command()
@click.option("--prompt",
"-p",
"prompt",
type=str,
required=True,
help="Input prompt.")
@click.option("--backend",
"-b",
"backends",
type=str,
multiple=True,
default=["openai|text-davinci-003"],
help="LLM APIs to use as backends. Use \"backend|model_name\" notation. For example: \"openai|text-davinci-003\".")
@click.option("--max-length",
"-l",
"max_lengths",
type=int,
multiple=True,
default=[100],
help="Maximum number of tokens to generate for each prompt.")
@click_temperatures
@click_num_samples
@click_options
@click_path
@click_single_file
def texts(
prompt: str,
num_samples: int,
max_lengths: List[int],
    temperatures: List[float],
backends: List[str],
options: List[Tuple[str, str]],
path: str,
single_file: bool
) -> None:
"""Inference multiple LLMs at scale."""
dataset_writer = DatasetWriter(path, single_file)
generator_config = TextsGeneratorConfig(prompt=prompt,
backends=backends,
num_samples=num_samples,
max_lengths=max_lengths,
temperatures=temperatures,
options=options)
texts_generator = TextsGenerator(generator_config)
for text_object in texts_generator:
dataset_writer.save_intermediate_result(text_object)
distiller.add_command(texts)
distiller.add_command(conversations)
def main() -> None:
"""Run the distiller CLI."""
distiller()
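# Illustrative invocations (assuming the "distiller" console script from setup.py is
# installed; keys, role descriptions, option values and paths below are placeholders):
#
#   distiller conversations -k $OPENAI_API_KEY \
#       -a "You are a customer asking for a refund." \
#       -b "You are a polite support agent." \
#       -l 4 -t 0.7 -n 2 -f conversations.json -s
#
#   distiller texts -p "Write a short product description for {PRODUCT}" \
#       -o PRODUCT "wireless earbuds" \
#       -b "openai|text-davinci-003" -l 80 -t 0.6 -n 3 -f texts_output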
| The-Distiller-master | src/distiller/cli.py |
from dataclasses import dataclass, field
from typing import List, Any, Dict, Tuple, Union
from langchain.prompts import PromptTemplate
from langchain.llms import BaseLLM
from langchain.chains import LLMChain
from .base import DatasetGenerator
OPTIONS_CONFIG_KEYS = ["backend", "max_length", "temperature"]
GENERATOR_CONFIG_KEYS = ["backends", "max_lengths", "temperatures"]
@dataclass
class TextsGeneratorConfig:
prompt: str
"""Text prompt."""
backends: List[Tuple[str, str, str]]
"""LLM APIs to use as backends."""
num_samples: int = 1
"""Number of texts to generate for each options combination."""
max_lengths: List[int] = field(default_factory=lambda: [5])
"""Maximum lengths in tokens for the output of each generation."""
temperatures: List[float] = field(default_factory=lambda: [0])
"""Possible temperatures for the backend LLM."""
options: List[Tuple[str, str]] = field(default_factory=lambda: [])
"""Additional options defined in the system prompts with curly brackets."""
class TextsGenerator(DatasetGenerator):
"""Generator producing texts by varying model parameters and prompt options."""
config: TextsGeneratorConfig
"""Configuration for a TextsGenerator."""
def __init__(self, config: TextsGeneratorConfig) -> None:
"""Initialize TextsGenerator."""
super().__init__(config)
def initialize_options_configs(
self,
options_config_keys: List[str] = OPTIONS_CONFIG_KEYS,
generator_config_keys: List[str] = GENERATOR_CONFIG_KEYS
) -> None:
"""Prepare options combinations."""
super().initialize_options_configs(options_config_keys, generator_config_keys)
def initialize_backend(self, text_config: Dict[str, Any]) -> BaseLLM:
"""Initialize a specific LLM."""
backend_str = text_config["backend"]
temperature = text_config["temperature"]
max_length = text_config["max_length"]
backend, model = backend_str.split("|")
if backend.lower() == "openai":
from langchain.llms import OpenAI
llm = OpenAI(model_name=model,
temperature=temperature,
max_tokens=max_length)
elif backend.lower() == "cohere":
from langchain.llms import Cohere
llm = Cohere(model=model,
temperature=temperature,
max_tokens=max_length)
elif backend.lower() == "petals":
from langchain.llms import Petals
llm = Petals(model_name=model,
temperature=temperature,
max_new_tokens=max_length)
else:
raise ValueError("Cannot use the specified backend.")
return llm
def generate_item(self) -> Dict[str, Union[List[List[Any]], float, int]]:
"""Produce text with a LLM Chain."""
if self.generator_index >= len(self.options_configs):
raise StopIteration()
text_config = self.options_configs[self.generator_index]
self.generator_index += 1
input_variables = text_config.keys() - ["sample_id",
"backend",
"temperature",
"max_length"]
prompt_template = PromptTemplate(template=self.config.prompt,
input_variables=input_variables)
llm = self.initialize_backend(text_config)
prompt_params = {k: text_config[k] for k in input_variables}
input_prompt = prompt_template.format(**prompt_params)
chain = LLMChain(prompt=prompt_template, llm=llm)
output = chain.predict(**prompt_params)
return {**text_config,
"prompt": input_prompt,
"output": output}
| The-Distiller-master | src/distiller/texts.py |
from .cli import main
main()
| The-Distiller-master | src/distiller/__main__.py |
from dataclasses import dataclass, field
from typing import List, Any, Dict, Tuple, Union
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.schema import SystemMessage
from langchain import HuggingFaceHub
from .base import DatasetGenerator
OPTIONS_CONFIG_KEYS = ["length", "temperature", "initial_utterance"]
GENERATOR_CONFIG_KEYS = ["lengths", "temperatures", "initial_utterances"]
@dataclass
class ConversationsGeneratorConfig:
agent_type: str
"""type of language odel either openai or huggingface"""
hf_id: str
"""repo id for the hf model"""
openai_api_key: str
"""OpenAI API key."""
agent1: str
"""Description of the first agent used to construct its system message."""
agent2: str
"""Description of the second agent used to construct its system message."""
initial_utterances: List[str] = "Hello."
"""Utterances to be provisioned to the first agent."""
num_samples: int = 1
"""Number of conversations to generate for each options combination."""
interruption: str = "length"
"""Interruption mode."""
end_phrase: str = "Goodbye!"
"""Phrase to look for when checking whether to interrupt a conversation."""
end_agent: str = "both"
"""Agent whose messages to check for the interruption phrase."""
lengths: List[int] = field(default_factory=lambda: [5])
"""Possible lengths of the conversations. If end_phrase interruption is enabled these will be used for maximum lengths."""
temperatures: List[float] = field(default_factory=lambda: [0])
"""Possible temperatures for the backend LLM."""
options: List[Tuple[str, str]] = field(default_factory=lambda: [])
"""Additional options defined in the system prompts with curly brackets."""
class ConversationsGenerator(DatasetGenerator):
"""Generator producing conversations between two AI agents."""
config: ConversationsGeneratorConfig
"""Configuration for a ConversationsGenerator."""
def __init__(self, config: ConversationsGeneratorConfig) -> None:
"""Initialize ConversationsGenerator."""
super().__init__(config)
def initialize_options_configs(
self,
options_config_keys: List[str] = OPTIONS_CONFIG_KEYS,
generator_config_keys: List[str] = GENERATOR_CONFIG_KEYS
) -> None:
"""Prepare options combinations."""
super().initialize_options_configs(options_config_keys, generator_config_keys)
def initialize_chain(
self,
agent: str,
system_prompt: str,
conversation_config: Dict[str, Any]
) -> Tuple[ConversationChain, str]:
"""Initialize a conversation and return a chain and a formatted system prompt."""
if self.config.interruption == "end_phrase":
if self.config.end_agent == agent or self.config.end_agent == "both":
system_prompt += f" When the whole conversation is over end with \"{self.config.end_phrase}\"."
system_template = SystemMessagePromptTemplate.from_template(
system_prompt)
template_params = {key: conversation_config[key]
for key in system_template.input_variables}
system_message = system_template.format(**template_params).content
prompt = ChatPromptTemplate.from_messages([
SystemMessage(content=system_message),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}")
])
memory = ConversationBufferMemory(return_messages=True)
if self.config.agent_type == "openai":
llm = ChatOpenAI(temperature=conversation_config["temperature"],
openai_api_key=self.config.openai_api_key)
elif self.config.agent_type == "huggingface":
llm = HuggingFaceHub(repo_id=self.config.repo_id, model_kwargs={"temperature": conversation_config["temperature"], "max_length": 64})
chain = ConversationChain(memory=memory, prompt=prompt, llm=llm)
return chain, system_message
def end_phrase_interruption(self, agent: str, message: str) -> bool:
"""Check whether to interrupt conversation generation."""
if self.config.interruption == "end_phrase":
if self.config.end_agent == agent or self.config.end_agent == "both":
if self.config.end_phrase in message:
return True
return False
def generate_item(self) -> Dict[str, Union[List[List[Any]], float, int]]:
"""Run two chains to talk with one another and record the chat history."""
if self.generator_index >= len(self.options_configs):
raise StopIteration()
conversation_config = self.options_configs[self.generator_index]
self.generator_index += 1
chain1, system_prompt1 = self.initialize_chain("agent1",
self.config.agent1,
conversation_config)
chain2, system_prompt2 = self.initialize_chain("agent2",
self.config.agent2,
conversation_config)
utterances = []
chain1_inp = conversation_config["initial_utterance"]
for _ in range(conversation_config["length"]):
chain1_out = chain1.predict(input=chain1_inp)
utterances.append(["agent1", chain1_out])
if self.end_phrase_interruption("agent1", chain1_out):
break
chain2_out = chain2.predict(input=chain1_out)
utterances.append(["agent2", chain2_out])
if self.end_phrase_interruption("agent2", chain2_out):
break
chain1_inp = chain2_out
return {**conversation_config,
"agent1": system_prompt1,
"agent2": system_prompt2,
"utterances": utterances}
| The-Distiller-master | src/distiller/conversations.py |
import itertools
from typing import List, Any, Dict, Tuple, Generator, Iterator, Protocol
OPTIONS_CONFIG_KEYS = ["temperature"]
GENERATOR_CONFIG_KEYS = ["temperatures"]
class DatasetGeneratorConfig(Protocol):
"""Base generator configuration protocol."""
openai_api_key: str
"""OpenAI API key."""
num_samples: int
"""Number of texts to generate for each options combination."""
options: List[Tuple[str, str]]
"""Additional options defined in the text prompt with curly brackets."""
class DatasetGenerator:
"""Abstraction of a dataset generator."""
config: DatasetGeneratorConfig
"""Generator configuration."""
options_configs: List[Dict[str, Any]]
"""Possible combinations of the provided options."""
generator_index: int = 0
"""Index of the next item to be returned by the generator."""
def __init__(self, config: DatasetGeneratorConfig) -> None:
self.config = config
self.initialize_options_configs()
def initialize_options_configs(
self,
options_config_keys: List[str] = OPTIONS_CONFIG_KEYS,
generator_config_keys: List[str] = GENERATOR_CONFIG_KEYS
) -> None:
"""Prepare options combinations."""
options_keys = ["sample_id", *options_config_keys]
options_values = [range(self.config.num_samples)]
options_values += [getattr(self.config, key) for key in generator_config_keys]
for option in self.config.options:
if option[0] not in options_keys:
options_keys.append(option[0])
options_values.append([option[1]])
else:
index = options_keys.index(option[0])
if option[1] not in options_values[index]:
options_values[index].append(option[1])
self.options_configs = list(map(lambda x: dict(zip(options_keys, x)),
itertools.product(*options_values)))
def generate_item(self) -> Dict[str, Any]:
"""Produce a data item."""
return {}
def __next__(self) -> Generator[Dict[str, Any], None, None]:
return self.generate_item()
def __iter__(self) -> Iterator:
return self
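# Illustrative expansion (hypothetical values): with num_samples=2,
# temperatures=[0.5, 0.7] and options=[("topic", "cats")], initialize_options_configs
# takes the Cartesian product of all option values and yields four configurations:
#   {"sample_id": 0, "temperature": 0.5, "topic": "cats"}
#   {"sample_id": 0, "temperature": 0.7, "topic": "cats"}
#   {"sample_id": 1, "temperature": 0.5, "topic": "cats"}
#   {"sample_id": 1, "temperature": 0.7, "topic": "cats"}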
| The-Distiller-master | src/distiller/base.py |
import os
import json
from uuid import uuid4
from typing import Dict, Any, List
class DatasetWriter:
"""Handle outputting dataset items."""
single_file: bool
"""Whether to save all dataset items in a single file."""
path: str
"""Path of the output file or directory."""
dataset_items: List[Dict[str, Any]]
"""Collection of all the items in the current dataset."""
def __init__(self, path: str = None, single_file: bool = False) -> None:
"""Initialize DatasetWriter."""
        if path is None and single_file:
            path = self.get_unique_filename(os.getcwd())
        elif path is None and not single_file:
            path = self.get_unique_dirname(os.getcwd())
elif os.path.isdir(path) and single_file:
path = self.get_unique_filename(path)
elif os.path.isfile(path) and not single_file:
raise ValueError(
"Cannot write to a file with the single_file mode disabled. Try setting --single-file.")
self.single_file = single_file
self.path = path
self.dataset_items = []
def get_unique_dirname(self, base_path):
"""Get a unique dirname."""
return os.path.join(base_path, str(uuid4()))
def get_unique_filename(self, base_path):
"""Get a unique filename."""
return os.path.join(base_path, f"{uuid4()}.json")
def save_intermediate_result(self, result: Dict[str, Any]):
"""Either save an item to its own file or concatenate it with all dataset items in a single file."""
if self.single_file:
self.dataset_items.append(result)
current_directory = os.path.dirname(self.path)
if current_directory != "" and current_directory != ".":
os.makedirs(current_directory, exist_ok=True)
with open(self.path, "w") as output_file:
json.dump(self.dataset_items, output_file)
else:
current_filepath = self.get_unique_filename(self.path)
os.makedirs(self.path, exist_ok=True)
with open(current_filepath, "w") as output_file:
json.dump(result, output_file)
| The-Distiller-master | src/distiller/outputs.py |
from abc import ABC, abstractmethod
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_thoughts(self, state, k):
pass
@abstractmethod
def evaluate_states(self, states):
pass
| The-Distiller-master | src/distiller/agents/abstract.py |
from transformers import AutoModelForCausalLM, AutoTokenizer
from .abstract import AbstractLanguageModel
class HuggingLanguageModel(AbstractLanguageModel):
def __init__(self, model_name, model_tokenizer=None, verbose=False):
self.model = AutoModelForCausalLM.from_pretrained(model_name)
self.tokenizer = AutoTokenizer.from_pretrained(model_tokenizer or model_name)
self.verbose = verbose
def generate_thoughts(self, state, k, max_length=100):
state_text = ' '.join(state)
prompt = f"Write down your observations in format 'Observation:xxxx', then write down your thoughts in format 'Thoughts:xxxx Given the current state of reasoning: '{state_text}', generate {k} coherent solutions to achieve {state_text}"
if self.verbose:
print(f"Generating thoughts for state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, max_length=max_length, num_return_sequences=k)
thoughts = [self.tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
except Exception as e:
if self.verbose:
print(f"Error generating thoughts for state: {state_text}")
print(f"Error: {e}")
thoughts = []
return thoughts
def evaluate_states(self, states, initial_prompt, max_length=10):
state_values = {}
for state in states:
state_text = ' '.join(state)
prompt = f"Given the current state of reasoning: '{state_text}', pessimitically evaluate its value as a float between 0 and 1 based on it's potential to achieve {initial_prompt}"
if self.verbose:
print(f"Evaluating state: {state_text}")
try:
inputs = self.tokenizer(prompt, return_tensors="pt")
outputs = self.model.generate(**inputs, num_return_sequences=1, max_length=max_length)
value_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
value = float(value_text)
except ValueError:
if self.verbose:
print(f"Error converting value to float for state: {state_text}")
value = 0 # Assign a default value if the conversion fails
except Exception as e:
if self.verbose:
print(f"Error evaluating state: {state_text}")
print(f"Error: {e}")
value = 0
state_values[state] = value
return state_values | The-Distiller-master | src/distiller/agents/huggingface.py |
import torch
import torch.nn as nn
import numpy as np
#define the loss function class
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplemented("compute_loss method must be implemented")
#implement specific loss functions that inherit from LossFunction
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
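# Thin wrapper so torch.nn.BCEWithLogitsLoss exposes the same compute_loss
# interface as the other loss classes in this file (used by the sparsity
# heuristic in Nebula below).
class BCEWithLogitsLoss(LossFunction):
    def __init__(self):
        self.loss_function = nn.BCEWithLogitsLoss()
    def compute_loss(self, y_pred, y_true):
        return self.loss_function(y_pred, y_true)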
"""
all pytorch loss functions
"""
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
#op 1 check range of values in y_true
if is_classification is None:
unique_values = np.unique(y_true)
if len(unique_values) <= 10 and np.all(np.equal(np.mod(unique_values, 1), 0)):
is_classification = True
#==============================================>
if is_classification is None:
value_counts = np.bincount(y_true.flatten().to(dtype=torch.int32).numpy())
if np.all(value_counts > 0):
is_classification = True
#==============================================>
#op 3 analyze the dimension of y_pred
if y_pred.ndim > 2:
#handle segmentation problem
pass
#==============================================>
        if is_classification is None:
            sparsity = np.count_nonzero(y_true) / y_true.numel()
            if sparsity < 0.5:
                # sparse, multi-hot targets suggest multi-label classification
                self.loss_function = BCEWithLogitsLoss()
                return
#==============================================>
#v4
y_pred_flat = y_pred.flatten().numpy()
y_true_flat = y_true.flatten().numpy()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.size]
correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#==============================================>
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
#==============================================>
#v3
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
#==============================================>
#v2
if is_classification is None:
class_balance = value_counts / np.sum(value_counts)
if np.any(class_balance < 0.1):
is_classification = True
#==============================================>
#==============================================>
#v2
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
def compute_loss(self, y_pred, y_true):
dataset_id = id(y_true)
        if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
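# Minimal usage sketch (illustrative tensors only): Nebula infers a loss function
# from the heuristics above; passing domain_knowledge pins the choice explicitly.
if __name__ == "__main__":
    logits = torch.randn(4, 3)
    labels = torch.tensor([0, 2, 1, 0])
    print("classification loss:", Nebula().compute_loss(logits, labels))
    preds = torch.tensor([2.1, 3.6, 1.0, 4.9])
    targets = torch.tensor([2.0, 3.5, 1.2, 4.8])
    print("regression loss:",
          Nebula(domain_knowledge="regression").compute_loss(preds, targets))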
| nebula-master | nebula_old.py |
from setuptools import setup, find_packages
setup(
name = 'nebula-loss',
packages = find_packages(exclude=[]),
version = '0.4.1',
license='MIT',
description = '1 Loss Function to rule them all!',
author = 'Agora',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/nebula',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'jax',
    'loss function',
"Multi-Modality AI"
],
install_requires=[
'torch',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | nebula-master | setup.py |
import unittest
import torch
from nebula.nebula import MSELoss, CrossEntropyLoss, MultiLabelSoftMarginLoss, Nebula
class TestNebula(unittest.TestCase):
def setUp(self):
self.nebula = Nebula()
self.tolerance = 1e-5
self.y_true_regression = torch.tensor([1.1, 2.2, 3.3, 4.4, 5.5], dtype=torch.float)
self.y_pred_regression = torch.tensor([1.0, 3.0, 2.0, 5.0, 4.0], dtype=torch.float)
# Assuming 3 classes
self.y_true_classification = torch.tensor([0, 2, 1, 0, 2], dtype=torch.long)
self.y_pred_classification = torch.rand((5, 3), dtype=torch.float) # Random probabilities for classes
def test_same_shape(self):
y_true = torch.rand((10, 10))
y_pred = torch.rand((10, 10))
self.nebula.compute_loss(y_pred, y_true)
def test_different_shape(self):
y_true = torch.rand((10, 10))
y_pred = torch.rand((10, 11))
with self.assertRaises(ValueError):
self.nebula.compute_loss(y_pred, y_true)
def test_empty_tensors(self):
y_true = torch.tensor([])
y_pred = torch.tensor([])
with self.assertRaises(ValueError):
self.nebula.compute_loss(y_pred, y_true)
def test_multidimensional_tensors(self):
y_true = torch.rand((10, 10, 10))
y_pred = torch.rand((10, 10, 10))
self.nebula.compute_loss(y_pred, y_true)
def test_y_true_unique_values_less_than_10(self):
y_true = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4, 5, 5])
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_y_true_unique_values_greater_than_10(self):
y_true = torch.arange(1, 11)
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_negative_integers_in_y_true(self):
y_true = torch.tensor([-1, -2, -3, -4, -5, 1, 2, 3, 4, 5])
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_non_negative_integers_in_y_true(self):
y_true = torch.arange(10)
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_sparse_tensor(self):
y_true = torch.zeros((10, 10))
y_pred = torch.rand((10, 10))
self.nebula.compute_loss(y_pred, y_true)
def test_dense_tensor(self):
y_true = torch.ones((10, 10))
y_pred = torch.rand((10, 10))
self.nebula.compute_loss(y_pred, y_true)
def test_probability_distributions(self):
y_true = torch.FloatTensor([0.1, 0.2, 0.3, 0.4])
y_pred = torch.FloatTensor([0.1, 0.2, 0.3, 0.4])
self.nebula.compute_loss(y_pred, y_true)
def test_log_probabilities(self):
y_true = torch.randn((10, 10))
y_pred = torch.log_softmax(torch.randn((10, 10)), dim=1)
self.nebula.compute_loss(y_pred, y_true)
def test_domain_knowledge_classification(self):
y_true = torch.randint(0, 2, (10,))
y_pred = torch.rand((10,))
self.nebula.domain_knowledge = "classification"
self.nebula.compute_loss(y_pred, y_true)
def test_domain_knowledge_regression(self):
y_true = torch.randn((10,))
y_pred = torch.rand((10,))
self.nebula.domain_knowledge = "regression"
self.nebula.compute_loss(y_pred, y_true)
def test_user_input_classification(self):
y_true = torch.randint(0, 2, (10,))
y_pred = torch.rand((10,))
self.nebula.user_input = "classification"
self.nebula.compute_loss(y_pred, y_true)
def test_user_input_regression(self):
y_true = torch.randn((10,))
y_pred = torch.rand((10,))
self.nebula.user_input = "regression"
self.nebula.compute_loss(y_pred, y_true)
def test_y_true_values_in_range_0_1(self):
y_true = torch.rand((10, 10))
y_pred = torch.rand((10, 10))
self.nebula.compute_loss(y_pred, y_true)
def test_unbalanced_classes_in_y_true(self):
y_true = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_balanced_classes_in_y_true(self):
y_true = torch.tensor([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
def test_large_tensors(self):
y_true = torch.rand((10000, 10000))
y_pred = torch.rand((10000, 10000))
self.nebula.compute_loss(y_pred, y_true)
def test_multilabel_classification_tensor(self):
y_true = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = torch.rand((3, 3))
self.nebula.compute_loss(y_pred, y_true)
def test_y_pred_max_value_greater_than_0_9(self):
y_true = torch.rand((10,))
y_pred = torch.rand((10,)) + 0.9
self.nebula.compute_loss(y_pred, y_true)
def test_loss_function_reuse_from_cache(self):
y_true = torch.rand((10,))
y_pred = torch.rand((10,))
self.nebula.compute_loss(y_pred, y_true)
self.assertEqual(id(self.nebula.loss_function), id(self.nebula.loss_function_cache[id(y_true)]))
def test_nebula_for_regression(self):
nebula_loss_function = Nebula(domain_knowledge="regression")
loss = nebula_loss_function.compute_loss(self.y_pred_regression, self.y_true_regression)
expected_loss = MSELoss().compute_loss(self.y_pred_regression, self.y_true_regression)
self.assertTrue(torch.isclose(loss, expected_loss, atol=self.tolerance))
def test_nebula_for_classification(self):
nebula_loss_function = Nebula(domain_knowledge="classification")
loss = nebula_loss_function.compute_loss(self.y_pred_classification, self.y_true_classification)
expected_loss = CrossEntropyLoss().compute_loss(self.y_pred_classification, self.y_true_classification)
self.assertTrue(torch.isclose(loss, expected_loss, atol=self.tolerance))
def test_nebula_for_multi_label_classification(self):
# For multi-label classification, let's assume each instance can belong to any of the 3 classes
y_true_multi_label_classification = torch.tensor([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 0]], dtype=torch.float)
y_pred_multi_label_classification = torch.rand((5, 3), dtype=torch.float) # Random probabilities for classes
nebula_loss_function = Nebula()
loss = nebula_loss_function.compute_loss(y_pred_multi_label_classification, y_true_multi_label_classification)
expected_loss = MultiLabelSoftMarginLoss().compute_loss(y_pred_multi_label_classification, y_true_multi_label_classification)
self.assertTrue(torch.isclose(loss, expected_loss, atol=self.tolerance))
# Add more tests for other scenarios...
if __name__ == "__main__":
unittest.main() | nebula-master | testing.py |
#using gradient boosted greedy algorithms to compute loss
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score
# Load your dataset
# X, y = load_your_data()
# For demonstration purposes, we'll use random data
X = np.random.rand(100, 10)
y = np.random.randint(0, 2, 100)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Train an XGBoost model
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
params = {
'objective': 'binary:logistic',
'eval_metric': 'logloss',
'seed': 42
}
bst = xgb.train(params, dtrain, num_boost_round=100, early_stopping_rounds=10, evals=[(dtest, 'test')])
# Make predictions on the same dataset
y_pred = bst.predict(dtest)
# Determine the loss function
y_pred_labels = np.round(y_pred)
accuracy = accuracy_score(y_test, y_pred_labels)
mse = mean_squared_error(y_test, y_pred)
print("Accuracy:", accuracy)
print("Mean Squared Error:", mse)
if accuracy > 0.9:
print("Use CrossEntropyLoss")
else:
print("Use MSELoss")
| nebula-master | experimental/xgboostV3.py |
import torch
import torch.nn as nn
import numpy as np
#define the loss function class
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplemented("compute_loss method must be implemented")
#implement specific loss functions that inherit from LossFunction
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
"""
all pytorch loss functions
"""
#v1
# class Nebula(LossFunction):
# def __init__(self):
# self.loss_function = None
# def determine_loss_function(self, y_pred, y_true):
# ##implement logic to determine type of data and select the loss function
# #based on the shape of y_true or other criteria
# if len(y_true.shape) > 1 and y_true.shape[1] > 1:
# self.loss_function = CrossEntropyLoss()
# else:
# self.loss_function = MSELoss()
# #transform function data1 to -> data type loss function can understand?
# def compute_loss(self, y_pred, y_true):
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
# Example usage
# y_pred_classification = torch.tensor([[2.0, 1.0, 0.1], [1.0, 2.0, 0.1]])
# y_true_classification = torch.tensor([0, 1])
#v2
# GRADIENT BOOSTED
# greedy algorithm
#concurrency
#asynchrony
#CACHING FOR TRAINING --> THIS IS YOUR DATASET -> OKAY HERES LOSS FUNCTION -> DONT COMPUTE DETERMINE LOSS FUNCTION
#self healing loss function
#responsive loss function
# 1 loss function for any task
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
def determine_loss_function(self, y_pred, y_true):
#op 1 check range of values in y_true
unique_values = np.unique(y_true)
if len(unique_values) <= 10 and np.all(np.equal(np.mod(unique_values, 1), 0)):
            is_classification = True
        else:
            is_classification = False
#==============================================>
#opt2 - check the distribution of valus in y_true
# assuming a specific pattern indicates a classification problem
# You can replace this with a more specific pattern check if needded
# value_counts = np.bincount(y_true.flatten().astype(int))
value_counts = np.bincount(y_true.flatten().to(dtype=torch.int32).numpy())
if np.all(value_counts > 0):
is_classification = True
#==============================================>
#op 3 analyze the dimension of y_pred
if y_pred.ndim > 2:
#handle segmentation problem
pass
#==============================================>
#op4 -> check sparsity of y_true
#v1
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.1:
# #handle multi label classification problem
# pass
#v2
# sparsity = np.count_nonzero(y_true) / y_true.numel()
# if sparsity < 0.5:
# self.loss_function = torch.nn.BCEWithLogitsLoss()
#v3
sparsity = np.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
            self.loss_function = torch.nn.BCEWithLogitsLoss()
#==============================================>
#op5 analyze the relationship between y_pred and y_true
#v1
# correlation = np.corrcoef(y_pred.flatten(), y_true.flatten())[0, 1]
# if correlation > 0.8:
# is_classification = False
#v2
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred_flat.shape != y_true_flat.shape:
# y_pred_flat = y_pred_flat[:y_true_flat.shape]
# correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#v3
# y_pred_flat = y_pred.flatten().numpy()
# y_true_flat = y_true.flatten().numpy()
# if y_pred.flat.shape != y_true_flat.shape:
# y_pref_flat = y_pred_flat[:y_true_flat.size]
# correlation = np.corrcoef(y_pref_flat, y_true_flat)[0, 1]
#v4
y_pred_flat = y_pred.flatten().numpy()
y_true_flat = y_true.flatten().numpy()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.size]
correlation = np.corrcoef(y_pred_flat, y_true_flat)[0, 1]
#==============================================>
#op6 use domain_kownledge
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
#==============================================>
#op7 analyze distribution of values in y_pred
#assuiming certainty indicates a classification problem
# if np.max(y_pred) > 0.9:
# is_classification = True
#v2
if torch.max(y_pred) > 0.9:
is_classification = True
#==============================================>
#op8 check the baalance of classes in y_true
#assuming imbalanced classes indicate a classification problem
class_balance = value_counts / np.sum(value_counts)
if np.any(class_balance < 0.1):
is_classification = True
#==============================================>
#op9 use a model selection technique
#this optimization requires a model and a dataset so its not implemented
# you can implement this op outside the determine_loss_function method
#==============================================>
#op10 leverage user input or metadata
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
def compute_loss(self, y_pred, y_true):
dataset_id = id(y_true)
        if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
# y_pred_regression = torch.tensor([[2.5], [3.2]])
# y_true_regression = torch.tensor([[2.0], [3.0]])
# nebula = Nebula()
# loss_classification = nebula.compute_loss(y_pred_classification, y_true_classification)
# print("Nebula loss for classification:", loss_classification)
# loss_regression = nebula.compute_loss(y_pred_regression, y_true_regression)
# print("Nebula loss for regression:", loss_regression)
# v2 testing
# Example data for each optimization
y_true_classification = torch.tensor([0, 1, 0, 2, 1])
y_true_regression = torch.tensor([2.0, 3.5, 1.2, 4.8, 3.3])
y_pred_classification = torch.tensor([[0.8, 0.1, 0.1], [0.2, 0.7, 0.1], [0.9, 0.05, 0.05], [0.1, 0.2, 0.7], [0.3, 0.6, 0.1]])
y_pred_regression = torch.tensor([2.1, 3.6, 1.0, 4.9, 3.1])
# Example usage
nebula = Nebula()
y_pred_classification = torch.randn(5, 3)
y_true_classification = torch.tensor([1, 0, 2, 1, 0])
y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
# Convert y_true_multilabel to float
y_true_multilabel = y_true_multilabel.to(torch.float32)
# Optimization 1: Check the range of values in y_true
loss_classification_1 = nebula.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 1):", loss_classification_1)
loss_regression_1 = nebula.compute_loss(y_pred_regression, y_true_regression)
print("Nebula loss for regression (Optimization 1):", loss_regression_1)
# Optimization 2: Check the distribution of values in y_true
# (Assuming a specific pattern indicates a classification problem)
y_true_classification_2 = torch.tensor([0, 1, 0, 1, 0])
loss_classification_2 = nebula.compute_loss(y_pred_classification, y_true_classification_2)
print("Nebula loss for classification (Optimization 2):", loss_classification_2)
# Optimization 3: Analyze the dimensions of y_pred
# (Not applicable in this example, as it requires a segmentation problem)
# Optimization 4: Check the sparsity of y_true
# y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0]])
# y_true_multilabel = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
loss_multilabel = nebula.compute_loss(y_pred_classification, y_true_multilabel)
print("Nebula loss for multi-label classification (Optimization 4):", loss_multilabel)
# Optimization 5: Analyze the relationship between y_pred and y_true
y_true_regression_5 = torch.tensor([2.1, 3.6, 1.0, 4.9, 3.1])
loss_regression_5 = nebula.compute_loss(y_pred_regression, y_true_regression_5)
print("Nebula loss for regression (Optimization 5):", loss_regression_5)
# Optimization 6: Use domain knowledge
nebula_domain_classification = Nebula(domain_knowledge="classification")
loss_classification_6 = nebula_domain_classification.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 6):", loss_classification_6)
nebula_domain_regression = Nebula(domain_knowledge="regression")
loss_regression_6 = nebula_domain_regression.compute_loss(y_pred_regression, y_true_regression)
print("Nebula loss for regression (Optimization 6):", loss_regression_6)
# Optimization 7: Analyze the distribution of values in y_pred
# y_pred_classification_7 = torch.tensor([[0.95, 0.025, 0.025], [0.05, 0.9, 0.05], [0.9, 0.05, 0.05], [0.1, 0.1, 0.8], [0.1, 0.8, 0.1]])
y_pred_classification_7 = torch.randn(5, 3)
y_true_classification_one_hot = one_hot_encoding(y_true_classification, 3)
loss_classification_7 = nebula.compute_loss(y_pred_classification_7, y_true_classification)
print("Nebula loss for classification (Optimization 7):", loss_classification_7)
# Optimization 8: Check the balance of classes in y_true
y_true_classification_8 = torch.tensor([0, 0, 0, 1, 1])
loss_classification_8 = nebula.compute_loss(y_pred_classification, y_true_classification_8)
print("Nebula loss for classification (Optimization 8):", loss_classification_8)
# Optimization 10: Leverage user input or metadata
nebula_user_classification = Nebula(user_input="classification")
loss_classification_10 = nebula_user_classification.compute_loss(y_pred_classification, y_true_classification)
print("Nebula loss for classification (Optimization 10):", loss_classification_10)
nebula_user_regression = Nebula(user_input="regression")
loss_regression_10 = nebula_user_regression.compute_loss(y_pred_regression, y_true_regression)
print(f"Nebula loss for regression (optimization 10) {loss_regression_10}") | nebula-master | experimental/nebulaV2.py |
import numpy as np
import torch
from torch.nn import BCELoss
# Define the base LossFunction class
class LossFunction:
def compute_loss(self, y_pred, y_true):
raise NotImplementedError("compute_loss method must be implemented in the derived class")
# Define specific loss function classes that inherit from LossFunction
class CrossEntropyLoss(LossFunction):
def compute_loss(self, y_pred, y_true):
softmax_pred = self.softmax(y_pred)
loss = -np.sum(y_true * np.log(softmax_pred))
return loss
def softmax(self, x):
exp_x = np.exp(x - np.max(x))
return exp_x / np.sum(exp_x, axis=1, keepdims=True)
class MeanSquaredErrorLoss(LossFunction):
def compute_loss(self, y_pred, y_true):
return np.mean((y_pred - y_true) ** 2)
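# Minimal wrapper (assumption: not in the original file) so the binary-classification branch
# below exposes the same compute_loss interface as the other loss classes; it adapts torch's
# BCELoss to the numpy arrays used throughout this module.
class BinaryCrossEntropyLoss(LossFunction):
    def __init__(self):
        self.loss_function = BCELoss()
    def compute_loss(self, y_pred, y_true):
        # Convert numpy inputs to float tensors, as BCELoss expects probabilities as floats
        y_pred_t = torch.from_numpy(np.asarray(y_pred, dtype=np.float32))
        y_true_t = torch.from_numpy(np.asarray(y_true, dtype=np.float32))
        return self.loss_function(y_pred_t, y_true_t).item()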
#v1
# # Create a DynamicLossFunction class that inherits from the LossFunction base class
# class DynamicLossFunction(LossFunction):
# def __init__(self):
# self.loss_function = None
# def determine_loss_function(self, y_pred, y_true):
# # Implement logic to determine the type of data and select the appropriate loss function
# # For example, based on the shape of y_true or other criteria
# if y_true.shape[1] > 1:
# self.loss_function = CrossEntropyLoss()
# else:
# self.loss_function = MeanSquaredErrorLoss()
# def compute_loss(self, y_pred, y_true):
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function.compute_loss(y_pred, y_true)
class DynamicLossFunction(LossFunction):
def __init__(self):
self.loss_function = None
def determine_loss_function(self, y_pred, y_true):
# Implement logic to determine the type of data and select the appropriate loss function
# Check if the problem is a classification or regression task
is_classification = self.is_classification_task(y_true)
# Check if the problem involves multiple classes or binary classes
is_multiclass = self.is_multiclass_problem(y_true)
# Select the appropriate loss function based on the problem type
if is_classification:
if is_multiclass:
self.loss_function = CrossEntropyLoss()
else:
                self.loss_function = BinaryCrossEntropyLoss()
else:
self.loss_function = MeanSquaredErrorLoss()
def is_classification_task(self, y_true):
# Check if the target variable is binary or consists of integers (indicating class labels)
return np.issubdtype(y_true.dtype, np.integer)
    def is_multiclass_problem(self, y_true):
        # For one-hot encoded targets the number of classes is the number of columns;
        # otherwise count the unique label values in y_true
        if y_true.ndim > 1:
            return y_true.shape[1] > 2
        unique_values = np.unique(y_true)
        return len(unique_values) > 2
def compute_loss(self, y_pred, y_true):
self.determine_loss_function(y_pred, y_true)
return self.loss_function.compute_loss(y_pred, y_true)
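# Binary case (assumption: illustrative only; predictions are probabilities in [0, 1], as BCELoss expects):
# y_pred_binary = np.array([0.9, 0.2, 0.7])
# y_true_binary = np.array([1, 0, 1])
# DynamicLossFunction selects BinaryCrossEntropyLoss for targets with only two label values.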
# Example usage
y_pred_classification = np.array([[2.0, 1.0, 0.1], [1.0, 2.0, 0.1]])
y_true_classification = np.array([[1, 0, 0], [0, 1, 0]])
y_pred_regression = np.array([[2.5], [3.2]])
y_true_regression = np.array([[2.0], [3.0]])
dynamic_loss_function = DynamicLossFunction()
loss_classification = dynamic_loss_function.compute_loss(y_pred_classification, y_true_classification)
print("Dynamic loss for classification:", loss_classification)
loss_regression = dynamic_loss_function.compute_loss(y_pred_regression, y_true_regression)
print("Dynamic loss for regression:", loss_regression) | nebula-master | experimental/nebula.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
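# For example (assumption: illustrative): one_hot_encoding(torch.tensor([0, 2]), 3)
# returns tensor([[1., 0., 0.], [0., 0., 1.]]).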
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
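# For example (assumption: illustrative): generate_tensor_key(torch.randn(4, 3))
# returns ((4, 3), 'torch.float32'), which the commented v3 of compute_loss below
# would use as a hashable cache key.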
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
#KLDIvLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmotthL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
#     loss = Nebula().compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
# Define the reward based on the loss
reward = -loss.item()
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def map_action_to_loss_function(action):
if action == 0:
return CrossEntropyLoss()
elif action == 1:
return MSELoss()
#add more loss functions as needed
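# Minimal sanity check (assumption: illustrative only, not part of the original training flow):
# build one raw LossFunctionEnv and take a single step with action 0 (CrossEntropyLoss).
_demo_env = LossFunctionEnv(torch.randn(8, 3), torch.randint(0, 3, (8,)))
_demo_state = _demo_env.reset()
_next_state, _demo_reward, _demo_done, _ = _demo_env.step(0)
print(f"Demo env state: {_demo_state}, one-step reward: {_demo_reward:.4f}")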
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
# Initialize the PPO agent
agent = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
# Train the agent
agent.learn(total_timesteps=10000)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}") | nebula-master | experimental/reinforcement/nebula.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
#KLDIvLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmotthL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
#     loss = Nebula().compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
#define the reward based model on the loss
max_loss = torch.max(self.y_pred).item() * len(self.y_pred)
# Define the reward based on the loss
reward = -loss.item() / max_loss
#2nd loss
# reward = np.exp(-loss.item())
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
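# Quick illustration (assumption: for exposition only): repeated calls with the same action id
# reuse the cached loss-function instance instead of constructing a new one on every step.
assert map_action_to_loss_function(1) is map_action_to_loss_function(1)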
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
import optuna
# Define the evaluation function
def evaluate_agent(agent, y_pred, y_true, n_episodes=10):
env = DummyVecEnv([make_env(y_pred, y_true)])
rewards = []
    for _ in range(n_episodes):
        obs = env.reset()
        done = False
        episode_reward = 0
        # LossFunctionEnv never sets done=True, so cap the episode length explicitly
        # to keep evaluation from looping forever
        for _ in range(100):
            action, _ = agent.predict(obs, deterministic=True)
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
            if done:
                break
        rewards.append(episode_reward)
return np.mean(rewards)
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
# Define the objective function
def objective(trial):
# Sample hyperparameters
learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-2)
# Add more hyperparameters as needed
# Train the agent with the sampled hyperparameters
agent = A2C("MlpPolicy", env, learning_rate=learning_rate, gamma=0.50, policy_kwargs=policy_kwargs, verbose=1)
agent.learn(total_timesteps=10000)
# Evaluate the agent and return the performance metric
performance_metric = evaluate_agent(agent, y_pred_test, y_true_test)
return performance_metric
storage = optuna.storages.RDBStorage("sqlite:///example.db")
# Create an Optuna study and optimize the objective function
study = optuna.create_study(direction="maximize", storage=storage, study_name="my_study")
print(f"Study: {study}")
study.optimize(objective, n_trials=1)
best_hyperparameters = study.best_params
print(f"Study: {study} best hyperparameters: {best_hyperparameters}")
# Train the final agent with the best hyperparameters
final_agent = A2C("MlpPolicy", env, **best_hyperparameters, policy_kwargs=policy_kwargs, verbose=1)
final_agent.learn(total_timesteps=10000)
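# Optional sanity check (assumption: added for illustration; a tiny episode count keeps it cheap):
# evaluate the tuned agent on the held-out tensors defined above.
print("Mean evaluation reward of the final agent:", evaluate_agent(final_agent, y_pred_test, y_true_test, n_episodes=2))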
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = final_agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss encourages exploration by penalizing deterministic policies. Lower entropy loss indicates less exploration. You should focus on balancing this metric to ensure the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | nebula-master | experimental/reinforcement/experimental/nebula2.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
#KLDIvLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmotthL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
#     loss = Nebula().compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
from stable_baselines3.common.callbacks import BaseCallback
from torch.nn.utils import clip_grad_norm_
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
#define the reward based model on the loss
max_loss = torch.max(self.y_pred).item() * len(self.y_pred)
# Define the reward based on the loss
reward = -loss.item() / max_loss
#2nd loss
# reward = np.exp(-loss.item())
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
class CustomCallback(BaseCallback):
def __init__(self, patience, min_delta, initial_lr, decay_rate, decay_steps, max_norm, verbose=0):
super().__init__(verbose)
self.patience = patience
self.min_delta = min_delta
self.initial_lr = initial_lr
self.decay_rate = decay_rate
self.decay_steps = decay_steps
self.max_norm = max_norm
self.counter = 0
self.previous_performance = -np.inf
self.stop_training = False
def _on_step(self) -> bool:
        # Early stopping based on the mean episode reward
        # (ep_info_buffer is a deque of dicts, so average the "r" entries; skip while it is empty)
        if len(self.model.ep_info_buffer) > 0:
            performance = np.mean([ep_info["r"] for ep_info in self.model.ep_info_buffer])
            if performance - self.previous_performance < self.min_delta:
                self.counter += 1
            else:
                self.counter = 0
            if self.counter >= self.patience:
                self.stop_training = True
            self.previous_performance = performance
# Learning rate scheduler
step = self.num_timesteps
lr = self.initial_lr * (self.decay_rate ** (step / self.decay_steps))
for param_group in self.model.policy.optimizer.param_groups:
param_group['lr'] = lr
# Gradient clipping
clip_grad_norm_(self.model.policy.parameters(), self.max_norm)
return not self.stop_training
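# For reference (assumption: illustrative numbers matching the callback defaults below):
# with initial_lr=0.01, decay_rate=0.9 and decay_steps=1000, the scheduled learning rate
# after 2000 timesteps is 0.01 * 0.9 ** (2000 / 1000) = 0.0081.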
# Create the agent before training it, then train once with the custom callback
agent = A2C("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
callback = CustomCallback(patience=10, min_delta=0.001, initial_lr=0.01, decay_rate=0.9, decay_steps=1000, max_norm=1.0)
agent.learn(total_timesteps=10000, callback=callback)
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss encourages exploration by penalizing deterministic policies. Lower entropy loss indicates less exploration. You should focus on balancing this metric to ensure the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | nebula-master | experimental/reinforcement/experimental/nebula3.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
super().__init__()
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
y_true_one_hot = torch.zeros_like(y_pred)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return self.loss_function(y_pred, y_true_one_hot)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
#detector helper function
# def is_multi_label_classification(y_true: torch.Tensor):
# return y_true.shape[1] > 1 and y_true.dtype == torch.float
#v2
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
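# Illustrative expectations (assumption: added for exposition, not in the original file):
# is_multi_label_classification(torch.ones(4, 3))                      -> True (2-D float targets)
# contains_non_negative_integers(torch.tensor([0, 2, 5]))              -> True
# are_log_probabilities(torch.log_softmax(torch.randn(2, 3), dim=1))   -> True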
#generate unique key for a tensor
#v1
# def generate_tensor_key(tensor):
# return (tuple(tensor.shape), str(tensor.dtype))
#v2
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim.item(),)
# return (shape_tuple, str(tensor.dtype))
#v3
# def generate_tensor_key(tensor):
# shape_tuple = ()
# for dim in tensor.shape:
# shape_tuple += (dim,)
# return (shape_tuple, str(tensor.dtype))
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
def determine_loss_function(self, y_pred, y_true):
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
# The remaining code remains unchanged as it already incorporates the suggested optimizations
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
#Multi-LabelClassification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
#poissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
#KLDIvLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
# SmotthL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.loss_function = CrossEntropyLoss()
else:
self.loss_function = MSELoss()
# @torch.jit.script #optimization jit
def compute_loss(self, y_pred, y_true):
#v2
# tensor_key = HashableTensorWrapper(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
#v3
# tensor_key = generate_tensor_key(y_true)
# if tensor_key not in self.loss_function_cache:
# self.determine_loss_function(y_pred, y_true)
# return self.loss_function_cache[tensor_key](y_pred, y_true)
#move tensors nd model to gpu if available
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# y_pred, y_true = y_pred.to(device), y_true.to(device)
# # #example usage with the pytorch autograd profiler
# with torch.autograd.profiler.profile() as prof:
#     loss = Nebula().compute_loss(y_pred, y_true)
# print(prof.key_averages().table())
#reinforcement
from stable_baselines3 import PPO, A2C
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
import gym
from gym import spaces
class CustomFeaturesExtractor(BaseFeaturesExtractor):
def __init__(self, observation_space, features_dim):
super().__init__(observation_space, features_dim=features_dim)
print(f"Observation space: {observation_space} and features_dim: {features_dim} ")
def forward(self, observations):
# Extract features from the observations (state representation)
features = torch.tensor(observations).float()
return features
class LossFunctionEnv(gym.Env):
def __init__(self, y_pred, y_true):
super().__init__()
self.y_pred = y_pred
self.y_true = y_true
self.action_space = spaces.Discrete(len([CrossEntropyLoss, MSELoss])) # Add more loss functions as needed
self.observation_space = spaces.Box(low=0, high=float('inf'), shape=(2,), dtype=np.float32)
self.state = self.precompute_state(y_pred, y_true)
def precompute_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
def reset(self):
# Reset the environment and return the initial state
state = self.extract_state(self.y_pred, self.y_true)
return state
def step(self, action):
# Map the action to the corresponding loss function
loss_function = map_action_to_loss_function(action)
# Compute the loss using the selected loss function
loss = loss_function.compute_loss(self.y_pred, self.y_true)
# Define the reward based on the loss
reward = -loss.item()
# Check if the episode is done (e.g., after a certain number of steps or a certain loss threshold)
done = False
# Return the next state, reward, and done flag
next_state = self.extract_state(self.y_pred, self.y_true)
return next_state, reward, done, {}
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
#Cache action-to-loss-function mapping
action_to_loss_function_cache = {}
def map_action_to_loss_function(action):
# action = int(action[0])
if isinstance(action, np.ndarray):
action = action.item()
if action not in action_to_loss_function_cache:
if action == 0:
action_to_loss_function_cache[action] = CrossEntropyLoss()
elif action == 1:
action_to_loss_function_cache[action] = MSELoss()
return action_to_loss_function_cache[action]
#add more loss functions as needed
# Create a DummyVecEnv wrapper for the LossFunctionEnv
def make_env(y_pred, y_true):
def _init():
return LossFunctionEnv(y_pred, y_true)
return _init
y_pred = torch.randn(100, 10)
y_true = torch.randint(0, 10, (100,))
env = DummyVecEnv([make_env(y_pred, y_true)])
# Create a custom policy network that uses the CustomFeaturesExtractor
policy_kwargs = dict(
features_extractor_class=CustomFeaturesExtractor,
features_extractor_kwargs=dict(features_dim=env.observation_space.shape[0]), # Define the observation space based on the state representation
)
# Initialize the PPO agent
#Architecture: Replace PPO with SAC in the agent initialization.
# agent = PPO("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
#A2C
agent = A2C("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1)
# Train the agent
agent.learn(total_timesteps=10000)
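# Hedged sketch (not part of the original script): a minimal stable_baselines3 callback that
# records an extra value next to the built-in metrics described in the notes at the end of
# this file. The metric name "custom/last_reward" and the assumption that the vectorized env
# exposes per-step rewards via self.locals["rewards"] are illustrative.
from stable_baselines3.common.callbacks import BaseCallback

class MetricLoggingCallback(BaseCallback):
    def _on_step(self) -> bool:
        rewards = self.locals.get("rewards")
        if rewards is not None:
            # Log the most recent environment reward under a custom key
            self.logger.record("custom/last_reward", float(rewards[0]))
        return True

# Example (commented out to avoid re-training on import):
# agent.learn(total_timesteps=10000, callback=MetricLoggingCallback())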
# Use the trained agent in the NebulaOptimized class
class NebulaOptimized(Nebula):
def __init__(self, domain_knowledge=None, user_input=None):
super().__init__(domain_knowledge, user_input)
self.rl_agent = agent
def determine_loss_function(self, y_pred, y_true):
# Extract state representation from the data and model
# state = ... # Extract state representation from y_pred and y_true
state = self.extract_state(y_pred, y_true)
# Use the RL agent to select the optimal loss function
action, _ = self.rl_agent.predict(state, deterministic=True)
# Map the action to the corresponding loss function
self.loss_function = map_action_to_loss_function(action)
def extract_state(self, y_pred, y_true):
num_unique_values = len(torch.unique(y_true))
pred_range = torch.max(y_pred) - torch.min(y_pred)
state = [num_unique_values, pred_range.item()]
return state
nebula_optimized = NebulaOptimized()
#test the trained agent with new y_pred and y_true tensors
y_pred_test = torch.randn(100, 10)
y_true_test = torch.randint(0, 10, (100,))
nebula_optimized.determine_loss_function(y_pred_test, y_true_test)
print(f"Selected loss function {nebula_optimized.loss_function}")
"""
time/fps: Frames per second (FPS) measures the number of training steps the agent takes per second. Higher FPS indicates faster training. You should focus on increasing this metric to speed up the training process.
time/iterations: The number of training iterations completed by the agent. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
time/time_elapsed: The total time elapsed (in seconds) since the start of the training process. You should focus on lowering this metric to reduce the overall training time.
time/total_timesteps: The total number of timesteps the agent has experienced during training. You should focus on increasing this metric to ensure the agent has enough training experience.
train/entropy_loss: The entropy loss encourages exploration by penalizing deterministic policies. Lower entropy loss indicates less exploration. You should focus on balancing this metric to ensure the agent explores the environment sufficiently without getting stuck in suboptimal solutions.
train/explained_variance: Explained variance measures how well the agent's value function approximates the true value function. A value of 1 indicates a perfect approximation, while a value of 0 indicates no correlation. You should focus on increasing this metric to improve the agent's value function approximation.
train/learning_rate: The learning rate determines the step size for updating the agent's parameters during training. You should focus on finding the optimal learning rate that balances convergence speed and stability.
train/n_updates: The number of parameter updates performed by the agent during training. This metric increases as the agent trains. You should focus on increasing this metric to ensure the agent has enough training experience.
train/policy_loss: The policy loss measures the difference between the agent's predicted actions and the optimal actions. You should focus on lowering this metric to improve the agent's policy.
train/value_loss: The value loss measures the difference between the agent's predicted state values and the true state values. You should focus on lowering this metric to improve the agent's value function approximation.
In summary, you should focus on increasing the metrics related to training experience (iterations, total_timesteps, n_updates) and performance (explained_variance), while lowering the metrics related to training time (time_elapsed) and losses (policy_loss, value_loss). Balancing exploration (entropy_loss) and finding the optimal learning rate are also important for achieving good performance.
""" | nebula-master | experimental/reinforcement/experimental/nebula1.py |
import torch
import torch.nn as nn
import numpy as np
# Continual Learning Mechanism Class
class ContinualLearningMechanism(nn.Module):
def __init__(self, pretrained_model=None):
super(ContinualLearningMechanism, self).__init__()
self.model = nn.Sequential(
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 10),
)
if pretrained_model:
self.model.load_state_dict(pretrained_model.state_dict())
def forward(self, x):
return self.model(x)
# Contrastive Learning Component Class
class ContrastiveLearningComponent(nn.Module):
def __init__(self):
super(ContrastiveLearningComponent, self).__init__()
def forward(self, x, x_augmented):
return torch.norm(x - x_augmented, p=2)
# Meta Learner Class
class MetaLearner(nn.Module):
def __init__(self):
super(MetaLearner, self).__init__()
self.l1_loss = nn.L1Loss()
self.l2_loss = nn.MSELoss()
def forward(self, x, y, task):
if task == 'task1':
return self.l1_loss(x, y)
elif task == 'task2':
return self.l2_loss(x, y)
# Nebula Class
class Nebula(nn.Module):
def __init__(self):
super(Nebula, self).__init__()
self.learning_mechanism = ContinualLearningMechanism()
self.contrastive_component = ContrastiveLearningComponent()
self.metalearner = MetaLearner()
def forward(self, x, y, x_augmented, task):
output = self.learning_mechanism(x)
loss_task = self.metalearner(output, y, task)
loss_contrastive = self.contrastive_component(x, x_augmented)
# Here is where we combine the losses. The alpha and beta parameters
# could be additional parameters to the class that could be learned
alpha = 0.5
beta = 0.5
total_loss = alpha * loss_task + beta * loss_contrastive
return total_loss | nebula-master | nebula/nebula_multimodal.py |
# !pip install deap
import random
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from deap import creator, base, tools, algorithms
# Create the model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(10, 32)
self.fc2 = nn.Linear(32, 2)
def forward(self, x):
x = torch.relu(self.fc1(x))
return torch.sigmoid(self.fc2(x))
# Define a simple neural network for loss function
class LossNet(nn.Module):
def __init__(self):
super(LossNet, self).__init__()
self.fc1 = nn.Linear(2, 10)
self.fc2 = nn.Linear(10, 1)
def forward(self, x):
x = torch.relu(self.fc1(x))
return torch.relu(self.fc2(x))
class Nebula:
def __init__(self, model, loss_function, toolbox, X, y, population_size=10, num_generations=100):
self.model = model
self.loss_function = loss_function
self.toolbox = toolbox
self.X = X
self.y = y
self.population_size = population_size
self.num_generations = num_generations
self.toolbox.register("evaluate", self.evaluate)
self.hof = tools.HallOfFame(1)
self.stats = tools.Statistics(lambda ind: ind.fitness.values)
self.stats.register("avg", np.mean)
self.stats.register("min", np.min)
self.stats.register("max", np.max)
def evaluate(self, individual):
weights = torch.Tensor(individual).view(self.loss_function.fc1.weight.shape)
with torch.no_grad():
self.loss_function.fc1.weight.data = weights
output = self.model(self.X)
            loss = self.loss_function(output).mean()  # LossNet consumes only the model's 2-d predictions; y is not an input here
return loss.item(),
def train(self):
pop = self.toolbox.population(n=self.population_size)
pop, logbook = algorithms.eaSimple(pop, self.toolbox, cxpb=0.5, mutpb=0.2, ngen=self.num_generations, stats=self.stats, halloffame=self.hof, verbose=True)
print(f'Best loss function: {self.hof[0]}')
# Initialize the model and the loss function
model = Net()
loss_function = LossNet()
toolbox = base.Toolbox()
# Define genetic algorithm related settings
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=100)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
toolbox.register("select", tools.selTournament, tournsize=3)
# Assume we have some data X, y
X = torch.randn(100, 10)
y = torch.randint(0, 2, (100,))
# Create Nebula instance and train
nebula = Nebula(model, loss_function, toolbox, X, y)
nebula.train()
| nebula-master | nebula/nebula_genetic.py |
from nebula.nebula import Nebula
from nebula.nebula import one_hot_encoding | nebula-master | nebula/__init__.py |
import torch
import torch.nn as nn
import numpy as np
from sklearn.model_selection import train_test_split
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(10, 32)
self.fc2 = nn.Linear(32, 3)
def forward(self, x):
x = torch.relu(self.fc1(x))
return torch.sigmoid(self.fc2(x))
class NebulaSearch:
def __init__(self, model, loss_functions, num_iterations, lr):
self.model = model
self.loss_functions = loss_functions
self.num_iterations = num_iterations
self.lr = lr
self.prob_dist = np.ones(len(loss_functions)) / len(loss_functions)
def train(self, X, y):
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
for i in range(self.num_iterations):
chosen_loss_func = np.random.choice(self.loss_functions, p=self.prob_dist)
self.model.zero_grad()
outputs = self.model(X)
loss = chosen_loss_func(outputs, y)
loss.backward()
optimizer.step()
performance = self.evaluate(X, y)
self.prob_dist = self.update_distribution(performance)
print(f"Best loss function: {self.loss_functions[np.argmax(self.prob_dist)]}")
def evaluate(self, X, y):
with torch.no_grad():
outputs = self.model(X)
loss = self.loss_functions[np.argmax(self.prob_dist)](outputs, y)
return -loss.item()
def update_distribution(self, performance):
self.prob_dist *= np.exp(performance)
self.prob_dist /= np.sum(self.prob_dist)
return self.prob_dist
#assume we have some data X, Y
X = torch.randn(100, 10)
y = torch.randint(0, 2, (100,))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
loss_functions = [nn.CrossEntropyLoss(), nn.BCELoss(), nn.MSELoss()]
model = Net()
nebula = NebulaSearch(model, loss_functions, num_iterations=100, lr=0.001)
nebula.train(X_train, y_train)
| nebula-master | nebula/nebula_search.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.jit
import numpy as np
import logging
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
def one_hot_encoding(y_true, num_classes):
y_true_one_hot = torch.zeros(y_true.size(0), num_classes)
y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1)
return y_true_one_hot
class SmoothL1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.SmoothL1Loss()
    def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
        return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class NLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.NLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class HashableTensorWrapper:
def __init__(self, tensor):
self.tensor = tensor
self.tensor_shape = tensor.shape
self.tensor_dtype = tensor.dtype
def __hash__(self):
return hash((self.tensor_shape, self.tensor_dtype))
def __eq__(self, other):
return isinstance(other, HashableTensorWrapper) and self.tensor_shape == other.tensor_shape and self.tensor_dtype == other.tensor_dtype
def is_multi_label_classification(y_true: torch.Tensor) -> bool:
return len(y_true.shape) > 1 and y_true.shape[1] > 1 and y_true.dtype == torch.float
def contains_non_negative_integers(y_true):
return torch.all(y_true >= 0) and torch.all(y_true == y_true.to(torch.int64))
def are_probability_distributions(y_pred, y_true):
return torch.all(y_pred >= 0) and torch.all(y_pred <= 1) and torch.all(y_true >= 0) and torch.all(y_true <= 1)
def are_log_probabilities(y_pred):
return torch.all(y_pred <= 0)
#v4
def generate_tensor_key(tensor):
shape_tuple = ()
for dim in tensor.shape:
shape_tuple += (dim,)
return (shape_tuple, str(tensor.dtype))
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class Nebula(LossFunction):
def __init__(self, domain_knowledge=None, user_input=None):
self.loss_function = None
self.domain_knowledge = domain_knowledge
self.user_input = user_input
self.loss_function_cache = {}
self.unique_values_cache = {}
self.class_balance_cache = {}
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
self.logger.addHandler(handler)
def determine_loss_function(self, y_pred, y_true):
self.logger.info("Determining the loss function")
is_classification = None
dataset_id = id(y_true)
# Cache unique values
if dataset_id not in self.unique_values_cache:
self.unique_values_cache[dataset_id] = torch.unique(y_true)
unique_values = self.unique_values_cache[dataset_id]
# Cache class balance
if dataset_id not in self.class_balance_cache:
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
self.class_balance_cache[dataset_id] = value_counts / torch.sum(value_counts)
class_balance = self.class_balance_cache[dataset_id]
# Optimization 2: Use PyTorch functions instead of NumPy
value_counts = torch.bincount(y_true.flatten().to(dtype=torch.int64))
        # The heuristics below infer whether the task is classification or regression
if is_classification is None:
if len(unique_values) <= 10 and torch.all(torch.eq(unique_values % 1, 0)):
is_classification = True
if is_classification is None:
if torch.all(value_counts > 0):
is_classification = True
if y_pred.ndim > 2:
pass
if is_classification is None:
sparsity = torch.count_nonzero(y_true) / y_true.numel()
if sparsity < 0.5:
self.loss_function = torch.nn.BCEWithLogitsLoss()
self.compute_loss = self.loss_function
return
y_pred_flat = y_pred.flatten()
y_true_flat = y_true.flatten()
if y_pred_flat.shape != y_true_flat.shape:
y_pred_flat = y_pred_flat[:y_true_flat.numel()]
correlation = torch.tensor(np.corrcoef(y_pred_flat.cpu().numpy(), y_true_flat.cpu().numpy())[0, 1])
if is_classification is None:
if self.domain_knowledge == "classification":
is_classification = True
elif self.domain_knowledge == "regression":
is_classification = False
if is_classification is None:
if torch.max(y_pred) > 0.9:
is_classification = True
if is_classification is None:
if torch.any(class_balance < 0.1):
is_classification = True
if is_classification is None:
if self.user_input == "classification":
is_classification = True
elif self.user_input == "regression":
is_classification = False
        # Multi-label classification
if is_multi_label_classification(y_true):
self.loss_function = MultiLabelSoftMarginLoss()
        # PoissonNLLLoss
if contains_non_negative_integers(y_true):
self.loss_function = PoissonNLLoss()
        # KLDivLoss
if are_probability_distributions(y_pred, y_true):
self.loss_function = KLDivLoss()
#NLLLoss
if is_classification and are_log_probabilities(y_pred):
self.loss_function = NLLLoss()
        # SmoothL1Loss
if is_classification is None:
#check range of values in y_true
if torch.min(y_true) >= 0 and torch.max(y_true) <= 1:
self.loss_function = SmoothL1Loss()
# Set the loss function based on the determined problem type
if is_classification:
self.logger.info("Determined problem as classification. Using CrossEntropyLoss")
self.loss_function = CrossEntropyLoss()
else:
self.logger.info("Determining loss function for this dataset")
self.loss_function = MSELoss()
def compute_loss(self, y_pred, y_true):
# V1
dataset_id = id(y_true)
if dataset_id not in self.loss_function_cache:
self.logger.info("Determining loss function for the dataset")
self.determine_loss_function(y_pred, y_true)
self.loss_function_cache[dataset_id] = self.loss_function
cached_loss_function = self.loss_function_cache[dataset_id]
return cached_loss_function.compute_loss(y_pred, y_true)
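# Hedged usage sketch (not part of the original module): Nebula inspects the tensors it is
# given and caches the loss function it picks per dataset id. The tensor sizes and the
# expectation that integer labels steer it towards CrossEntropyLoss are illustrative.
if __name__ == "__main__":
    y_pred_demo = torch.rand(8, 4)           # scores for 8 samples over 4 classes
    y_true_demo = torch.randint(0, 4, (8,))  # integer class labels
    nebula_demo = Nebula()
    print(nebula_demo.compute_loss(y_pred_demo, y_true_demo))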
| nebula-master | nebula/nebula.py |
# from nebulaV4 import one_hot_encoding
# from nebulaV4 import Nebula
# import torch
# import numpy as np
# import matplotlib.pyplot as plt
# import torch.nn as nn
# class LossFunction:
# def compute_loss(self, y_pred, y_true):
# raise NotImplemented("compute_loss method must be implemented")
# #implement specific loss functions that inherit from LossFunction
# class L1Loss(LossFunction):
# def __init__(self):
# self.loss_function = nn.L1Loss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
# class MSELoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.MSELoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
# class CrossEntropyLoss(LossFunction):
# def __init__(self):
# self.loss_function = nn.CrossEntropyLoss()
# def compute_loss(self, y_pred, y_true):
# return self.loss_function(y_pred, y_true)
# def prepare_targets(loss_function, y_true, num_classes=None):
# if isinstance(loss_function, L1Loss) and num_classes is not None:
# return one_hot_encoding(y_true, num_classes)
# return y_true
# def generate_classification_data(num_samples, num_classes):
# y_true = torch.randint(0, num_classes, (num_samples,))
# y_pred = torch.rand(num_samples, num_classes)
# return y_pred, y_true
# def generate_regression_data(num_samples):
# y_true = torch.randn(num_samples)
# y_pred = torch.randn(num_samples)
# return y_pred, y_true
# def test_loss_functions(loss_functions, y_pred, y_true, num_classes=None):
# results = []
# for loss_function in loss_functions:
# prepared_y_true = prepare_targets(loss_function, y_true, num_classes)
# loss = loss_function.compute_loss(y_pred, prepared_y_true)
# results.append(loss.item())
# return results
# def plot_loss_comparison(loss_functions, losses):
# loss_function_names = [loss_function.__class__.__name__ for loss_function in loss_functions]
# plt.bar(loss_function_names, losses)
# plt.xlabel("Loss Functions")
# plt.ylabel("Loss Value")
# plt.title("Loss Function Comparison")
# plt.show()
# batch_size = 100
# num_classes = 5
# y_true_classification = torch.randint(0, num_classes, (batch_size,))
# num_classes = y_true_classification.max().item() + 1
# # Generate classification data
# y_pred_classification, y_true_classification = generate_classification_data(num_classes, num_classes)
# # Generate regression data
# y_pred_regression, y_true_regression = generate_regression_data(num_classes)
# # Loss functions to compare
# loss_functions = [Nebula(), L1Loss(), MSELoss(), CrossEntropyLoss()]
# # Test classification data
# print("Classification Losses:")
# classification_losses = test_loss_functions(loss_functions, y_pred_classification, y_true_classification, num_classes=num_classes)
# # Test regression data
# print("\nRegression Losses:")
# regression_losses = test_loss_functions(loss_functions, y_pred_regression, y_true_regression)
# # Plot comparison
# print("\nLoss Comparison for Classification:")
# plot_loss_comparison(loss_functions, classification_losses)
# print("\nLoss Comparison for Regression:")
# plot_loss_comparison(loss_functions, regression_losses)
# from nebulaV4 import one_hot_encoding
# from nebulaV4 import Nebula
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
from nebula.nebula import Nebula
from nebula.nebula import one_hot_encoding
import torch.nn.functional as F
def generate_multilabel_classification_data(num_samples, num_classes):
y_true = torch.randint(0, 2, (num_samples, num_classes)).float()
y_pred = torch.rand(num_samples, num_classes)
return y_pred, y_true
class LossFunction:
def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented")
class L1Loss(LossFunction):
def __init__(self):
self.loss_function = nn.L1Loss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MSELoss(LossFunction):
def __init__(self):
self.loss_function = nn.MSELoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class CrossEntropyLoss(LossFunction):
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class MultiLabelSoftMarginLoss(LossFunction):
def __init__(self):
self.loss_function = nn.MultiLabelSoftMarginLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class PoissonNLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
class KLDivLoss(LossFunction):
def __init__(self):
self.loss_function = nn.KLDivLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(F.log_softmax(y_pred, dim=1), y_true)
class PoissonNLLLoss(LossFunction):
def __init__(self):
self.loss_function = nn.PoissonNLLLoss()
def compute_loss(self, y_pred, y_true):
return self.loss_function(y_pred, y_true)
# def prepare_targets(loss_function, y_true, num_classes=None):
# if isinstance(loss_function, L1Loss) and num_classes is not None:
# return one_hot_encoding(y_true, num_classes)
# return y_true
def prepare_targets(loss_function, y_true, num_classes=None):
if (isinstance(loss_function, L1Loss) or isinstance(loss_function, MSELoss)) and num_classes is not None:
return one_hot_encoding(y_true, num_classes)
if isinstance(loss_function, PoissonNLLLoss):
return y_true.view(-1, 1).expand(-1, num_classes)
if isinstance(loss_function, KLDivLoss):
return y_true.float()
return y_true
def generate_classification_data(num_samples, num_classes, for_poisson_nll=False):
y_true = torch.randint(0, num_classes, (num_samples,))
if for_poisson_nll:
y_true = y_true.view(-1, 1).expand(-1, num_classes).float()
y_pred = torch.rand(num_samples, num_classes)
return y_pred, y_true
def generate_regression_data(num_samples):
y_true = torch.abs(torch.randn(num_samples))
y_pred = torch.randn(num_samples)
return y_pred, y_true
# def test_loss_functions(loss_functions, y_pred, y_true, num_classes=None):
# results = []
# for loss_function in loss_functions:
# prepared_y_true = prepare_targets(loss_function, y_true, num_classes)
# loss = loss_function.compute_loss(y_pred, prepared_y_true)
# results.append(loss.item())
# return results
def test_loss_functions(loss_functions, y_pred, y_true, num_classes=None):
losses = []
for loss_function in loss_functions:
for_poisson_nll = isinstance(loss_function, PoissonNLLLoss)
if num_classes is not None and not for_poisson_nll:
y_true = y_true.squeeze()
elif for_poisson_nll:
y_true = y_true.view(-1, 1).expand(-1, num_classes)
prepared_y_true = prepare_targets(loss_function, y_true, num_classes)
loss = loss_function.compute_loss(y_pred, prepared_y_true)
losses.append(loss.item())
return losses
def plot_loss_comparison(loss_functions, losses):
loss_function_names = [loss_function.__class__.__name__ for loss_function in loss_functions]
plt.bar(loss_function_names, losses)
plt.xlabel("Loss Functions")
plt.ylabel("Loss Value")
plt.title("Loss Function Comparison")
plt.show()
batch_size = 100
num_classes = 5
y_true_classification = torch.randint(0, num_classes, (batch_size,))
num_classes = y_true_classification.max().item() + 1
# Generate classification data
y_pred_classification, y_true_classification = generate_classification_data(batch_size, num_classes)
y_pred_multilabel_classification, y_true_multilabel_classification = generate_multilabel_classification_data(batch_size, num_classes)
# Generate regression data
y_pred_regression, y_true_regression = generate_regression_data(batch_size)
# Loss functions to compare
loss_functions = [Nebula(), L1Loss(), MSELoss(), CrossEntropyLoss(), PoissonNLLoss(), KLDivLoss(), PoissonNLLLoss()]
# Test classification data
# # Test classification data
print("Classification Losses:")
classification_losses = test_loss_functions(loss_functions, y_pred_classification, y_true_classification, num_classes=num_classes)
# Test regression data
print("\nRegression Losses:")
regression_losses = test_loss_functions(loss_functions, y_pred_regression, y_true_regression)
# Plot comparison
print("\nLoss Comparison for Classification:")
plot_loss_comparison(loss_functions, classification_losses)
print("\nLoss Comparison for Regression:")
plot_loss_comparison(loss_functions, regression_losses)
# Test multi-label classification data
print("Multi-label Classification Losses:")
multilabel_classification_losses = test_loss_functions(loss_functions, y_pred_multilabel_classification, y_true_multilabel_classification, num_classes=num_classes)
# Plot comparison
print("\nLoss Comparison for Multi-label Classification:")
plot_loss_comparison(loss_functions, multilabel_classification_losses) | nebula-master | testing/test.py |
from setuptools import setup, find_packages
#
setup(
name = 'SwarmLogic',
packages = find_packages(exclude=[]),
version = '0.6.3',
license='MIT',
description = 'SwarmLogic - Pytorch',
author = 'Kye Gomez',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/kyegomez/SwarmLogic',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers',
"Prompt Engineering"
],
install_requires=[
'swarms',
'fastapi'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | SwarmLogic-main | setup.py |
from swarm_logic.SwarmLogic import api | SwarmLogic-main | swarm_logic/__init__.py |
from fastapi import FastAPI, HTTPException
from typing import Optional
import json
import logging
from pydantic import BaseModel
from swarms import Worker
class AppState(BaseModel):
app_name: str
api_call: str
# Set up logging
logging.basicConfig(filename="app.log", level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Initialize Swarms with your API key
api_key = "your-api-key-here"
swarm = Worker(openai_api_key=api_key)
app = FastAPI()
@app.post("/{app_name}/{api_call}")
async def api(app_state: AppState):
try:
db = json.load(open('db.json', 'r'))
except Exception as e:
logging.error("Error loading database: %s", e)
raise HTTPException(status_code=500, detail="Error loading database")
prompt = f"""{db[app_state.app_name]["prompt"]}
API Call (indexes are zero-indexed):
{app_state.api_call}
Database State:
{db[app_state.app_name]["state"]}
Output the API response as json prefixed with '!API response!:'. Then output the new database state as json, prefixed with '!New Database State!:'. If the API call is only requesting data, then don't change the database state, but base your 'API Response' off what's in the database.
"""
try:
# Update to call the swarm model
response = swarm.run_swarms(prompt)
new_state = response['new_database_state']
if new_state:
db[app_state.app_name]["state"] = new_state
json.dump(db, open('db.json', 'w'), indent=4)
except Exception as e:
logging.error("Error running model or updating state: %s", e)
raise HTTPException(status_code=500, detail="Error running model or updating state")
return response
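# Hedged usage sketch (not part of the original module): the endpoint above expects a db.json
# file whose top-level keys are app names with "prompt" and "state" entries, e.g.
#   {"todo_app": {"prompt": "You are a todo list backend.", "state": {"todos": []}}}
# The file layout and the example request below are assumptions, and a valid OpenAI key must
# be configured above for swarm.run_swarms to succeed.
#
# Example request once the server is running:
#   curl -X POST http://127.0.0.1:8000/todo_app/add_todo \
#        -H "Content-Type: application/json" \
#        -d '{"app_name": "todo_app", "api_call": "add_todo(\"buy milk\")"}'
if __name__ == "__main__":
    # Convenience entry point; assumes uvicorn is installed alongside fastapi
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)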
| SwarmLogic-main | swarm_logic/SwarmLogic.py |
from typing import Iterable
from base import DatabaseConnector
import opendal
class OpenDALConnector(DatabaseConnector):
def __init__(self, scheme, **kwargs):
self.op = opendal.Operator(scheme, **kwargs)
def read(self, path: str) -> bytes:
return self.op.read(path)
def write(self, path: str, bs: bytes):
self.op.write(path, bs)
def stat(self, path: str) -> opendal.Metadata:
return self.op.stat(path)
def create_dir(self, path: str):
self.op.create_dir(path)
def delete(self, path: str):
self.op.delete(path)
def list(self, path: str) -> Iterable[opendal.Entry]:
return self.op.list(path)
def scan(self, path: str) -> Iterable[opendal.Entry]:
return self.op.scan(path) | SwarmLogic-main | swarm_logic/connectors/dal.py |
from sqlalchemy import create_engine, MetaData, Table, select
from sqlalchemy.orm import sessionmaker, scoped_session
import opendal
from base import DatabaseConnector
class SQLAlchemyConnector(DatabaseConnector):
def __init__(self, database_url):
self.engine = create_engine(database_url)
self.metadata = MetaData()
def read(self, path: str) -> bytes:
#implement read operation
pass
def write(self, path: str, bs: bytes):
        # Implement the SQLAlchemy write operation
pass
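# Hedged sketch (not part of the original repository): one way the read/write stubs above
# could be backed by a simple key/value "blobs" table using SQLAlchemy Core (1.4+ select
# style). The table name, column names, and helper function names are assumptions.
from sqlalchemy import Column, String, LargeBinary

def _example_blobs_table(metadata):
    # Declare (or reuse) a table mapping a path to raw bytes
    return Table(
        "blobs",
        metadata,
        Column("path", String, primary_key=True),
        Column("data", LargeBinary),
        extend_existing=True,
    )

def example_sqlalchemy_write(engine, metadata, path: str, bs: bytes):
    blobs = _example_blobs_table(metadata)
    blobs.create(engine, checkfirst=True)
    with engine.begin() as conn:
        # Upsert by deleting any existing row for this path first
        conn.execute(blobs.delete().where(blobs.c.path == path))
        conn.execute(blobs.insert().values(path=path, data=bs))

def example_sqlalchemy_read(engine, metadata, path: str) -> bytes:
    # Assumes a prior write has created the table
    blobs = _example_blobs_table(metadata)
    with engine.connect() as conn:
        row = conn.execute(select(blobs.c.data).where(blobs.c.path == path)).fetchone()
        return row[0] if row is not None else b""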
| SwarmLogic-main | swarm_logic/connectors/alchemy.py |
from swarm_logic.connectors.alchemy import SQLAlchemyConnector
from swarm_logic.connectors.base import DatabaseConnector
from swarm_logic.connectors.dal import OpenDALConnector
from swarm_logic.connectors.json import JsonDBConnector
| SwarmLogic-main | swarm_logic/connectors/__init__.py |
from base import DatabaseConnector
class JsonDBConnector(DatabaseConnector):
def read(self, path: str) -> bytes:
with open(path, 'r') as f:
return f.read().encode()
def write(self, path: str, bs: bytes):
with open(path, 'w') as f:
f.write(bs.decode())
| SwarmLogic-main | swarm_logic/connectors/json.py |
from abc import ABC, abstractmethod
from typing import Iterable
import opendal
class DatabaseConnector(ABC):
@abstractmethod
def read(self, path: str) -> bytes:
pass
@abstractmethod
def write(self, path: str, bs: bytes):
pass
@abstractmethod
def stat(self, path: str) -> opendal.Metadata:
pass
@abstractmethod
def create_dir(self, path: str):
pass
@abstractmethod
def delete(self, path: str):
pass
@abstractmethod
def list(self, path: str) -> Iterable[opendal.Entry]:
pass
@abstractmethod
def scan(self, path: str) -> Iterable[opendal.Entry]:
pass
| SwarmLogic-main | swarm_logic/connectors/base.py |
SwarmLogic-main | swarm_logic/experimental/__init__.py |
|
from fastapi import FastAPI, HTTPException
from typing import Optional
import json
import logging
from pydantic import BaseModel
from swarms import Swarms
from swarm_logic.connectors import SQLAlchemyConnector, OpenDALConnector, JsonDBConnector
class AppState(BaseModel):
app_name: str
api_call: str
    db_type: str  # selects the storage backend: "sqlalchemy", "opendal", or "json"
# Set up logging
logging.basicConfig(filename="app.log", level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Initialize Swarms with your API key
api_key = "your-api-key-here"
swarm = Swarms(openai_api_key=api_key)
app = FastAPI()
@app.post("/{app_name}/{api_call}")
async def api(app_state: AppState):
if app_state.db_type == "sqlachemy":
connector = SQLAlchemyConnector("sqlite:///db.sqlite")
elif app_state.db_type == "opendal":
connector = OpenDALConnector('fs', root="/tmp")
elif app_state.db_type == 'json':
connector = JsonDBConnector()
else:
raise HTTPException(status_code=400, detail="Unsported database type")
try:
data = connector.read('db.json')
db = json.loads(data)
except Exception as e:
logging.error("Error loading datbase %s", e)
raise HTTPException(status_code=500, detail="Error loading database")
prompt = f"""{db[app_state.app_name]["prompt"]}
API Call (indexes are zero-indexed):
{app_state.api_call}
Database State:
{db[app_state.app_name]["state"]}
Output the API response as json prefixed with '!API response!:'. Then output the new database state as json, prefixed with '!New Database State!:'. If the API call is only requesting data, then don't change the database state, but base your 'API Response' off what's in the database.
"""
try:
# Update to call the swarm model
response = swarm.run_swarms(prompt)
new_state = response['new_database_state']
if new_state:
db[app_state.app_name]["state"] = new_state
            connector.write('db.json', json.dumps(db).encode())  # connectors expect bytes
except Exception as e:
logging.error("Error running model or updating state: %s", e)
raise HTTPException(status_code=500, detail="Error running model or updating state")
return response
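# Hedged usage sketch (not part of the original module): compared to the base service, the
# request payload carries a db_type field that selects the storage backend per call. The app
# name and api_call below are illustrative assumptions.
EXAMPLE_REQUEST_PAYLOAD = {
    "app_name": "todo_app",
    "api_call": "list_todos()",
    "db_type": "json",  # one of "sqlalchemy", "opendal", "json" per the routing above
}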
| SwarmLogic-main | swarm_logic/experimental/SwarmLogicExperimental.py |
from fastapi import FastAPI, HTTPException
from typing import Optional
import json
import logging
from pydantic import BaseModel
from swarms import Worker
class AppState(BaseModel):
app_name: str
api_call: str
# Set up logging
logging.basicConfig(filename="app.log", level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# Initialize the worker with your API key
api_key = "your-api-key-here"
worker = Worker(openai_api_key=api_key)
app = FastAPI()
@app.post("/{app_name}/{api_call}")
async def api(app_state: AppState):
try:
db = json.load(open('db.json', 'r'))
except Exception as e:
logging.error("Error loading database: %s", e)
raise HTTPException(status_code=500, detail="Error loading database")
prompt = f"""{db[app_state.app_name]["prompt"]}
API Call (indexes are zero-indexed):
{app_state.api_call}
Database State:
{db[app_state.app_name]["state"]}
Output the API response as json prefixed with '!API response!:'. Then output the new database state as json, prefixed with '!New Database State!:'. If the API call is only requesting data, then don't change the database state, but base your 'API Response' off what's in the database.
"""
try:
# Update to call the worker model
response = worker.run(prompt)
new_state = response['new_database_state']
if new_state:
db[app_state.app_name]["state"] = new_state
json.dump(db, open('db.json', 'w'), indent=4)
except Exception as e:
logging.error("Error running model or updating state: %s", e)
raise HTTPException(status_code=500, detail="Error running model or updating state")
return response
| SwarmLogic-main | swarm_logic/experimental/SwarmLogicWorker.py |
SwarmLogic-main | swarm_logic/utils/__init__.py |
|
# Open up the swarms class to intake external tools like Swarm(tool)
import traceback
import logging
from swarms import Swarms
api_key = "your-api-key-here"
swarm = Swarms(openai_api_key=api_key)
logging.basicConfig(filename="app.log", level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
def log_exception(exc):
    # Capture the full traceback along with the exception
    tb_lines = traceback.format_exception(exc.__class__, exc, exc.__traceback__)
    tb_text = ''.join(tb_lines)
    # Log the exception together with its traceback
    logging.error("Caught an exception:\n%s", tb_text)
def handle_exception(exc):
log_exception(exc)
    # Run swarms on the exception to interpret it
response = swarm.run_swarms(f"Interpret this error:\n{str(exc)}")
#todo handle the exception based on the swarms response | SwarmLogic-main | swarm_logic/utils/error_handling.py |
MultiGroupQueryAttention-main | example.py |
|
MultiGroupQueryAttention-main | mgqa/__init__.py |
|
MultiGroupQueryAttention-main | mgqa/main.py |
|
from aot.main import AoT
task = "Create GPT-2"
system = f"""
You are Quoc V. Le, a computer scientist and artificial intelligence researcher who is
widely regarded as one of the leading experts in deep learning and neural network architecture search.
Your work in this area has focused on developing efficient algorithms for searching the
space of possible neural network architectures, with the goal of finding architectures
that perform well on a given task while minimizing the computational cost of training and inference.
You are an expert in the field of neural architecture search.
Your task is to assist me in selecting the best operations to design a neural network
block using the available operations.
The objective is to maximize the model's performance
The 5 available operations are as follows:
0: Zeroize() # This operation simply outputs a tensor of zeros regardless of the input, which breaks the gradient flow between two nodes.
1: nn.Identity() # Skip Connection.
2: ReLUConvBN(channels, channels, kernel_size=1, stride=1, padding=0) # The input channels and output channels are the same.
3: ReLUConvBN(channels, channels, kernel_size=3, stride=1, padding=1) # The input channels and output channels are the same.
4: nn.AvgPool2d(kernel_size=3, stride=1, padding=1) # This operation does not change the spatial resolution.
The neural network block is defined by 6 operations (i.e., op_list = [op0, op1, op2, op3, op4, op5]), which represent the operations executed between various stages of the block. This block comprises 4 stages, labeled as s0, s1, s2, and s3, each corresponding to distinct feature maps in the neural network.
s0 serves as the input feature map for this block.
s1 will be calculated by s1 = op0(s0).
s2 will be calculated by s2 = op1(s0) + op2(s1).
s3 will be calculated by s3 = op3(s0) + op4(s1) + op5(s2). Note that s3 becomes the output for this block and serves as the input for the subsequent block.
Then the implementation of the block will be:
class Block(nn.Module):
def __init__(self, channels):
super(Block, self).__init__()
self.op0 = op_id_list[0]
self.op1 = op_id_list[1]
self.op2 = op_id_list[2]
self.op3 = op_id_list[3]
self.op4 = op_id_list[4]
self.op5 = op_id_list[5]
def forward(self, s0):
s1 = self.op0(s0)
s2 = self.op1(s0) + self.op2(s1)
s3 = self.op3(s0) + self.op4(s1) + self.op5(s2)
return s3
Let's break this down step by step:
First, please analyze the 5 available operations.
Next, please consider the gradient flow based on the Block class implementation. For example, how the gradient from the later stage affects the earlier stage.
Now, answer the question - how we can design a high-performance block using the available operations?
Based the analysis, your task is to propose a block design with the given operations that prioritizes performance, without considering factors such as size and complexity.
After you suggest a design, I will test its actual performance and provide you with feedback. Based on the results of previous experiments, we can collaborate to iterate and improve the design. Please avoid suggesting the same design again during this iterative process.
{task}
"""
dfs = AoT(
num_thoughts=2,
max_steps=10,
value_threshold=1,
initial_prompt=system,
openai_api_key="ENETER IN YOUR API KEY"
)
result = dfs.solve()
print(result) | Algorithm-Of-Thoughts-main | neural_search_example.py |
from aot.main import AoT
task = """
Use numbers and basic arithmetic operations (+ - * /) to obtain 24. When
considering the next steps, do not choose operations that will result in a
negative or fractional number. In order to help with the calculations, the
numbers in the parenthesis represent the numbers that are left after the
operations and they are in descending order.
Another thing we do is when there are only two numbers left in the parenthesis, we
check whether we can arrive at 24 only by using basic arithmetic operations
(+ - * /). Some examples regarding this idea:
(21 2) no
since 21 + 2 = 23, 21 - 2 = 19, 21 * 2 = 42, 21 / 2 = 10.5, none of which is equal
to 24.
(30 6) 30 - 6 = 24 yes
(8 3) 8 * 3 = 24 yes
(12 8) no
(48 2) 48 / 2 = 24 yes
Most importantly, do not give up; all the numbers that will be given have indeed a
solution.
14 8 8 2
OBJECTIVE
#########
5 10 5 2
"""
dfs = AoT(
num_thoughts=2,
max_steps=10,
value_threshold=1,
initial_prompt=task,
openai_api_key="ENETER IN YOUR API KEY"
)
result = dfs.solve()
print(result) | Algorithm-Of-Thoughts-main | examples.py |
from aot.main import AoT
| Algorithm-Of-Thoughts-main | aot/__init__.py |
import os
import openai
import time
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class OpenAI:
def __init__(
self,
api_key,
strategy="cot",
evaluation_strategy="value",
api_base="",
api_model="",
):
if api_key == "" or api_key is None:
api_key = os.environ.get("OPENAI_API_KEY", "")
if api_key != "":
openai.api_key = api_key
else:
raise Exception("Please provide OpenAI API key")
if api_base == ""or api_base is None:
api_base = os.environ.get("OPENAI_API_BASE", "") # if not set, use the default base path of "https://api.openai.com/v1"
if api_base != "":
# e.g. https://api.openai.com/v1/ or your custom url
openai.api_base = api_base
print(f'Using custom api_base {api_base}')
if api_model == "" or api_model is None:
api_model = os.environ.get("OPENAI_API_MODEL", "")
if api_model != "":
self.api_model = api_model
else:
self.api_model = "text-davinci-003"
print(f'Using api_model {self.api_model}')
self.use_chat_api = 'gpt' in self.api_model
self.strategy = strategy
self.evaluation_strategy = evaluation_strategy
def run(
self,
prompt,
max_tokens,
temperature,
k=1,
stop=None
):
while True:
try:
if self.use_chat_api:
messages = [
{
"role": "user",
"content": prompt
}
]
response = openai.ChatCompletion.create(
model=self.api_model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
else:
response = openai.Completion.create(
engine=self.api_model,
prompt=prompt,
n=k,
max_tokens=max_tokens,
stop=stop,
temperature=temperature,
)
with open("openai.logs", 'a') as log_file:
log_file.write("\n" + "-----------" + '\n' +"Prompt : "+ prompt+"\n")
return response
except openai.error.RateLimitError as e:
                sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
                print(f'{str(e)}, sleep for {sleep_duration}s, set it by env OPENAI_RATE_TIMEOUT')
                time.sleep(sleep_duration)
def openai_choice2text_handler(self, choice):
if self.use_chat_api:
text = choice['message']['content']
else:
text = choice.text.strip()
return text
def generate_text(self, prompt, k):
if self.use_chat_api:
thoughts = []
for _ in range(k):
response = self.run(prompt, 400, 0.5, k)
text = self.openai_choice2text_handler(response.choices[0])
thoughts += [text]
# print(f'thoughts: {thoughts}')
return thoughts
else:
response = self.run(prompt, 300, 0.5, k)
thoughts = [self.openai_choice2text_handler(choice) for choice in response.choices]
return thoughts
def generate_thoughts(
self,
state,
k,
initial_prompt,
rejected_solutions=None
):
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("New state generating thought:", state, "\n\n")
prompt = f"""
            Accomplish the task below by decomposing it into as many very explicit subtasks as possible; be very explicit and thorough, denoted by
            a search process, highlighted by markers '1', ..., '3' as "first operations" guiding subtree exploration for the OBJECTIVE,
            focus on the third subtree exploration. Produce prospective search steps (e.g., the subtree exploration '5. 11 + 1')
and evaluates potential subsequent steps to either progress
towards a solution or retrace to another viable subtree then be very thorough
and think atomically then provide solutions for those subtasks,
then return the definitive end result and then summarize it
########## OBJECTIVE
{initial_prompt}
###################
"""
thoughts = self.generate_text(prompt, k)
# print(f"Generated thoughts: {thoughts}")
return thoughts
def generate_solution(self,
initial_prompt,
state,
rejected_solutions=None):
try:
if isinstance(state, list):
state_text = '\n'.join(state)
else:
state_text = state
prompt = f"""
Generate a series of solutions to comply with the user's instructions,
you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time,
while taking rejected solutions into account and learning from them.
Considering the reasoning provided:\n\n
###'{state_text}'\n\n###
Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
###{rejected_solutions}###,
complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
answer = self.generate_text(prompt, 1)
print(f'Generated Solution Summary {answer}')
return answer
except Exception as e:
logger.error(f"Error in generate_solutions: {e}")
return None
def evaluate_states(self, states, initial_prompt):
if not states:
return {}
if self.evaluation_strategy == 'value':
state_values = {}
for state in states:
if (type(state) == str):
state_text = state
else:
state_text = '\n'.join(state)
print("We receive a state of type", type(state), "For state: ", state, "\n\n")
prompt = f""" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\n
Past solutions:\n\n
{state_text}\n
If the solutions is not making fast progress in achieving the goal, give it a lower score.
Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\n, DO NOT RETURN ANYTHING ELSE
"""
response = self.run(prompt, 10, 1)
try:
value_text = self.openai_choice2text_handler(response.choices[0])
# print(f'state: {value_text}')
value = float(value_text)
print(f"Evaluated Thought Value: {value}")
except ValueError:
value = 0
state_values[state] = value
return state_values
else:
raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.") | Algorithm-Of-Thoughts-main | aot/openai.py |
from aot.openai import OpenAI
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class AoT:
def __init__(
self,
num_thoughts: int = None,
max_steps: int = None,
value_threshold: float = None,
pruning_threshold=0.5,
backtracking_threshold=0.4,
initial_prompt=None,
openai_api_key: str = None
):
self.num_thoughts = num_thoughts
self.max_steps = max_steps
self.value_threshold = value_threshold
self.backtracking_threshold = backtracking_threshold
self.pruning_threshold = pruning_threshold
self.initial_prompt = initial_prompt
self.output = []
self.openai_api_key = openai_api_key
self.model = OpenAI(api_key=self.openai_api_key)
def solve(self):
try:
self.dfs(self.initial_prompt, 1)
if not self.output:
logger.error("No valid thoughts were generated during DFS")
return None
best_state, _ = max(self.output, key=lambda x: x[1])
solution = self.model.generate_solution(self.initial_prompt, best_state)
print(f"Solution is {solution}")
return solution if solution else best_state
except Exception as error:
logger.error(f"Error in tot_dfs: {error}")
raise error
def dfs(self, state, step):
if step > self.max_steps:
thought, value = self.evaluate_thought(state)
self.output.append((thought, value))
return
thoughts = self.generate_and_filter_thoughts(state)
for next_state in thoughts:
state_value = self.evaluated_thoughts[next_state]
if state_value > self.value_threshold:
child = (state, next_state) if isinstance(state, str) else (*state, next_state)
self.dfs(child, step + 1)
                # Backtracking: drop the most recent result if the best value so far is too low
                if self.output:
                    best_value = max([value for _, value in self.output])
                    if best_value < self.backtracking_threshold:
                        self.output.pop()
                        continue
def generate_and_filter_thoughts(self, state):
thoughts = self.model.generate_thoughts(
state,
self.num_thoughts,
self.initial_prompt
)
self.evaluated_thoughts = self.model.evaluate_states(
thoughts,
self.initial_prompt
)
filtered_thoughts = [thought for thought in thoughts if self.evaluated_thoughts[thought] >= self.pruning_threshold]
print(f"filtered_thoughts: {filtered_thoughts}")
return filtered_thoughts
def evaluate_thought(self, state):
thought = self.model.generate_thoughts(state, 1, self.initial_prompt)
value = self.model.evaluate_states([state], self.initial_prompt)[state]
print(f"Evaluated thought: {value}")
return thought, value
| Algorithm-Of-Thoughts-main | aot/main.py |
import os
# specify example image paths:
image_paths = [
'synpic50962.jpg',
'synpic52767.jpg',
'synpic30324.jpg',
'synpic21044.jpg',
'synpic54802.jpg',
'synpic57813.jpg'
]
image_paths = [os.path.join('../img', p) for p in image_paths]
def clean_generation(response):
"""
for some reason, the open-flamingo based model slightly changes the input prompt (e.g. prepends <unk>, an adds some spaces)
"""
return response.replace('<unk> ', '').strip()
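# Hedged usage sketch (not part of the original file): the sample string is illustrative.
if __name__ == "__main__":
    sample = "<unk> Question: Which image modality is this? Answer: mr flair. "
    print(clean_generation(sample))  # -> "Question: Which image modality is this? Answer: mr flair."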
| med-flamingo1-master | scripts/demo_utils.py |
from huggingface_hub import hf_hub_download
import torch
import os
from open_flamingo import create_model_and_transforms
from accelerate import Accelerator
from einops import repeat
from PIL import Image
import sys
sys.path.append('..')
from src.utils import FlamingoProcessor
from demo_utils import image_paths, clean_generation
def main():
accelerator = Accelerator() #when using cpu: cpu=True
device = accelerator.device
print('Loading model..')
# >>> add your local path to Llama-7B (v1) model here:
llama_path = '../models/llama-7b-hf'
if not os.path.exists(llama_path):
raise ValueError('Llama model not yet set up, please check README for instructions!')
model, image_processor, tokenizer = create_model_and_transforms(
clip_vision_encoder_path="ViT-L-14",
clip_vision_encoder_pretrained="openai",
lang_encoder_path=llama_path,
tokenizer_path=llama_path,
cross_attn_every_n_layers=4
)
# load med-flamingo checkpoint:
checkpoint_path = hf_hub_download("med-flamingo/med-flamingo", "model.pt")
print(f'Downloaded Med-Flamingo checkpoint to {checkpoint_path}')
model.load_state_dict(torch.load(checkpoint_path, map_location=device), strict=False)
processor = FlamingoProcessor(tokenizer, image_processor)
# go into eval model and prepare:
model = accelerator.prepare(model)
is_main_process = accelerator.is_main_process
model.eval()
"""
Step 1: Load images
"""
demo_images = [Image.open(path) for path in image_paths]
"""
Step 2: Define multimodal few-shot prompt
"""
# example few-shot prompt:
prompt = "You are a helpful medical assistant. You are being provided with images, a question about the image and an answer. Follow the examples and answer the last question. <image>Question: What is/are the structure near/in the middle of the brain? Answer: pons.<|endofchunk|><image>Question: Is there evidence of a right apical pneumothorax on this chest x-ray? Answer: yes.<|endofchunk|><image>Question: Is/Are there air in the patient's peritoneal cavity? Answer: no.<|endofchunk|><image>Question: Does the heart appear enlarged? Answer: yes.<|endofchunk|><image>Question: What side are the infarcts located? Answer: bilateral.<|endofchunk|><image>Question: Which image modality is this? Answer: mr flair.<|endofchunk|><image>Question: Where is the largest mass located in the cerebellum? Answer:"
"""
Step 3: Preprocess data
"""
print('Preprocess data')
pixels = processor.preprocess_images(demo_images)
pixels = repeat(pixels, 'N c h w -> b N T c h w', b=1, T=1)
tokenized_data = processor.encode_text(prompt)
"""
Step 4: Generate response
"""
# actually run few-shot prompt through model:
print('Generate from multimodal few-shot prompt')
generated_text = model.generate(
vision_x=pixels.to(device),
lang_x=tokenized_data["input_ids"].to(device),
attention_mask=tokenized_data["attention_mask"].to(device),
max_new_tokens=10,
)
response = processor.tokenizer.decode(generated_text[0])
response = clean_generation(response)
print(f'{response=}')
if __name__ == "__main__":
main()
| med-flamingo1-master | scripts/demo.py |
med-flamingo1-master | src/__init__.py |
|
import torch
from abc import ABC, abstractmethod
class AbstractProcessor(ABC):
"""
Abstract class for processors to show what methods they need to implement.
Processors handle text encoding and image preprocessing.
"""
@abstractmethod
def encode_text(self, prompt):
pass
@abstractmethod
def preprocess_images(self, images: list):
pass
class FlamingoProcessor(AbstractProcessor):
"""
Processor class for Flamingo.
"""
def __init__(self, tokenizer, vision_processor):
"""
        OF does not use the same vision processor; image_processor only transforms a single image
"""
self.tokenizer = tokenizer
self.vision_processor = vision_processor
def encode_text(self, prompt):
self.tokenizer.padding_side = "left"
# For generation padding tokens should be on the left
return self.tokenizer([prompt],
return_tensors="pt",
)
def preprocess_images(self, images: list):
vision_x = [self.vision_processor(im).unsqueeze(0) for im in images]
vision_x = torch.cat(vision_x, dim=0)
return vision_x
| med-flamingo1-master | src/utils.py |
import os
import asyncio
import signal
import sys
import threading
import traceback
from pathlib import Path
from platform import system
import discord
import pinecone
from pycord.multicog import apply_multicog
from swarmsdiscord.cogs.search_service_cog import SearchService
from swarmsdiscord.cogs.text_service_cog import SWARMSComCon
from swarmsdiscord.cogs.image_service_cog import DrawDallEService
from swarmsdiscord.cogs.prompt_optimizer_cog import ImgPromptOptimizer
from swarmsdiscord.cogs.moderations_service_cog import ModerationsService
from swarmsdiscord.cogs.commands import Commands
from swarmsdiscord.cogs.transcription_service_cog import TranscribeService
from swarmsdiscord.cogs.translation_service_cog import TranslationService
from swarmsdiscord.cogs.index_service_cog import IndexService
from swarmsdiscord.models.deepl_model import TranslationModel
from swarmsdiscord.services.health_service import HealthService
from swarmsdiscord.services.pickle_service import Pickler
from swarmsdiscord.services.pinecone_service import PineconeService
from swarmsdiscord.services.deletion_service import Deletion
from swarmsdiscord.services.message_queue_service import Message
from swarmsdiscord.services.usage_service import UsageService
from swarmsdiscord.services.environment_service import EnvService
from swarmsdiscord.models.openai_model import Model
__version__ = "11.7.3"
PID_FILE = Path("bot.pid")
PROCESS = None
if sys.platform == "win32":
separator = "\\"
else:
separator = "/"
#
# The pinecone service is used to store and retrieve conversation embeddings.
#
try:
PINECONE_TOKEN = os.getenv("PINECONE_TOKEN")
except Exception:
PINECONE_TOKEN = None
pinecone_service = None
if PINECONE_TOKEN:
pinecone.init(api_key=PINECONE_TOKEN, environment=EnvService.get_pinecone_region())
PINECONE_INDEX = "conversation-embeddings"
if PINECONE_INDEX not in pinecone.list_indexes():
print("Creating pinecone index. Please wait...")
pinecone.create_index(
PINECONE_INDEX,
dimension=1536,
metric="dotproduct",
pod_type="s1",
)
pinecone_service = PineconeService(pinecone.Index(PINECONE_INDEX))
print("Got the pinecone service")
#
# Message queueing for the debug service, defer debug messages to be sent later so we don't hit rate limits.
#
message_queue = asyncio.Queue()
deletion_queue = asyncio.Queue()
asyncio.ensure_future(Message.process_message_queue(message_queue, 1.5, 5))
asyncio.ensure_future(Deletion.process_deletion_queue(deletion_queue, 1, 1))
# Pickling service for conversation persistence
try:
Path(EnvService.save_path() / "pickles").mkdir(exist_ok=True)
except Exception:
traceback.print_exc()
print(
"Could not start pickle service. Conversation history will not be persistent across restarts."
)
pickle_queue = asyncio.Queue()
asyncio.ensure_future(Pickler.process_pickle_queue(pickle_queue, 5, 1))
#
# Settings for the bot
#
activity = discord.Activity(
type=discord.ActivityType.watching, name="for /help /gpt, and more!"
)
bot = discord.Bot(intents=discord.Intents.all(), command_prefix="!", activity=activity)
usage_service = UsageService(Path(os.environ.get("DATA_DIR", os.getcwd())))
model = Model(usage_service)
#
# An encapsulating wrapper for the discord.py client. This uses the old re-write without cogs, but it gets the job done!
#
@bot.event
async def on_ready():
print("We have logged in as {0.user}".format(bot))
@bot.event
async def on_application_command_error(
ctx: discord.ApplicationContext, error: discord.DiscordException
):
if isinstance(error, discord.CheckFailure):
pass
else:
raise error
async def main():
data_path = EnvService.environment_path_with_fallback("DATA_DIR")
debug_guild = int(os.getenv("DEBUG_GUILD"))
debug_channel = int(os.getenv("DEBUG_CHANNEL"))
if not data_path.exists():
raise OSError(f"Data path: {data_path} does not exist ... create it?")
# Load the cog for the moderations service
bot.add_cog(ModerationsService(bot, usage_service, model))
# Load the main SWARMS Bot service
bot.add_cog(
SWARMSComCon(
bot,
usage_service,
model,
message_queue,
deletion_queue,
debug_guild,
debug_channel,
data_path,
pinecone_service=pinecone_service,
pickle_queue=pickle_queue,
)
)
bot.add_cog(
DrawDallEService(
bot,
usage_service,
model,
message_queue,
deletion_queue,
bot.get_cog("SWARMSComCon"),
)
)
bot.add_cog(
ImgPromptOptimizer(
bot,
usage_service,
model,
message_queue,
deletion_queue,
bot.get_cog("SWARMSComCon"),
bot.get_cog("DrawDallEService"),
)
)
bot.add_cog(
IndexService(
bot,
usage_service,
deletion_queue,
)
)
if EnvService.get_deepl_token():
bot.add_cog(TranslationService(bot, TranslationModel()))
print("The translation service is enabled.")
if (
EnvService.get_google_search_api_key()
and EnvService.get_google_search_engine_id()
):
bot.add_cog(
SearchService(
bot, model, usage_service, deletion_queue, bot.get_cog("SWARMSComCon")
)
)
print("The Search service is enabled.")
bot.add_cog(
TranscribeService(
bot,
model,
usage_service,
)
)
bot.add_cog(
Commands(
bot,
usage_service,
model,
message_queue,
deletion_queue,
bot.get_cog("SWARMSComCon"),
bot.get_cog("DrawDallEService"),
bot.get_cog("ImgPromptOptimizer"),
bot.get_cog("ModerationsService"),
bot.get_cog("IndexService"),
bot.get_cog("TranslationService"),
bot.get_cog("SearchService"),
bot.get_cog("TranscribeService"),
)
)
apply_multicog(bot)
await bot.start(os.getenv("DISCORD_TOKEN"))
def check_process_file(pid_file: Path) -> bool:
"""Check the pid file exists and if the Process ID is actually running"""
if not pid_file.exists():
return False
if system() == "Linux":
with pid_file.open("r") as pfp:
try:
                proc_pid_path = Path("/proc") / f"{int(pfp.read().strip())}"
                print(f"Checking if PID proc path {proc_pid_path} exists")
            except ValueError:
                # We don't have a valid int in the PID file
pid_file.unlink()
return False
return proc_pid_path.exists()
return True
def cleanup_pid_file(signum, frame):
# Kill all threads
if PROCESS:
print("Killing all subprocesses")
PROCESS.terminate()
print("Killed all subprocesses")
# Always cleanup PID File if it exists
if PID_FILE.exists():
print(f"Removing PID file {PID_FILE}", flush=True)
PID_FILE.unlink()
# Run the bot with a token taken from an environment file.
def init():
global PROCESS
# Handle SIGTERM cleanly - Docker sends this ...
signal.signal(signal.SIGTERM, cleanup_pid_file)
if check_process_file(PID_FILE):
print(
"Process ID file already exists. Remove the file if you're sure another instance isn't running with the command: rm bot.pid"
)
sys.exit(1)
else:
with PID_FILE.open("w") as f:
f.write(str(os.getpid()))
print(f"Wrote PID to file {PID_FILE}")
f.close()
try:
if EnvService.get_health_service_enabled():
try:
PROCESS = HealthService().get_process()
except:
traceback.print_exc()
print("The health service failed to start.")
asyncio.get_event_loop().run_until_complete(main())
except KeyboardInterrupt:
print("Caught keyboard interrupt, killing and removing PID")
except Exception as e:
traceback.print_exc()
print(str(e))
print("Removing PID file")
finally:
cleanup_pid_file(None, None)
sys.exit(0)
if __name__ == "__main__":
sys.exit(init())
| SwarmsDiscord-main | main.py |
"""
Store information about a discord user, for the purposes of enabling conversations. We store a message
history, message count, and the id of the user in order to track them.
"""
class RedoUser:
def __init__(self, prompt, instruction, message, ctx, response, paginator):
self.prompt = prompt
self.instruction = instruction
self.message = message
self.ctx = ctx
self.response = response
self.paginator = paginator
self.interactions = []
def add_interaction(self, interaction):
self.interactions.append(interaction)
def in_interaction(self, interaction):
return interaction in self.interactions
# Represented by user_id
def __hash__(self):
return hash(self.message.author.id)
def __eq__(self, other):
return self.message.author.id == other.message.author.id
# repr
def __repr__(self):
return f"RedoUser({self.message.author.id})"
class User:
def __init__(self, user_id):
self.user_id = user_id
self.history = []
self.count = 0
    # Equality and hashing are keyed on the user ID, so a User in a list can be matched
    # against any object that exposes an `.id` attribute (e.g. a discord user or member).
def __eq__(self, other):
return self.user_id == other.id
def __hash__(self):
return hash(self.user_id)
def __repr__(self):
return f"User(id={self.user_id}, history={self.history})"
def __str__(self):
return self.__repr__()
class Instruction:
def __init__(self, id, prompt):
self.id = id
self.prompt = prompt
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return f"User(id={self.id}, prompt={self.prompt})"
def __str__(self):
return self.__repr__()
class Thread:
def __init__(self, thread_id):
self.thread_id = thread_id
self.history = []
self.count = 0
self.has_opener = False
self.model = None
self.temperature = None
self.top_p = None
self.frequency_penalty = None
self.presence_penalty = None
def set_overrides(
self,
temperature=None,
top_p=None,
frequency_penalty=None,
presence_penalty=None,
):
self.temperature = temperature
self.top_p = top_p
self.frequency_penalty = frequency_penalty
self.presence_penalty = presence_penalty
def get_overrides(self):
return {
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
}
    # Equality and hashing are keyed on the thread ID, so a Thread in a list can be matched
    # against any object that exposes an `.id` attribute (e.g. a discord thread or channel).
def __eq__(self, other):
return self.thread_id == other.id
def __hash__(self):
return hash(self.thread_id)
def __repr__(self):
return f"Thread(id={self.thread_id}, history={self.history})"
def __str__(self):
return self.__repr__()
class EmbeddedConversationItem:
def __init__(self, text, timestamp):
self.text = text
self.timestamp = int(timestamp)
def __repr__(self):
return self.text
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return self.text == other.text and self.timestamp == other.timestamp
def __hash__(self):
return hash(self.text) + hash(self.timestamp)
def __lt__(self, other):
return self.timestamp < other.timestamp
def __gt__(self, other):
return self.timestamp > other.timestamp
def __le__(self, other):
return self.timestamp <= other.timestamp
def __ge__(self, other):
return self.timestamp >= other.timestamp
def __ne__(self, other):
return not self.__eq__(other)
    # Formatting an EmbeddedConversationItem (e.g. in an f-string or str.format call)
    # yields its .text attribute, so items can be interpolated into prompts directly.
def __format__(self, format_spec):
return self.text
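# --- Minimal self-check (added sketch, not part of the original module) ---
# Demonstrates the timestamp-based ordering and the __format__ hook above:
# dropping an EmbeddedConversationItem into an f-string yields its raw text.
if __name__ == "__main__":
    items = [
        EmbeddedConversationItem("second message", 200),
        EmbeddedConversationItem("first message", 100),
    ]
    items.sort()  # ordered by timestamp via __lt__
    assert items[0].text == "first message"
    print(f"{items[0]} | {items[1]}")  # prints: first message | second message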
| SwarmsDiscord-main | swarmsdiscord/models/user_model.py |
import functools
import os
import random
import tempfile
import traceback
import asyncio
from collections import defaultdict
import aiohttp
import discord
import aiofiles
import openai
import tiktoken
from functools import partial
from typing import List, Optional
from pathlib import Path
from datetime import date
from discord import InteractionResponse, Interaction
from discord.ext import pages
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAIChat
from langchain.memory import ConversationBufferMemory
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.data_structs.data_structs import Node
from llama_index.data_structs.node import DocumentRelationship
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.langchain_helpers.agents import (
IndexToolConfig,
LlamaToolkit,
create_llama_chat_agent,
)
from llama_index.optimization import SentenceEmbeddingOptimizer
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.readers import YoutubeTranscriptReader
from llama_index.readers.schema.base import Document
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever
from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine
from llama_index import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
QuestionAnswerPrompt,
BeautifulSoupWebReader,
GPTTreeIndex,
GoogleDocsReader,
MockLLMPredictor,
OpenAIEmbedding,
GithubRepositoryReader,
MockEmbedding,
download_loader,
LLMPredictor,
ServiceContext,
StorageContext,
ResponseSynthesizer,
load_index_from_storage,
)
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from llama_index.composability import ComposableGraph
from llama_index.schema import BaseDocument
from models.embed_statics_model import EmbedStatics
from models.openai_model import Models
from models.check_model import UrlCheck
from services.environment_service import EnvService
SHORT_TO_LONG_CACHE = {}
MAX_DEEP_COMPOSE_PRICE = EnvService.get_max_deep_compose_price()
EpubReader = download_loader("EpubReader")
MarkdownReader = download_loader("MarkdownReader")
RemoteReader = download_loader("RemoteReader")
RemoteDepthReader = download_loader("RemoteDepthReader")
def get_and_query(
user_id,
index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context,
multistep,
):
index: [GPTVectorStoreIndex, GPTTreeIndex] = index_storage[
user_id
].get_index_or_throw()
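    # Tree indexes are traversed with a leaf retriever (guided by child_branch_factor);
    # every other index type falls back to a top-k vector-similarity retriever.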
if isinstance(index, GPTTreeIndex):
retriever = TreeSelectLeafRetriever(
index=index,
child_branch_factor=child_branch_factor,
service_context=service_context,
)
else:
retriever = VectorIndexRetriever(
index=index, similarity_top_k=nodes, service_context=service_context
)
response_synthesizer = ResponseSynthesizer.from_args(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
optimizer=SentenceEmbeddingOptimizer(threshold_cutoff=0.7),
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=StepDecomposeQueryTransform(multistep),
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = multistep_query_engine.query(query)
else:
response = query_engine.query(query)
return response
class IndexData:
def __init__(self):
self.queryable_index = None
self.individual_indexes = []
# A safety check for the future
def get_index_or_throw(self):
if not self.queryable():
raise Exception(
"An index access was attempted before an index was created. This is a programmer error, please report this to the maintainers."
)
return self.queryable_index
def queryable(self):
return self.queryable_index is not None
def has_indexes(self, user_id):
try:
return (
len(os.listdir(EnvService.find_shared_file(f"indexes/{user_id}"))) > 0
)
except Exception:
return False
def has_search_indexes(self, user_id):
try:
return (
len(
os.listdir(EnvService.find_shared_file(f"indexes/{user_id}_search"))
)
> 0
)
except Exception:
return False
def add_index(self, index, user_id, file_name):
self.individual_indexes.append(index)
self.queryable_index = index
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{file_name}"
# If file is > 93 in length, cut it off to 93
if len(file) > 93:
file = file[:93]
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}"
/ f"{file}"
)
def reset_indexes(self, user_id):
self.individual_indexes = []
self.queryable_index = None
# Delete the user indexes
try:
# First, clear all the files inside it
for file in os.listdir(EnvService.find_shared_file(f"indexes/{user_id}")):
os.remove(EnvService.find_shared_file(f"indexes/{user_id}/{file}"))
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{user_id}_search")
):
os.remove(
EnvService.find_shared_file(f"indexes/{user_id}_search/{file}")
)
except Exception:
traceback.print_exc()
class Index_handler:
def __init__(self, bot, usage_service):
self.bot = bot
self.openai_key = os.getenv("OPENAI_TOKEN")
self.index_storage = defaultdict(IndexData)
self.loop = asyncio.get_running_loop()
self.usage_service = usage_service
self.qaprompt = QuestionAnswerPrompt(
"Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
self.EMBED_CUTOFF = 2000
self.index_chat_chains = {}
async def rename_index(self, ctx, original_path, rename_path):
"""Command handler to rename a user index"""
index_file = EnvService.find_shared_file(original_path)
if not index_file:
return False
# Rename the file at f"indexes/{ctx.user.id}/{user_index}" to f"indexes/{ctx.user.id}/{new_name}" using Pathlib
try:
Path(original_path).rename(rename_path)
return True
except Exception as e:
traceback.print_exc()
return False
async def get_is_in_index_chat(self, ctx):
return ctx.channel.id in self.index_chat_chains
async def execute_index_chat_message(self, ctx, message):
if ctx.channel.id not in self.index_chat_chains:
return None
if message.lower() in ["stop", "end", "quit", "exit"]:
await ctx.reply("Ending chat session.")
self.index_chat_chains.pop(ctx.channel.id)
# close the thread
thread = await self.bot.fetch_channel(ctx.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return "Ended chat session."
agent_output = await self.loop.run_in_executor(
None, partial(self.index_chat_chains[ctx.channel.id].run, message)
)
return agent_output
async def start_index_chat(self, ctx, search, user, model):
if search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{search}"
)
elif user:
index_file = EnvService.find_shared_file(f"indexes/{ctx.user.id}/{user}")
assert index_file is not None
preparation_message = await ctx.channel.send(
embed=EmbedStatics.get_index_chat_preparation_message()
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
summary_response = await self.loop.run_in_executor(
None,
partial(
index.as_query_engine().query, "What is a summary of this document?"
),
)
query_engine = index.as_query_engine(similarity_top_k=3)
tool_config = IndexToolConfig(
query_engine=query_engine,
name=f"Vector Index",
description=f"useful for when you want to answer queries about the external data you're connected to. The data you're connected to is: {summary_response}",
tool_kwargs={"return_direct": True},
)
toolkit = LlamaToolkit(
index_configs=[tool_config],
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm = ChatOpenAI(model=model, temperature=0)
agent_chain = create_llama_chat_agent(toolkit, llm, memory=memory, verbose=True)
embed_title = f"{ctx.user.name}'s data-connected conversation with GPT"
# Get only the last part after the last / of the index_file
try:
index_file_name = str(index_file).split("/")[-1]
except:
index_file_name = index_file
message_embed = discord.Embed(
title=embed_title,
description=f"The agent is connected to the data index named {index_file_name}\nModel: {model}",
color=0x00995B,
)
message_embed.set_thumbnail(url="https://i.imgur.com/7V6apMT.png")
message_embed.set_footer(
text="Data Chat", icon_url="https://i.imgur.com/7V6apMT.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s data-connected conversation with GPT",
auto_archive_duration=60,
)
await ctx.respond("Conversation started.")
try:
await preparation_message.delete()
except:
pass
self.index_chat_chains[thread.id] = agent_chain
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"Index Query Results",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
)
pages.append(page)
return pages
def index_file(
self, file_path, service_context, suffix=None
) -> GPTVectorStoreIndex:
if suffix and suffix == ".md":
loader = MarkdownReader()
document = loader.load_data(file_path)
elif suffix and suffix == ".epub":
epub_loader = EpubReader()
document = epub_loader.load_data(file_path)
else:
document = SimpleDirectoryReader(input_files=[file_path]).load_data()
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_gdoc(self, doc_id, service_context) -> GPTVectorStoreIndex:
document = GoogleDocsReader().load_data(doc_id)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_youtube_transcript(self, link, service_context):
try:
documents = YoutubeTranscriptReader().load_data(ytlinks=[link])
except Exception as e:
raise ValueError(f"The youtube transcript couldn't be loaded: {e}")
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_github_repository(self, link, service_context):
# Extract the "owner" and the "repo" name from the github link.
owner = link.split("/")[3]
repo = link.split("/")[4]
try:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="main"
)
except KeyError:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="master"
)
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_load_file(self, file_path) -> [GPTVectorStoreIndex, ComposableGraph]:
storage_context = StorageContext.from_defaults(persist_dir=file_path)
index = load_index_from_storage(storage_context)
return index
def index_discord(self, document, service_context) -> GPTVectorStoreIndex:
index = GPTVectorStoreIndex.from_documents(
document,
service_context=service_context,
use_async=True,
)
return index
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
return "An error occurred while downloading the PDF."
# Get the file path of this tempfile.NamedTemporaryFile
# Save this temp file to an actual file that we can put into something else to read it
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
# Delete the temporary file
return documents
async def index_webpage(self, url, service_context) -> GPTVectorStoreIndex:
# First try to connect to the URL to see if we can even reach it.
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, timeout=5) as response:
                    # Reject the URL outright if the server does not answer with a success status
if response.status not in [200, 203, 202, 204]:
raise ValueError(
"Invalid URL or could not connect to the provided URL."
)
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
documents = await self.index_pdf(url)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
except:
raise ValueError("Could not load webpage")
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
# index = GPTVectorStoreIndex(documents, embed_model=embed_model, use_async=True)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
def reset_indexes(self, user_id):
self.index_storage[user_id].reset_indexes(user_id)
async def set_file_index(
self, ctx: discord.ApplicationContext, file: discord.Attachment, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
type_to_suffix_mappings = {
"text/plain": ".txt",
"text/csv": ".csv",
"application/pdf": ".pdf",
"application/json": ".json",
"image/png": ".png",
"image/": ".jpg",
"ms-powerpoint": ".ppt",
"presentationml.presentation": ".pptx",
"ms-excel": ".xls",
"spreadsheetml.sheet": ".xlsx",
"msword": ".doc",
"wordprocessingml.document": ".docx",
"audio/": ".mp3",
"video/": ".mp4",
"epub": ".epub",
"markdown": ".md",
"html": ".html",
}
        # For when the content type doesn't get picked up by discord.
secondary_mappings = {
".epub": ".epub",
}
try:
# First, initially set the suffix to the suffix of the attachment
suffix = None
if file.content_type:
# Apply the suffix mappings to the file
for key, value in type_to_suffix_mappings.items():
if key in file.content_type:
suffix = value
break
if not suffix:
await ctx.send("This file type is not supported.")
return
else:
for key, value in secondary_mappings.items():
if key in file.filename:
suffix = value
break
if not suffix:
await ctx.send(
"Could not determine the file type of the attachment, attempting a dirty index.."
)
return
# Send indexing message
response = await ctx.respond(
embed=EmbedStatics.build_index_progress_embed()
)
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
await file.save(temp_file.name)
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(
"text-davinci-003"
).encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
index = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
service_context,
suffix,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
file_name = file.filename
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
await response.edit(
embed=EmbedStatics.get_index_set_success_embed(str(price))
)
except Exception as e:
await ctx.channel.send(
embed=EmbedStatics.get_index_set_failure_embed(str(e))
)
traceback.print_exc()
async def set_link_index_recurse(
self, ctx: discord.ApplicationContext, link: str, depth, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
            # Recursively crawl the link down to the requested depth and load every reachable document
loader = RemoteDepthReader(depth=depth)
documents = await self.loop.run_in_executor(
None, partial(loader.load_data, [link])
)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex,
documents=documents,
service_context=service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_link_index(
self, ctx: discord.ApplicationContext, link: str, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
# Check if the link contains youtube in it
if await UrlCheck.check_youtube_link(link):
index = await self.loop.run_in_executor(
None, partial(self.index_youtube_transcript, link, service_context)
)
elif "github" in link:
index = await self.loop.run_in_executor(
None, partial(self.index_github_repository, link, service_context)
)
else:
index = await self.index_webpage(link, service_context)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
                    token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_discord_index(
self,
ctx: discord.ApplicationContext,
channel: discord.TextChannel,
user_api_key,
message_limit: int = 2500,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
document = await self.load_data(
channel_ids=[channel.id], limit=message_limit, oldest_first=False
)
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context)
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, channel.name)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
async def load_index(
self, ctx: discord.ApplicationContext, index, server, search, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
if server:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.guild.id}/{index}"
)
elif search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{index}"
)
else:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}/{index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
self.index_storage[ctx.user.id].queryable_index = index
await ctx.respond(embed=EmbedStatics.get_index_load_success_embed())
except Exception as e:
traceback.print_exc()
await ctx.respond(embed=EmbedStatics.get_index_load_failure_embed(str(e)))
async def index_to_docs(
self, old_index, chunk_size: int = 4000, chunk_overlap: int = 200
) -> List[BaseDocument]:
documents = []
docstore = old_index.docstore
for doc_id in docstore.docs.keys():
text = ""
document = docstore.get_document(doc_id)
if document is not None:
node = docstore.get_node(document.get_doc_id())
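                # Walk the chain of nodes linked by NEXT relationships, concatenating their
                # text so it can be re-chunked below with the requested size/overlap.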
while node is not None:
extra_info = node.extra_info
text += f"{node.text} "
next_node_id = node.relationships.get(
DocumentRelationship.NEXT, None
)
node = docstore.get_node(next_node_id) if next_node_id else None
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
text_chunks = text_splitter.split_text(text)
for chunk_text in text_chunks:
new_doc = Document(text=chunk_text, extra_info=extra_info)
documents.append(new_doc)
print(new_doc)
return documents
async def compose_indexes(self, user_id, indexes, name, deep_compose):
# Load all the indexes first
index_objects = []
for _index in indexes:
try:
index_file = EnvService.find_shared_file(f"indexes/{user_id}/{_index}")
except ValueError:
index_file = EnvService.find_shared_file(
f"indexes/{user_id}_search/{_index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
index_objects.append(index)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
# For each index object, add its documents to a GPTTreeIndex
if deep_compose:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index, 256, 20))
embedding_model = OpenAIEmbedding()
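            # Dry run with mock LLM/embedding models to estimate the token cost of the deep
            # (tree) composition before making real API calls; if the estimate exceeds
            # MAX_DEEP_COMPOSE_PRICE the composition is aborted below.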
llm_predictor_mock = MockLLMPredictor(4096)
embedding_model_mock = MockEmbedding(1536)
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager_mock = CallbackManager([token_counter_mock])
service_context_mock = ServiceContext.from_defaults(
llm_predictor=llm_predictor_mock,
embed_model=embedding_model_mock,
callback_manager=callback_manager_mock,
)
# Run the mock call first
await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_llm_token_count,
"turbo", # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
print("The total composition price is: ", total_usage_price)
if total_usage_price > MAX_DEEP_COMPOSE_PRICE:
raise ValueError(
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_model,
callback_manager=callback_manager,
)
tree_index = await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_llm_token_count, "turbo"
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
# Now we have a list of tree indexes, we can compose them
if not name:
name = f"{date.today().month}_{date.today().day}_composed_deep_index"
# Save the composed index
tree_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = tree_index
return total_usage_price
else:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index))
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
simple_index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
if not name:
name = f"{date.today().month}_{date.today().day}_composed_index"
# Save the composed index
simple_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = simple_index
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
price = "Unknown"
return price
async def backup_discord(
self, ctx: discord.ApplicationContext, user_api_key, message_limit
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
channel_ids: List[int] = []
for c in ctx.guild.text_channels:
channel_ids.append(c.id)
document = await self.load_data(
channel_ids=channel_ids, limit=message_limit, oldest_first=False
)
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model, callback_manager=callback_manager
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context)
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
Path(EnvService.save_path() / "indexes" / str(ctx.guild.id)).mkdir(
parents=True, exist_ok=True
)
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ str(ctx.guild.id)
/ f"{ctx.guild.name.replace(' ', '-')}_{date.today().month}_{date.today().day}"
)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed((str(e))))
traceback.print_exc()
async def query(
self,
ctx: discord.ApplicationContext,
query: str,
response_mode,
nodes,
user_api_key,
child_branch_factor,
model,
multistep,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
ctx_response = await ctx.respond(
embed=EmbedStatics.build_index_query_progress_embed(query)
)
try:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_model,
callback_manager=callback_manager,
)
token_counter.reset_counts()
response = await self.loop.run_in_executor(
None,
partial(
get_and_query,
ctx.user.id,
self.index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context=service_context,
multistep=llm_predictor if multistep else None,
),
)
print("The last token usage was ", token_counter.total_llm_token_count)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
total_price = round(
await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
+ await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
),
6,
)
except:
total_price = "Unknown"
query_response_message = f"**Query:**\n\n`{query.strip()}`\n\n**Query response:**\n\n{response.response.strip()}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
embed_pages = await self.paginate_embed(query_response_message)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await ctx_response.edit(
embed=EmbedStatics.build_index_query_success_embed(query, total_price)
)
await paginator.respond(ctx.interaction)
except Exception:
traceback.print_exc()
await ctx_response.edit(
embed=EmbedStatics.get_index_query_failure_embed(
"Failed to send query. You may not have an index set, load an index with /index load"
)
)
# Extracted functions from DiscordReader
async def read_channel(
self, channel_id: int, limit: Optional[int], oldest_first: bool
) -> str:
"""Async read channel."""
messages: List[discord.Message] = []
try:
channel = self.bot.get_channel(channel_id)
print(f"Added {channel.name} from {channel.guild.name}")
# only work for text channels for now
if not isinstance(channel, discord.TextChannel):
raise ValueError(
f"Channel {channel_id} is not a text channel. "
"Only text channels are supported for now."
)
# thread_dict maps thread_id to thread
thread_dict = {}
for thread in channel.threads:
thread_dict[thread.id] = thread
async for msg in channel.history(limit=limit, oldest_first=oldest_first):
if msg.author.bot:
pass
else:
messages.append(msg)
if msg.id in thread_dict:
thread = thread_dict[msg.id]
async for thread_msg in thread.history(
limit=limit, oldest_first=oldest_first
):
messages.append(thread_msg)
except Exception as e:
print("Encountered error: " + str(e))
channel = self.bot.get_channel(channel_id)
msg_txt_list = [
f"user:{m.author.display_name}, content:{m.content}" for m in messages
]
return ("<|endofstatement|>\n\n".join(msg_txt_list), channel.name)
async def load_data(
self,
channel_ids: List[int],
limit: Optional[int] = None,
oldest_first: bool = True,
) -> List[Document]:
"""Load data from the input directory.
Args:
channel_ids (List[int]): List of channel ids to read.
limit (Optional[int]): Maximum number of messages to read.
oldest_first (bool): Whether to read oldest messages first.
Defaults to `True`.
Returns:
List[Document]: List of documents.
"""
results: List[Document] = []
for channel_id in channel_ids:
if not isinstance(channel_id, int):
raise ValueError(
f"Channel id {channel_id} must be an integer, "
f"not {type(channel_id)}."
)
(channel_content, channel_name) = await self.read_channel(
channel_id, limit=limit, oldest_first=oldest_first
)
results.append(
Document(channel_content, extra_info={"channel_name": channel_name})
)
return results
async def compose(self, ctx: discord.ApplicationContext, name, user_api_key):
# Send the ComposeModal
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
if not self.index_storage[ctx.user.id].has_indexes(ctx.user.id):
await ctx.respond(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must have at least one index to compose."
)
)
return
await ctx.respond(
"Select the index(es) to compose. You can compose multiple indexes together, you can also Deep Compose a single index.",
view=ComposeModal(self, ctx.user.id, name),
ephemeral=True,
)
class ComposeModal(discord.ui.View):
def __init__(self, index_cog, user_id, name=None, deep=None) -> None:
super().__init__()
# Get the argument named "user_key_db" and save it as USER_KEY_DB
self.index_cog = index_cog
self.user_id = user_id
self.deep = deep
# Get all the indexes for the user
self.indexes = [
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}/")
)
]
if index_cog.index_storage[user_id].has_search_indexes(user_id):
self.indexes.extend(
[
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}_search/")
)
]
)
print("Found the indexes, they are ", self.indexes)
# Map everything into the short to long cache
for index in self.indexes:
if len(index) > 93:
index_name = index[:93] + "-" + str(random.randint(0000, 9999))
SHORT_TO_LONG_CACHE[index_name] = index
else:
SHORT_TO_LONG_CACHE[index[:99]] = index
# Reverse the SHORT_TO_LONG_CACHE index
LONG_TO_SHORT_CACHE = {v: k for k, v in SHORT_TO_LONG_CACHE.items()}
# A text entry field for the name of the composed index
self.name = name
# A discord UI select menu with all the indexes. Limited to 25 entries. For the label field in the SelectOption,
# cut it off at 100 characters to prevent the message from being too long
self.index_select = discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index], value=LONG_TO_SHORT_CACHE[index]
)
for index in self.indexes
][0:25],
max_values=len(self.indexes) if len(self.indexes) < 25 else 25,
min_values=1,
)
# Add the select menu to the modal
self.add_item(self.index_select)
        # If we have more than 25 entries, add more Select fields as necessary
self.extra_index_selects = []
if len(self.indexes) > 25:
for i in range(25, len(self.indexes), 25):
self.extra_index_selects.append(
discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index],
value=LONG_TO_SHORT_CACHE[index],
)
for index in self.indexes
][i : i + 25],
max_values=len(self.indexes[i : i + 25]),
min_values=1,
)
)
self.add_item(self.extra_index_selects[-1])
# Add an input field for "Deep", a "yes" or "no" option, default no
self.deep_select = discord.ui.Select(
placeholder="Deep Compose",
options=[
discord.SelectOption(label="Yes", value="yes"),
discord.SelectOption(label="No", value="no"),
],
max_values=1,
min_values=1,
)
self.add_item(self.deep_select)
# Add a button to the modal called "Compose"
self.add_item(
discord.ui.Button(
label="Compose", style=discord.ButtonStyle.green, custom_id="compose"
)
)
# The callback for the button
async def interaction_check(self, interaction: discord.Interaction) -> bool:
# Check that the interaction was for custom_id "compose"
if interaction.data["custom_id"] == "compose":
# Check that the user selected at least one index
# The total list of indexes is the union of the values of all the select menus
indexes = self.index_select.values + [
select.values[0] for select in self.extra_index_selects
]
# Remap them from the SHORT_TO_LONG_CACHE
indexes = [SHORT_TO_LONG_CACHE[index] for index in indexes]
if len(indexes) < 1:
await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must select at least 1 index"
),
ephemeral=True,
)
else:
composing_message = await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_progress_embed(),
ephemeral=True,
)
# Compose the indexes
try:
price = await self.index_cog.compose_indexes(
self.user_id,
indexes,
self.name,
False
if not self.deep_select.values
or self.deep_select.values[0] == "no"
else True,
)
except ValueError as e:
await interaction.followup.send(
str(e), ephemeral=True, delete_after=180
)
return False
except Exception as e:
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_failure_embed(
"An error occurred while composing the indexes: " + str(e)
),
ephemeral=True,
delete_after=180,
)
return False
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_success_embed(price),
ephemeral=True,
delete_after=180,
)
# Try to direct message the user that their composed index is ready
try:
await self.index_cog.bot.get_user(self.user_id).send(
f"Your composed index is ready! You can load it with /index load now in the server."
)
except discord.Forbidden:
pass
try:
composing_message: Interaction
await composing_message.delete_original_response()
except:
traceback.print_exc()
else:
await interaction.response.defer(ephemeral=True)
| SwarmsDiscord-main | swarmsdiscord/models/index_model.py |
from pathlib import Path
import os
import re
import discord
from models.deepl_model import TranslationModel
from services.moderations_service import ModerationOptions
from services.usage_service import UsageService
from models.openai_model import ImageSize, Model, ModelLimits, Models, Mode
from services.environment_service import EnvService
usage_service = UsageService(Path(os.environ.get("DATA_DIR", os.getcwd())))
model = Model(usage_service)
class Settings_autocompleter:
"""autocompleter for the settings command"""
async def get_settings(ctx: discord.AutocompleteContext):
"""get settings for the settings option"""
SETTINGS = [
re.sub("^_", "", key)
for key in model.__dict__.keys()
if key not in model._hidden_attributes
]
return [
parameter
for parameter in SETTINGS
if parameter.startswith(ctx.value.lower())
][:25]
async def get_value(
ctx: discord.AutocompleteContext,
): # Behaves a bit weird if you go back and edit the parameter without typing in a new command
"""gets valid values for the value option"""
values = {
"max_conversation_length": [
str(num)
for num in range(
ModelLimits.MIN_CONVERSATION_LENGTH,
ModelLimits.MAX_CONVERSATION_LENGTH + 1,
2,
)
],
"num_images": [
str(num)
for num in range(
ModelLimits.MIN_NUM_IMAGES, ModelLimits.MAX_NUM_IMAGES + 1
)
],
"mode": Mode.ALL_MODES,
"model": Models.TEXT_MODELS,
"low_usage_mode": ["True", "False"],
"image_size": ImageSize.ALL_SIZES,
"summarize_conversation": ["True", "False"],
"welcome_message_enabled": ["True", "False"],
"num_static_conversation_items": [
str(num)
for num in range(
ModelLimits.MIN_NUM_STATIC_CONVERSATION_ITEMS,
ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS + 1,
)
],
"num_conversation_lookback": [
str(num)
for num in range(
ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK,
ModelLimits.MAX_NUM_CONVERSATION_LOOKBACK + 1,
)
],
"summarize_threshold": [
str(num)
for num in range(
ModelLimits.MIN_SUMMARIZE_THRESHOLD,
ModelLimits.MAX_SUMMARIZE_THRESHOLD + 1,
50,
)
],
"type": ["warn", "delete"],
"use_org": ["True", "False"],
}
options = values.get(ctx.options["parameter"], [])
if options:
return [value for value in options if value.startswith(ctx.value.lower())]
        await ctx.interaction.response.defer()  # defer so autocomplete on integer parameters doesn't error and simply shows no results
return []
async def get_models(
ctx: discord.AutocompleteContext,
):
"""Gets all models"""
models = [
value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
]
return models
async def get_index_and_search_models(
ctx: discord.AutocompleteContext,
):
models = Models.CHATGPT_MODELS + Models.GPT4_MODELS
return [value for value in models if value.startswith(ctx.value.lower())]
async def get_converse_models(
ctx: discord.AutocompleteContext,
):
"""Gets all models"""
models = [
value for value in Models.TEXT_MODELS if value.startswith(ctx.value.lower())
]
return models
async def get_value_moderations(
ctx: discord.AutocompleteContext,
): # Behaves a bit weird if you go back and edit the parameter without typing in a new command
"""gets valid values for the type option"""
return [
value
for value in ModerationOptions.OPTIONS
if value.startswith(ctx.value.lower())
]
async def get_value_alert_id_channel(self, ctx: discord.AutocompleteContext):
"""gets valid values for the channel option"""
return [
channel.name
for channel in ctx.interaction.guild.channels
if channel.name.startswith(ctx.value.lower())
]
class Translations_autocompleter:
"""autocompleter for the translations command"""
async def get_languages(ctx: discord.AutocompleteContext):
"""gets valid values for the language option"""
return [
language
for language in TranslationModel.get_all_country_names()
if language.lower().startswith(ctx.value.lower())
]
async def get_formality_values(self, ctx: discord.AutocompleteContext):
"""gets valid values for the formality option"""
return [
value
for value in ["prefer_more", "prefer_less"]
if value.lower().startswith(ctx.value.lower())
]
class File_autocompleter:
"""Autocompleter for the opener command"""
async def get_openers(ctx: discord.AutocompleteContext):
"""get all files in the openers folder"""
try:
return [
file
for file in os.listdir(EnvService.find_shared_file("openers"))
if file.startswith(ctx.value.lower())
][
:25
            ]  # returns the first 25 files matching your current input
except Exception:
return ["No 'openers' folder"]
async def get_user_indexes(ctx: discord.AutocompleteContext):
"""get all files in the indexes folder"""
try:
return [
file
for file in os.listdir(
EnvService.find_shared_file(
f"indexes/{str(ctx.interaction.user.id)}/"
)
)
if file.startswith(ctx.value.lower())
][
:25
            ]  # returns the first 25 files matching your current input
except Exception:
return ["No user indexes found, add an index"]
async def get_server_indexes(ctx: discord.AutocompleteContext):
"""get all files in the indexes folder"""
try:
return [
file
for file in os.listdir(
EnvService.find_shared_file(
f"indexes/{str(ctx.interaction.guild.id)}/"
)
)
if file.startswith(ctx.value.lower())
][
:25
            ]  # returns the first 25 files matching your current input
except Exception:
return ["No server indexes found, add an index"]
async def get_user_search_indexes(ctx: discord.AutocompleteContext):
"""get all files in the indexes folder"""
try:
return [
file
for file in os.listdir(
EnvService.find_shared_file(
f"indexes/{str(ctx.interaction.user.id)}_search/"
)
)
if file.startswith(ctx.value.lower())
][
:25
            ]  # returns the first 25 files matching your current input
except Exception:
return ["No user indexes found, add an index"]
| SwarmsDiscord-main | swarmsdiscord/models/autocomplete_model.py |
| SwarmsDiscord-main | swarmsdiscord/models/__init__.py |
import os
import traceback
import aiohttp
import backoff
COUNTRY_CODES = {
"EN": "English",
"ES": "Spanish",
"FR": "French",
"ZH": "Chinese (simplified)",
"BG": "Bulgarian",
"CS": "Czech",
"DA": "Danish",
"DE": "German",
"EL": "Greek",
"FI": "Finnish",
"HU": "Hungarian",
"ID": "Indonesian",
"IT": "Italian",
"JA": "Japanese",
"LT": "Lithuanian",
"LV": "Latvian",
"NL": "Dutch",
"PL": "Polish",
"PT": "Portuguese",
"RO": "Romanian",
"RU": "Russian",
"SK": "Slovak",
"SV": "Swedish",
"TR": "Turkish",
"UK": "Ukrainian",
}
class TranslationModel:
def __init__(self):
self.deepl_token = os.getenv("DEEPL_TOKEN")
def backoff_handler(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].status}: {details['exception'].message}"
)
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler,
)
async def send_translate_request(self, text, translate_language, formality):
async with aiohttp.ClientSession(raise_for_status=True) as session:
payload = {
"text": text,
"target_lang": translate_language,
"formality": "default" if formality is None else formality,
}
# Instead of sending as json, we want to send as regular post params
headers = {
"Authorization": f"DeepL-Auth-Key {self.deepl_token}",
}
async with session.post(
"https://api-free.deepl.com/v2/translate",
params=payload,
headers=headers,
) as resp:
response = await resp.json()
print(response)
try:
return (
response["translations"][0]["text"],
response["translations"][0]["detected_source_language"],
)
except Exception:
print(response)
traceback.print_exc()
return response
@staticmethod
def get_all_country_names(lower=False):
"""Get a list of all the country names"""
return (
list(COUNTRY_CODES.values())
if not lower
else [name.lower() for name in COUNTRY_CODES.values()]
)
@staticmethod
def get_all_country_codes():
"""Get a list of all the country codes"""
return list(COUNTRY_CODES.keys())
@staticmethod
def get_country_name_from_code(code):
"""Get the country name from the code"""
try:
return COUNTRY_CODES[code]
except KeyError:
return "Unknown Language"
@staticmethod
def get_country_code_from_name(name):
"""Get the country code from the name"""
for code, country_name in COUNTRY_CODES.items():
if country_name.lower().strip() == name.lower().strip():
return code
| SwarmsDiscord-main | swarmsdiscord/models/deepl_model.py |
import discord
from services.environment_service import EnvService
BOT_NAME = EnvService.get_custom_bot_name()
class EmbedStatics:
def __init__(self):
pass
@staticmethod
def get_api_timeout_embed():
embed = discord.Embed(
title="The API timed out. Try again later.",
description=f"*This is an issue with the OpenAI APIs, not with the bot instance.*",
color=discord.Color.red(),
)
return embed
@staticmethod
def get_invalid_api_response_embed(e):
embed = discord.Embed(
title="The API returned an invalid response",
description=f"**{e.status}: {e.message}**",
color=discord.Color.red(),
)
return embed
@staticmethod
def get_invalid_value_embed(e):
embed = discord.Embed(
title="Invalid value",
description=f"**{str(e)}**",
color=discord.Color.red(),
)
return embed
@staticmethod
def get_general_error_embed(e):
embed = discord.Embed(
title="An error occurred",
description=f"**{str(e)}**",
color=discord.Color.red(),
)
return embed
@staticmethod
def generate_end_embed():
embed = discord.Embed(
title="Conversation Ended",
description=f"This conversation has ended. You can start a new one with `/gpt converse`",
color=0x808080,
)
embed.set_thumbnail(url="https://i.imgur.com/asA13vI.png")
footer_text = "Conversation ended"
embed.set_footer(text=footer_text, icon_url="https://i.imgur.com/asA13vI.png")
return embed
@staticmethod
def generate_conversation_embed(conversation_threads, thread, opener, overrides):
# Generate a nice looking embed for the above text
embed = discord.Embed(
title="Conversation started",
description=f"Conversation started with {BOT_NAME}",
color=0x808080,
)
embed.add_field(
name="Model",
value=f"The model used is **{conversation_threads[thread.id].model}**",
)
embed.add_field(
name="Overrides",
value=f"**temp={overrides['temperature']}**, **top_p={overrides['top_p']}**"
f", **freq. penalty={overrides['frequency_penalty']}**, **pres. penalty={overrides['presence_penalty']}**\n",
)
embed.add_field(
name="End the conversation",
value="End the conversation by saying `end`, or clicking the red 'End Conversation' button\n\n",
inline=False,
)
embed.add_field(
name="Ignoring Messages",
value="If you want GPT to ignore your messages, start your messages with `~`\n\n",
inline=False,
)
return embed
@staticmethod
def generate_opener_embed(opener):
embed = discord.Embed(
title="Opening Prompt",
description=f"{opener}",
color=0x808080,
)
return embed
@staticmethod
def get_index_set_success_embed(price="Unknown"):
embed = discord.Embed(
title="Index Added",
description=f"This index can now be queried and loaded with `/index query` and `/index load`\n\n||Total cost: {round(float(price), 6) if price != 'Unknown' else 'Unknown'}||",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def get_index_set_failure_embed(message):
embed = discord.Embed(
title="Index Add",
description=f"Index add failed. {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_index_load_success_embed(name=None):
embed = discord.Embed(
title="Index Loaded" if not name else f"Index {name} loaded",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def get_index_load_failure_embed(message):
embed = discord.Embed(
title="Index load",
description=f"Index load failed. {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_index_query_failure_embed(message):
embed = discord.Embed(
title="Index query",
description=f"Index query failed. {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_index_compose_success_embed(price="Unknown"):
embed = discord.Embed(
title="Indexes Composed",
description=f"Indexes composed successfully, you can query and load this index with `/index query` and `/index load`\n\n||Total cost: {round(float(price), 6) if price != 'Unknown' else 'Unknown'}||",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def get_index_compose_failure_embed(message):
embed = discord.Embed(
title="Index Compose",
description=f"Index compose failed. {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_index_compose_progress_embed():
embed = discord.Embed(
title="Index Compose",
description=f"Your index composition is running, this may take a while.",
color=discord.Color.blurple(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def get_index_chat_preparation_message():
embed = discord.Embed(
title="Index Chat",
description=f"Your index chat is preparing, this might take a moment.",
color=discord.Color.blurple(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def get_index_rename_success_embed(original, renamed):
embed = discord.Embed(
title=f"Index Rename",
description=f"Index {original} renamed to {renamed}",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def get_index_rename_failure_embed(original, renamed, message):
embed = discord.Embed(
title="Index Rename",
description=f"Index rename from {original} to {renamed} failed. {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_edit_command_output_embed(response_text):
embed = discord.Embed(
title="GPT Edits",
description=f"{response_text}",
color=discord.Color.light_grey(),
)
return embed
@staticmethod
def get_search_failure_embed(message):
embed = discord.Embed(
title="AI-Assisted Search",
description=f"An error occured while performing search: {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_internet_chat_failure_embed(message):
embed = discord.Embed(
title="Internet-Connected Chat",
description=f"An error occured while using internet connected chat: {message}",
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def get_search_redo_progress_embed():
embed = discord.Embed(
title="AI-Assisted Search",
description=f"Your original search request is being redone. This may take a while.",
color=discord.Color.blurple(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def get_conversation_shared_embed(url):
embed = discord.Embed(
title="Conversation Shared",
description=f"You can access your shared conversation at: {url}",
color=discord.Color.blurple(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/8OIZc1A.png")
return embed
@staticmethod
def get_conversation_share_failed_embed(message):
embed = discord.Embed(
title="Conversation Sharing",
description=f"Conversation sharing failed: " + message,
color=discord.Color.red(),
)
# thumbnail of https://i.imgur.com/hbdBZfG.png
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
@staticmethod
def build_index_progress_embed():
embed = discord.Embed(
title="Index Service",
description="Indexing...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def build_index_query_progress_embed(query):
embed = discord.Embed(
title="Index Service",
description=f"Query:\n`{query}`\nQuerying...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def build_index_query_success_embed(query, price="Unknown"):
embed = discord.Embed(
title="Index Service",
description=f"Query:\n`{query}`\nThe index query was successful.\n\n||Total cost: {round(float(price), 6) if price != 'Unknown' else 'Unknown'}||",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def build_transcribe_progress_embed():
embed = discord.Embed(
title="Transcriber",
description=f"Your transcription request has been sent, this may take a while.",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
@staticmethod
def build_transcribe_success_embed(transcribed_text):
embed = discord.Embed(
title="Transcriber",
description=f"Transcribed successfully:\n`{transcribed_text}`",
color=discord.Color.green(),
)
# thumbnail of https://i.imgur.com/I5dIdg6.png
embed.set_thumbnail(url="https://i.imgur.com/I5dIdg6.png")
return embed
@staticmethod
def build_transcribe_failed_embed(message):
embed = discord.Embed(
title="Transcriber",
description=f"Transcription failed: " + message,
color=discord.Color.red(),
)
embed.set_thumbnail(url="https://i.imgur.com/hbdBZfG.png")
return embed
| SwarmsDiscord-main | swarmsdiscord/models/embed_statics_model.py |
import asyncio
import functools
import math
import os
import re
import tempfile
import traceback
import uuid
from typing import Any, Tuple
import aiohttp
import backoff
import discord
# An enum of two modes, TOP_P or TEMPERATURE
import requests
from services.environment_service import EnvService
from PIL import Image
from discord import File
from sqlitedict import SqliteDict
try:
print("Attempting to retrieve the settings DB")
SETTINGS_DB = SqliteDict(
f"{EnvService.save_path()}/main_db.sqlite",
tablename="settings",
autocommit=True,
)
print("Retrieved the settings DB")
except Exception as e:
print("Failed to retrieve the settings DB. The bot is terminating.")
raise e
class Mode:
TEMPERATURE = "temperature"
TOP_P = "top_p"
ALL_MODES = [TEMPERATURE, TOP_P]
class Override:
def __init__(self, temp=None, top_p=None, frequency=None, presence=None):
self.temperature = temp
self.top_p = top_p
self.frequency_penalty = frequency
self.presence_penalty = presence
class Models:
# Text models
DAVINCI = "text-davinci-003"
CURIE = "text-curie-001"
# Embedding models
EMBEDDINGS = "text-embedding-ada-002"
# Edit models
EDIT = "text-davinci-edit-001"
# ChatGPT Models
TURBO = "gpt-3.5-turbo"
TURBO_16 = "gpt-3.5-turbo-16k"
TURBO_DEV = "gpt-3.5-turbo-0613"
TURBO_16_DEV = "gpt-3.5-turbo-16k-0613"
# GPT4 Models
GPT4 = "gpt-4"
GPT4_32 = "gpt-4-32k"
GPT4_DEV = "gpt-4-0613"
GPT4_32_DEV = "gpt-4-32k-0613"
# Model collections
TEXT_MODELS = [
DAVINCI,
CURIE,
TURBO,
TURBO_16,
TURBO_DEV,
TURBO_16_DEV,
GPT4,
GPT4_32,
GPT4_DEV,
GPT4_32_DEV,
]
CHATGPT_MODELS = [
TURBO,
TURBO_16,
TURBO_DEV,
TURBO_16_DEV,
]
GPT4_MODELS = [
GPT4,
GPT4_32,
GPT4_DEV,
GPT4_32_DEV,
]
EDIT_MODELS = [EDIT]
DEFAULT = TURBO
LOW_USAGE_MODEL = CURIE
# Tokens Mapping
TOKEN_MAPPING = {
DAVINCI: 4024,
CURIE: 2024,
TURBO: 4096,
TURBO_16: 16384,
TURBO_DEV: 4096,
TURBO_16_DEV: 16384,
GPT4: 8192,
GPT4_32: 32768,
GPT4_DEV: 8192,
GPT4_32_DEV: 32768,
}
@staticmethod
def get_max_tokens(model: str) -> int:
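        """Return the context window size for the given model, defaulting to 2024 tokens for unknown models."""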
return Models.TOKEN_MAPPING.get(model, 2024)
class ImageSize:
SMALL = "256x256"
MEDIUM = "512x512"
LARGE = "1024x1024"
ALL_SIZES = [SMALL, MEDIUM, LARGE]
class ModelLimits:
MIN_TOKENS = 15
MAX_TOKENS = 4096
MIN_CONVERSATION_LENGTH = 1
MAX_CONVERSATION_LENGTH = 100000
MIN_SUMMARIZE_THRESHOLD = 800
MAX_SUMMARIZE_THRESHOLD = 3500
MIN_NUM_IMAGES = 1
MAX_NUM_IMAGES = 4
MIN_NUM_STATIC_CONVERSATION_ITEMS = 5
MAX_NUM_STATIC_CONVERSATION_ITEMS = 20
MIN_NUM_CONVERSATION_LOOKBACK = 5
MAX_NUM_CONVERSATION_LOOKBACK = 15
MIN_TEMPERATURE = 0.0
MAX_TEMPERATURE = 2.0
MIN_TOP_P = 0.0
MAX_TOP_P = 1.0
MIN_PRESENCE_PENALTY = -2.0
MAX_PRESENCE_PENALTY = 2.0
MIN_FREQUENCY_PENALTY = -2.0
MAX_FREQUENCY_PENALTY = 2.0
MIN_BEST_OF = 1
MAX_BEST_OF = 3
MIN_PROMPT_MIN_LENGTH = 5
MAX_PROMPT_MIN_LENGTH = 4000
class Model:
def set_initial_state(self, usage_service):
self.mode = Mode.TEMPERATURE
self.temp = (
SETTINGS_DB["temp"] if "temp" in SETTINGS_DB else 0.85
) # Higher value means more random, lower value means more likely to be a coherent sentence
self.top_p = (
SETTINGS_DB["top_p"] if "top_p" in SETTINGS_DB else 1
) # 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution
self.max_tokens = (
SETTINGS_DB["max_tokens"] if "max_tokens" in SETTINGS_DB else 4000
) # The maximum number of tokens the model can generate
self.presence_penalty = (
SETTINGS_DB["presence_penalty"]
if "presence_penalty" in SETTINGS_DB
else 0.1
) # The presence penalty is a number between -2 and 2 that determines how much the model should avoid repeating the same text
# Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
self.frequency_penalty = (
SETTINGS_DB["frequency_penalty"]
if "frequency_penalty" in SETTINGS_DB
else 0.0
)
self.best_of = (
SETTINGS_DB["best_of"] if "best_of" in SETTINGS_DB else 1
) # Number of responses to compare the loglikelihoods of
self.prompt_min_length = (
SETTINGS_DB["prompt_min_length"]
if "prompt_min_length" in SETTINGS_DB
else 6
) # The minimum length of the prompt
self.max_conversation_length = (
SETTINGS_DB["max_conversation_length"]
if "max_conversation_length" in SETTINGS_DB
else 100000
) # The maximum number of conversation items to keep in memory
self.model = (
SETTINGS_DB["model"]
if "model" in SETTINGS_DB and SETTINGS_DB["model"] in Models.TEXT_MODELS
else Models.DEFAULT
)
self._low_usage_mode = False
self.usage_service = usage_service
self.DAVINCI_ROLES = ["admin", "Admin", "GPT", "gpt"]
self.image_size = (
SETTINGS_DB["image_size"]
if "image_size" in SETTINGS_DB
else ImageSize.MEDIUM
)
self.num_images = (
SETTINGS_DB["num_images"] if "num_images" in SETTINGS_DB else 2
)
self.summarize_conversations = (
bool(SETTINGS_DB["summarize_conversations"])
if "summarize_conversations" in SETTINGS_DB
else True
)
self.summarize_threshold = (
SETTINGS_DB["summarize_threshold"]
if "summarize_threshold" in SETTINGS_DB
else 3000
)
self.model_max_tokens = 4024
self.welcome_message_enabled = (
bool(SETTINGS_DB["welcome_message_enabled"])
if "welcome_message_enabled" in SETTINGS_DB
else False
)
self.num_static_conversation_items = (
SETTINGS_DB["num_static_conversation_items"]
if "num_static_conversation_items" in SETTINGS_DB
else 10
)
self.num_conversation_lookback = (
SETTINGS_DB["num_conversation_lookback"]
if "num_conversation_lookback" in SETTINGS_DB
else 5
)
self.use_org = (
bool(SETTINGS_DB["use_org"]) if "use_org" in SETTINGS_DB else False
)
def reset_settings(self):
keys = [
"temp",
"top_p",
"max_tokens",
"presence_penalty",
"frequency_penalty",
"best_of",
"prompt_min_length",
"max_conversation_length",
"model",
"image_size",
"num_images",
"summarize_conversations",
"summarize_threshold",
"welcome_message_enabled",
"num_static_conversation_items",
"num_conversation_lookback",
"use_org",
]
for key in keys:
try:
del SETTINGS_DB[key]
            except KeyError:
pass
self.set_initial_state(self.usage_service)
def __init__(self, usage_service):
self._num_conversation_lookback = None
self._num_static_conversation_items = None
self._welcome_message_enabled = None
self.model_max_tokens = None
self._summarize_threshold = None
self._summarize_conversations = None
self._num_images = None
self._image_size = None
self.DAVINCI_ROLES = None
self.usage_service = None
self._low_usage_mode = None
self._model = None
self._max_conversation_length = None
self._prompt_min_length = None
self._best_of = None
self._frequency_penalty = None
self._presence_penalty = None
self._max_tokens = None
self._top_p = None
self._temp = None
self._mode = None
self._use_org = None
self.set_initial_state(usage_service)
try:
self.IMAGE_SAVE_PATH = os.environ["IMAGE_SAVE_PATH"]
self.custom_image_path = True
except Exception:
self.IMAGE_SAVE_PATH = "dalleimages"
            # Create the default image save folder in the local directory if it doesn't exist
if not os.path.exists(self.IMAGE_SAVE_PATH):
os.makedirs(self.IMAGE_SAVE_PATH)
self.custom_image_path = False
self._hidden_attributes = [
"usage_service",
"DAVINCI_ROLES",
"custom_image_path",
"custom_web_root",
"_hidden_attributes",
"model_max_tokens",
"openai_key",
"openai_organization",
"IMAGE_SAVE_PATH",
]
self.openai_key = EnvService.get_openai_token()
self.openai_organization = EnvService.get_openai_organization()
# Use the @property and @setter decorators for all the self fields to provide value checking
@property
def use_org(self):
return self._use_org
@use_org.setter
def use_org(self, value):
self._use_org = value
SETTINGS_DB["use_org"] = value
@property
def num_static_conversation_items(self):
return self._num_static_conversation_items
@num_static_conversation_items.setter
def num_static_conversation_items(self, value):
value = int(value)
if value < ModelLimits.MIN_NUM_STATIC_CONVERSATION_ITEMS:
raise ValueError(
f"Number of static conversation items must be >= {ModelLimits.MIN_NUM_STATIC_CONVERSATION_ITEMS}"
)
if value > ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS:
raise ValueError(
f"Number of static conversation items must be <= {ModelLimits.MAX_NUM_STATIC_CONVERSATION_ITEMS}, this is to ensure reliability and reduce token wastage!"
)
self._num_static_conversation_items = value
SETTINGS_DB["num_static_conversation_items"] = value
@property
def num_conversation_lookback(self):
return self._num_conversation_lookback
@num_conversation_lookback.setter
def num_conversation_lookback(self, value):
value = int(value)
if value < ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK:
raise ValueError(
f"Number of conversations to look back on must be >= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}"
)
if value > ModelLimits.MAX_NUM_CONVERSATION_LOOKBACK:
raise ValueError(
f"Number of conversations to look back on must be <= {ModelLimits.MIN_NUM_CONVERSATION_LOOKBACK}, this is to ensure reliability and reduce token wastage!"
)
self._num_conversation_lookback = value
SETTINGS_DB["num_conversation_lookback"] = value
@property
def welcome_message_enabled(self):
return self._welcome_message_enabled
@welcome_message_enabled.setter
def welcome_message_enabled(self, value):
if not isinstance(value, bool):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
self._welcome_message_enabled = value
SETTINGS_DB["welcome_message_enabled"] = self._welcome_message_enabled
@property
def summarize_threshold(self):
return self._summarize_threshold
@summarize_threshold.setter
def summarize_threshold(self, value):
value = int(value)
if (
value < ModelLimits.MIN_SUMMARIZE_THRESHOLD
or value > ModelLimits.MAX_SUMMARIZE_THRESHOLD
):
raise ValueError(
f"Summarize threshold should be a number between {ModelLimits.MIN_SUMMARIZE_THRESHOLD} and {ModelLimits.MAX_SUMMARIZE_THRESHOLD}!"
)
self._summarize_threshold = value
SETTINGS_DB["summarize_threshold"] = value
@property
def summarize_conversations(self):
return self._summarize_conversations
@summarize_conversations.setter
def summarize_conversations(self, value):
# convert value string into boolean
if not isinstance(value, bool):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
self._summarize_conversations = value
SETTINGS_DB["summarize_conversations"] = value
@property
def image_size(self):
return self._image_size
@image_size.setter
def image_size(self, value):
if value in ImageSize.ALL_SIZES:
self._image_size = value
SETTINGS_DB["image_size"] = value
else:
raise ValueError(
f"Image size must be one of the following: {ImageSize.ALL_SIZES}"
)
@property
def num_images(self):
return self._num_images
@num_images.setter
def num_images(self, value):
value = int(value)
if value < ModelLimits.MIN_NUM_IMAGES or value > ModelLimits.MAX_NUM_IMAGES:
raise ValueError(
f"Number of images to generate should be a number between {ModelLimits.MIN_NUM_IMAGES} and {ModelLimits.MAX_NUM_IMAGES}!"
)
self._num_images = value
SETTINGS_DB["num_images"] = value
@property
def low_usage_mode(self):
return self._low_usage_mode
@low_usage_mode.setter
def low_usage_mode(self, value):
# convert value string into boolean
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Value must be either `true` or `false`!")
if value:
self._model = Models.LOW_USAGE_MODEL
self.max_tokens = 1900
self.model_max_tokens = 1000
else:
self._model = Models.DEFAULT
self.max_tokens = 4000
self.model_max_tokens = 4024
@property
def model(self):
return self._model
@model.setter
def model(self, model):
if model not in Models.TEXT_MODELS:
raise ValueError(f"Invalid model, must be one of: {Models.TEXT_MODELS}")
self._model = model
# Set the token count
self._max_tokens = Models.get_max_tokens(self._model)
SETTINGS_DB["model"] = model
@property
def max_conversation_length(self):
return self._max_conversation_length
@max_conversation_length.setter
def max_conversation_length(self, value):
value = int(value)
if value < ModelLimits.MIN_CONVERSATION_LENGTH:
raise ValueError(
f"Max conversation length must be greater than {ModelLimits.MIN_CONVERSATION_LENGTH}"
)
if value > ModelLimits.MAX_CONVERSATION_LENGTH:
raise ValueError(
f"Max conversation length must be less than {ModelLimits.MIN_CONVERSATION_LENGTH}, this will start using credits quick."
)
self._max_conversation_length = value
SETTINGS_DB["max_conversation_length"] = value
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
if value not in Mode.ALL_MODES:
raise ValueError(f"Mode must be one of: {Mode.ALL_MODES}")
# Set the other mode to 1 (the default) so that it is not used
# See https://beta.openai.com/docs/api-reference/completions/create#completions/create-temperature
if value == Mode.TOP_P:
self._temp = 1
elif value == Mode.TEMPERATURE:
self._top_p = 1
else:
raise ValueError(f"Unknown mode: {value}")
self._mode = value
SETTINGS_DB["mode"] = value
@property
def temp(self):
return self._temp
@temp.setter
def temp(self, value):
value = float(value)
if value < ModelLimits.MIN_TEMPERATURE or value > ModelLimits.MAX_TEMPERATURE:
raise ValueError(
f"Temperature must be between {ModelLimits.MIN_TEMPERATURE} and {ModelLimits.MAX_TEMPERATURE}, it is currently: {value}"
)
self._temp = value
SETTINGS_DB["temp"] = value
@property
def top_p(self):
return self._top_p
@top_p.setter
def top_p(self, value):
value = float(value)
if value < ModelLimits.MIN_TOP_P or value > ModelLimits.MAX_TOP_P:
raise ValueError(
f"Top P must be between {ModelLimits.MIN_TOP_P} and {ModelLimits.MAX_TOP_P}, it is currently: {value}"
)
self._top_p = value
SETTINGS_DB["top_p"] = value
@property
def max_tokens(self):
return self._max_tokens
@max_tokens.setter
def max_tokens(self, value):
value = int(value)
if value < ModelLimits.MIN_TOKENS or value > ModelLimits.MAX_TOKENS:
raise ValueError(
f"Max tokens must be between {ModelLimits.MIN_TOKENS} and {ModelLimits.MAX_TOKENS}, it is currently: {value}"
)
self._max_tokens = value
SETTINGS_DB["max_tokens"] = value
@property
def presence_penalty(self):
return self._presence_penalty
@presence_penalty.setter
def presence_penalty(self, value):
value = float(value)
if (
value < ModelLimits.MIN_PRESENCE_PENALTY
or value > ModelLimits.MAX_PRESENCE_PENALTY
):
raise ValueError(
f"Presence penalty must be between {ModelLimits.MIN_PRESENCE_PENALTY} and {ModelLimits.MAX_PRESENCE_PENALTY}, it is currently: {value}"
)
self._presence_penalty = value
SETTINGS_DB["presence_penalty"] = value
@property
def frequency_penalty(self):
return self._frequency_penalty
@frequency_penalty.setter
def frequency_penalty(self, value):
value = float(value)
if (
value < ModelLimits.MIN_FREQUENCY_PENALTY
or value > ModelLimits.MAX_FREQUENCY_PENALTY
):
raise ValueError(
f"Frequency penalty must be greater between {ModelLimits.MIN_FREQUENCY_PENALTY} and {ModelLimits.MAX_FREQUENCY_PENALTY}, it is currently: {value}"
)
self._frequency_penalty = value
SETTINGS_DB["frequency_penalty"] = value
@property
def best_of(self):
return self._best_of
@best_of.setter
def best_of(self, value):
value = int(value)
if value < ModelLimits.MIN_BEST_OF or value > ModelLimits.MAX_BEST_OF:
raise ValueError(
f"Best of must be between {ModelLimits.MIN_BEST_OF} and {ModelLimits.MAX_BEST_OF}, it is currently: {value}\nNote that increasing the value of this parameter will act as a multiplier on the number of tokens requested!"
)
self._best_of = value
SETTINGS_DB["best_of"] = value
@property
def prompt_min_length(self):
return self._prompt_min_length
@prompt_min_length.setter
def prompt_min_length(self, value):
value = int(value)
if (
value < ModelLimits.MIN_PROMPT_MIN_LENGTH
or value > ModelLimits.MAX_PROMPT_MIN_LENGTH
):
raise ValueError(
f"Minimal prompt length must be between {ModelLimits.MIN_PROMPT_MIN_LENGTH} and {ModelLimits.MAX_PROMPT_MIN_LENGTH}, it is currently: {value}"
)
self._prompt_min_length = value
SETTINGS_DB["prompt_min_length"] = value
def backoff_handler_http(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].status}: {details['exception'].message}"
)
def backoff_handler_request(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].args[0]}"
)
async def valid_text_request(self, response, model=None):
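        """Record token usage for a completion response (skipped for edit models) and raise ValueError if the API returned an error."""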
try:
tokens_used = int(response["usage"]["total_tokens"])
if model and model in Models.EDIT_MODELS:
pass
else:
await self.usage_service.update_usage(
tokens_used, await self.usage_service.get_cost_name(model)
)
except Exception as e:
traceback.print_exc()
if "error" in response:
raise ValueError(
"The API returned an invalid response: "
+ str(response["error"]["message"])
) from e
else:
raise ValueError("The API returned an invalid response") from e
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_http,
)
async def send_embedding_request(self, text, custom_api_key=None):
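        """Request an embedding for the given text from the OpenAI embeddings endpoint; returns the embedding vector, or None on failure."""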
async with aiohttp.ClientSession(
raise_for_status=True, timeout=aiohttp.ClientTimeout(total=300)
) as session:
payload = {
"model": Models.EMBEDDINGS,
"input": text,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/embeddings", json=payload, headers=headers
) as resp:
response = await resp.json()
try:
return response["data"][0]["embedding"]
except Exception:
print(response)
traceback.print_exc()
return
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_edit_request(
self,
instruction,
text=None,
temp_override=None,
top_p_override=None,
custom_api_key=None,
):
print(
f"The text about to be edited is [{text}] with instructions [{instruction}]"
)
print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override}")
async with aiohttp.ClientSession(
raise_for_status=False, timeout=aiohttp.ClientTimeout(total=300)
) as session:
payload = {
"model": Models.EDIT,
"input": "" if text is None else text,
"instruction": instruction,
"temperature": self.temp if temp_override is None else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/edits", json=payload, headers=headers
) as resp:
response = await resp.json()
await self.valid_text_request(response, model=Models.EDIT)
return response
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
factor=3,
base=5,
max_tries=6,
on_backoff=backoff_handler_http,
)
async def send_moderations_request(self, text):
# Use aiohttp to send the above request:
async with aiohttp.ClientSession(raise_for_status=True) as session:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key}",
}
payload = {"input": text}
async with session.post(
"https://api.openai.com/v1/moderations",
headers=headers,
json=payload,
) as response:
return await response.json()
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_summary_request(self, prompt, custom_api_key=None):
"""
Sends a summary request to the OpenAI API
"""
summary_request_text = []
summary_request_text.append(
"The following is a conversation instruction set and a conversation between two people, a <username>, and GPTie."
" Firstly, determine the <username>'s name from the conversation history, then summarize the conversation."
" Do not summarize the instructions for GPTie, only the conversation. Summarize the conversation in a detailed fashion. If <username> mentioned"
" their name, be sure to mention it in the summary. Pay close attention to things the <username> has told you, such as personal details."
)
summary_request_text.append(prompt + "\nDetailed summary of conversation: \n")
summary_request_text = "".join(summary_request_text)
tokens = self.usage_service.count_tokens(summary_request_text)
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.DAVINCI,
"prompt": summary_request_text,
"temperature": 0.5,
"top_p": 1,
"max_tokens": self.max_tokens - tokens,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"best_of": self.best_of,
}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
await self.valid_text_request(response)
# print(response["choices"][0]["text"])
return response
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_language_detect_request(
self,
text,
pretext,
) -> (
Tuple[dict, bool]
): # The response, and a boolean indicating whether or not the context limit was reached.
# Validate that all the parameters are in a good state before we send the request
prompt = f"{pretext}{text}\nOutput:"
max_tokens = Models.get_max_tokens(
Models.DAVINCI
) - self.usage_service.count_tokens(prompt)
print(f"Language detection request for {text}")
async with aiohttp.ClientSession(raise_for_status=False) as session:
payload = {
"model": Models.DAVINCI,
"prompt": prompt,
"temperature": 0,
"top_p": 1,
"max_tokens": max_tokens,
}
headers = {"Authorization": f"Bearer {self.openai_key}"}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
await self.valid_text_request(response)
print(f"Response -> {response}")
return response
def cleanse_username(self, text):
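        """Normalize a username for the chat API's name field: strip whitespace and colons, then replace any remaining non-alphanumeric characters with underscores."""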
text = text.strip()
text = text.replace(":", "")
text = text.replace(" ", "")
# Replace any character that's not a letter or number with an underscore
text = re.sub(r"[^a-zA-Z0-9]", "_", text)
return text
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_chatgpt_chat_request(
self,
prompt_history,
model,
bot_name,
user_displayname,
temp_override=None,
top_p_override=None,
best_of_override=None,
frequency_penalty_override=None,
presence_penalty_override=None,
max_tokens_override=None,
stop=None,
custom_api_key=None,
) -> (
Tuple[dict, bool]
): # The response, and a boolean indicating whether or not the context limit was reached.
# Validate that all the parameters are in a good state before we send the request
# Clean up the bot name
bot_name_clean = self.cleanse_username(bot_name)
# Format the request body into the messages format that the API is expecting
# "messages": [{"role": "user", "content": "Hello!"}]
messages = []
for number, message in enumerate(prompt_history):
if number == 0:
# If this is the first message, it is the context prompt.
messages.append(
{
"role": "user",
"content": message.text,
}
)
continue
if message.text.startswith(f"\n{bot_name}"):
text = message.text.replace(bot_name, "")
text = text.replace("<|endofstatement|>", "")
messages.append(
{
"role": "assistant",
"content": text,
} # TODO add back the assistant's name when the API is fixed..
)
else:
try:
if (
message.text.strip()
.lower()
.startswith("this conversation has some context from earlier")
):
raise Exception("This is a context message")
username = re.search(r"(?<=\n)(.*?)(?=:)", message.text).group()
username_clean = self.cleanse_username(username)
text = message.text.replace(f"{username}:", "")
# Strip whitespace just from the right side of the string
text = text.rstrip()
text = text.replace("<|endofstatement|>", "")
messages.append(
{"role": "user", "name": username_clean, "content": text}
)
except Exception:
text = message.text.replace("<|endofstatement|>", "")
messages.append({"role": "system", "content": text})
print(f"Messages -> {messages}")
async with aiohttp.ClientSession(
raise_for_status=False, timeout=aiohttp.ClientTimeout(total=300)
) as session:
payload = {
"model": self.model if not model else model,
"messages": messages,
"stop": "" if stop is None else stop,
"temperature": self.temp if temp_override is None else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"presence_penalty": self.presence_penalty
if presence_penalty_override is None
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if frequency_penalty_override is None
else frequency_penalty_override,
}
headers = {
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/chat/completions",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(
response, model=self.model if model is None else model
)
print(f"Response -> {response}")
return response
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_transcription_request(
self,
file: [discord.Attachment, discord.File],
temperature_override=None,
custom_api_key=None,
):
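        """Upload an audio or video attachment to the OpenAI Whisper transcription endpoint and return the transcribed text."""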
async with aiohttp.ClientSession(raise_for_status=True) as session:
data = aiohttp.FormData()
data.add_field("model", "whisper-1")
print("audio." + file.filename.split(".")[-1])
            # Attachment objects expose an async read(); discord.File objects expose a synchronous file pointer instead
data.add_field(
"file",
                (await file.read()) if isinstance(file, discord.Attachment) else file.fp.read(),
filename="audio." + file.filename.split(".")[-1]
if isinstance(file, discord.Attachment)
else "audio.mp4",
content_type=file.content_type
if isinstance(file, discord.Attachment)
else "video/mp4",
)
if temperature_override:
data.add_field("temperature", temperature_override)
async with session.post(
"https://api.openai.com/v1/audio/transcriptions",
headers={
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
},
data=data,
) as resp:
response = await resp.json()
return response["text"]
@backoff.on_exception(
backoff.expo,
ValueError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_request,
)
async def send_request(
self,
prompt,
tokens,
temp_override=None,
top_p_override=None,
best_of_override=None,
frequency_penalty_override=None,
presence_penalty_override=None,
max_tokens_override=None,
model=None,
stop=None,
custom_api_key=None,
is_chatgpt_request=False,
system_instruction=None,
): # The response, and a boolean indicating whether or not the context limit was reached.
# Validate that all the parameters are in a good state before we send the request
if not max_tokens_override:
if (
model
and model not in Models.GPT4_MODELS
and model not in Models.CHATGPT_MODELS
):
max_tokens_override = Models.get_max_tokens(model) - tokens
messages = [{"role": "user", "content": prompt}]
# modify prompt if a system instruction is set
if system_instruction and is_chatgpt_request:
messages = [
{"role": "system", "content": system_instruction},
{"role": "user", "content": prompt},
]
elif system_instruction:
prompt = f"{system_instruction} {prompt}"
if system_instruction:
print(f"The instruction added to the prompt will be {system_instruction}")
print(f"The prompt about to be sent is {prompt}")
print(
f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}, model:{model if model else 'none'}, stop:{stop}"
)
# Non-ChatGPT simple completion models.
if not is_chatgpt_request:
async with aiohttp.ClientSession(
raise_for_status=False, timeout=aiohttp.ClientTimeout(total=300)
) as session:
payload = {
"model": self.model if model is None else model,
"prompt": prompt,
"stop": "" if stop is None else stop,
"temperature": self.temp
if temp_override is None
else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"max_tokens": self.max_tokens - tokens
if max_tokens_override is None
else max_tokens_override,
"presence_penalty": self.presence_penalty
if presence_penalty_override is None
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if frequency_penalty_override is None
else frequency_penalty_override,
"best_of": self.best_of
if not best_of_override
else best_of_override,
}
headers = {
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/completions",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(
response, model=self.model if model is None else model
)
print(f"Response -> {response}")
return response
else: # ChatGPT/GPT4 Simple completion
async with aiohttp.ClientSession(
raise_for_status=False, timeout=aiohttp.ClientTimeout(total=300)
) as session:
payload = {
"model": self.model if not model else model,
"messages": messages,
"stop": "" if stop is None else stop,
"temperature": self.temp
if temp_override is None
else temp_override,
"top_p": self.top_p if top_p_override is None else top_p_override,
"presence_penalty": self.presence_penalty
if presence_penalty_override is None
else presence_penalty_override,
"frequency_penalty": self.frequency_penalty
if frequency_penalty_override is None
else frequency_penalty_override,
}
headers = {
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}"
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with session.post(
"https://api.openai.com/v1/chat/completions",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
# print(f"Payload -> {payload}")
# Parse the total tokens used for this request and response pair from the response
await self.valid_text_request(
response, model=self.model if model is None else model
)
print(f"Response -> {response}")
return response
@staticmethod
async def send_test_request(api_key):
async with aiohttp.ClientSession() as session:
payload = {
"model": Models.LOW_USAGE_MODEL,
"prompt": "test.",
"temperature": 1,
"top_p": 1,
"max_tokens": 10,
}
headers = {"Authorization": f"Bearer {api_key}"}
async with session.post(
"https://api.openai.com/v1/completions", json=payload, headers=headers
) as resp:
response = await resp.json()
try:
int(response["usage"]["total_tokens"])
                except Exception:
raise ValueError(str(response["error"]["message"]))
return response
@backoff.on_exception(
backoff.expo,
aiohttp.ClientResponseError,
factor=3,
base=5,
max_tries=4,
on_backoff=backoff_handler_http,
)
async def send_image_request(
self, ctx, prompt, vary=None, custom_api_key=None
) -> tuple[File, list[Any]]:
# Validate that all the parameters are in a good state before we send the request
words = len(prompt.split(" "))
if words < 1 or words > 75:
raise ValueError(
"Prompt must be greater than 1 word and less than 75, it is currently "
+ str(words)
)
# print("The prompt about to be sent is " + prompt)
await self.usage_service.update_usage_image(self.image_size)
response = None
if not vary:
payload = {"prompt": prompt, "n": self.num_images, "size": self.image_size}
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
}
self.use_org = True if "true" in str(self.use_org).lower() else False
if self.use_org:
if self.openai_organization:
headers["OpenAI-Organization"] = self.openai_organization
async with aiohttp.ClientSession(
raise_for_status=True, timeout=aiohttp.ClientTimeout(total=300)
) as session:
async with session.post(
"https://api.openai.com/v1/images/generations",
json=payload,
headers=headers,
) as resp:
response = await resp.json()
else:
async with aiohttp.ClientSession(
raise_for_status=True, timeout=aiohttp.ClientTimeout(total=300)
) as session:
data = aiohttp.FormData()
data.add_field("n", str(self.num_images))
data.add_field("size", self.image_size)
with open(vary, "rb") as f:
data.add_field(
"image", f, filename="file.png", content_type="image/png"
)
async with session.post(
"https://api.openai.com/v1/images/variations",
headers={
"Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}",
},
data=data,
) as resp:
response = await resp.json()
print(response)
image_urls = []
for result in response["data"]:
image_urls.append(result["url"])
# For each image url, open it as an image object using PIL
images = await asyncio.get_running_loop().run_in_executor(
None,
lambda: [
Image.open(requests.get(url, stream=True, timeout=10).raw)
for url in image_urls
],
)
# Save all the images with a random name to self.IMAGE_SAVE_PATH
image_names = [f"{uuid.uuid4()}.png" for _ in range(len(images))]
for image, name in zip(images, image_names):
await asyncio.get_running_loop().run_in_executor(
None, image.save, f"{self.IMAGE_SAVE_PATH}/{name}"
)
# Update image_urls to include the local path to these new images
image_urls = [f"{self.IMAGE_SAVE_PATH}/{name}" for name in image_names]
widths, heights = zip(*(i.size for i in images))
# Calculate the number of rows and columns needed for the grid
num_rows = num_cols = int(math.ceil(math.sqrt(len(images))))
# If there are only 2 images, set the number of rows to 1
if len(images) == 2:
num_rows = 1
# Calculate the size of the combined image
width = max(widths) * num_cols
height = max(heights) * num_rows
# Create a transparent image with the same size as the images
transparent = await asyncio.get_running_loop().run_in_executor(
None, lambda: Image.new("RGBA", (max(widths), max(heights)))
)
# Create a new image with the calculated size
new_im = await asyncio.get_running_loop().run_in_executor(
None, lambda: Image.new("RGBA", (width, height))
)
# Paste the images and transparent segments into the grid
x_offset = y_offset = 0
for im in images:
await asyncio.get_running_loop().run_in_executor(
None, new_im.paste, im, (x_offset, y_offset)
)
x_offset += im.size[0]
if x_offset >= width:
x_offset = 0
y_offset += im.size[1]
# Fill the remaining cells with transparent segments
while y_offset < height:
while x_offset < width:
await asyncio.get_running_loop().run_in_executor(
None, new_im.paste, transparent, (x_offset, y_offset)
)
x_offset += transparent.size[0]
x_offset = 0
y_offset += transparent.size[1]
# Save the new_im to a temporary file and return it as a discord.File
temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
await asyncio.get_running_loop().run_in_executor(
None, new_im.save, temp_file.name
)
# Print the filesize of new_im, in mega bytes
image_size = os.path.getsize(temp_file.name) / 1048576
if ctx.guild is None:
guild_file_limit = 8
else:
guild_file_limit = ctx.guild.filesize_limit / 1048576
# If the image size is greater than 8MB, we can't return this to the user, so we will need to downscale the
# image and try again
safety_counter = 0
while image_size > guild_file_limit:
safety_counter += 1
if safety_counter >= 3:
break
print(
f"Image size is {image_size}MB, which is too large for this server {guild_file_limit}MB. Downscaling and trying again"
)
# We want to do this resizing asynchronously, so that it doesn't block the main thread during the resize.
# We can use the asyncio.run_in_executor method to do this
new_im = await asyncio.get_running_loop().run_in_executor(
None,
functools.partial(
new_im.resize, (int(new_im.width / 1.05), int(new_im.height / 1.05))
),
)
temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
await asyncio.get_running_loop().run_in_executor(
None, new_im.save, temp_file.name
)
            image_size = os.path.getsize(temp_file.name) / 1048576
print(f"New image size is {image_size}MB")
return (discord.File(temp_file.name), image_urls)
| SwarmsDiscord-main | swarmsdiscord/models/openai_model.py |
import base64
import json
import os
import aiohttp
from services.environment_service import EnvService
import replicate
class ImageUnderstandingModel:
def __init__(self):
# Try to get the replicate API key from the environment
self.replicate_key = EnvService.get_replicate_api_key()
# Set the environment REPLICATE_API_TOKEN to the replicate API key
if self.replicate_key:
os.environ["REPLICATE_API_TOKEN"] = self.replicate_key
self.key_set = True
else:
self.key_set = False
self.google_cloud_project_id = EnvService.get_google_cloud_project_id()
self.google_cloud_api_key = EnvService.get_google_search_api_key()
def get_is_usable(self):
return self.key_set
def ask_image_question(self, prompt, filepath):
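        """Ask a question about an image using the BLIP-2 model hosted on Replicate."""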
output = replicate.run(
"andreasjansson/blip-2:4b32258c42e9efd4288bb9910bc532a69727f9acd26aa08e175713a0a857a608",
input={"image": open(filepath, "rb"), "question": prompt},
)
return output
def get_image_caption(self, filepath):
output = replicate.run(
"andreasjansson/blip-2:4b32258c42e9efd4288bb9910bc532a69727f9acd26aa08e175713a0a857a608",
input={"image": open(filepath, "rb"), "caption": True},
)
return output
def get_image_stylistic_caption(self, filepath):
output = replicate.run(
"pharmapsychotic/clip-interrogator:a4a8bafd6089e1716b06057c42b19378250d008b80fe87caa5cd36d40c1eda90",
input={"image": open(filepath, "rb")},
)
return output
async def do_image_ocr(self, filepath):
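        """Run OCR on an image via the Google Cloud Vision text-detection API; returns the extracted text, or the string "None" when no API key is configured."""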
# Read the image file and encode it in base64 format
if not self.google_cloud_api_key:
return "None"
with open(filepath, "rb") as image_file:
encoded_image = base64.b64encode(image_file.read()).decode("utf-8")
# Prepare the JSON payload
payload = {
"requests": [
{
"image": {"content": encoded_image},
"features": [{"type": "TEXT_DETECTION"}],
}
]
}
header = {
"Content-Type": "application/json; charset=utf-8",
}
url = f"https://vision.googleapis.com/v1/images:annotate?key={self.google_cloud_api_key}"
# Send the async request
async with aiohttp.ClientSession() as session:
async with session.post(
url, headers=header, data=json.dumps(payload)
) as response:
result = await response.json()
if response.status == 200:
# Get fullTextAnnotation
full_text_annotation = result.get("responses", [])[0].get(
"fullTextAnnotation"
)
if full_text_annotation:
extracted_text = full_text_annotation.get("text")
# Return the extracted text
return extracted_text
else:
return ""
else:
raise Exception(
f"Google Cloud Vision API returned an error. Status code: {response.status}, Error: {result}"
)
| SwarmsDiscord-main | swarmsdiscord/models/image_understanding_model.py |
import discord
import re
import aiohttp
from services.environment_service import EnvService
from typing import Callable
ADMIN_ROLES = EnvService.get_admin_roles()
DALLE_ROLES = EnvService.get_dalle_roles()
GPT_ROLES = EnvService.get_gpt_roles()
INDEX_ROLES = EnvService.get_index_roles()
TRANSLATOR_ROLES = EnvService.get_translator_roles()
SEARCH_ROLES = EnvService.get_search_roles()
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
class Check:
@staticmethod
def check_admin_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if ADMIN_ROLES == [None]:
return True
if not any(role.name.lower() in ADMIN_ROLES for role in ctx.user.roles):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {ADMIN_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
@staticmethod
def check_dalle_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if DALLE_ROLES == [None]:
return True
if not any(role.name.lower() in DALLE_ROLES for role in ctx.user.roles):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {DALLE_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
@staticmethod
def check_gpt_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if GPT_ROLES == [None]:
return True
if not any(role.name.lower() in GPT_ROLES for role in ctx.user.roles):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {GPT_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
@staticmethod
def check_index_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if INDEX_ROLES == [None]:
return True
if not any(role.name.lower() in INDEX_ROLES for role in ctx.user.roles):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {INDEX_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
@staticmethod
def check_translator_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if TRANSLATOR_ROLES == [None]:
return True
if not any(
role.name.lower() in TRANSLATOR_ROLES for role in ctx.user.roles
):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {TRANSLATOR_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
@staticmethod
def check_search_roles() -> Callable:
async def inner(ctx: discord.ApplicationContext):
if SEARCH_ROLES == [None]:
return True
if not any(role.name.lower() in SEARCH_ROLES for role in ctx.user.roles):
await ctx.defer(ephemeral=True)
await ctx.respond(
f"You don't have permission, list of roles is {SEARCH_ROLES}",
ephemeral=True,
delete_after=10,
)
return False
return True
return inner
class UrlCheck:
@staticmethod
async def check_youtube_link(url):
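        """Return True if the URL matches, or redirects to, a YouTube link."""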
youtube_regex = (
r"(https?://)?(www\.)?(youtube|youtu|youtube-nocookie)\.(com|be)/"
)
match = re.match(youtube_regex, url)
if match is not None:
return True
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
final_url = str(resp.url)
match = re.match(youtube_regex, final_url)
return match is not None
| SwarmsDiscord-main | swarmsdiscord/models/check_model.py |
import asyncio
import os
import tempfile
import traceback
from datetime import datetime, date
from functools import partial
from pathlib import Path
import discord
import aiohttp
import openai
import tiktoken
from langchain.chat_models import ChatOpenAI
from llama_index import (
QuestionAnswerPrompt,
GPTVectorStoreIndex,
BeautifulSoupWebReader,
Document,
LLMPredictor,
OpenAIEmbedding,
SimpleDirectoryReader,
MockEmbedding,
ServiceContext,
ResponseSynthesizer,
)
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.composability import QASummaryQueryEngineBuilder
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.optimization import SentenceEmbeddingOptimizer
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from langchain import OpenAI
from models.openai_model import Models
from services.environment_service import EnvService
MAX_SEARCH_PRICE = EnvService.get_max_search_price()
class Search:
def __init__(self, gpt_model, usage_service):
self.model = gpt_model
self.usage_service = usage_service
self.google_search_api_key = EnvService.get_google_search_api_key()
self.google_search_engine_id = EnvService.get_google_search_engine_id()
self.loop = asyncio.get_running_loop()
self.qaprompt = QuestionAnswerPrompt(
"You are formulating the response to a search query given the search prompt and the context. Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"
)
self.openai_key = os.getenv("OPENAI_TOKEN")
self.EMBED_CUTOFF = 2000
def add_search_index(self, index, user_id, query):
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}_search").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{query[:20]}"
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}_search"
/ f"{file}"
)
def build_search_started_embed(self):
embed = discord.Embed(
title="Searching the web...",
description="Refining google search query...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_refined_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n"
+ f"`{refined_query}`"
+ "\nRetrieving links from google...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_links_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nRetrieving webpages...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_determining_price_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nPre-determining index price...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_webpages_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`" "\nIndexing...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_indexed_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nThinking about your question...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_final_embed(self, refined_query, price):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nDone!\n||The total price was $" + price + "||",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def index_webpage(self, url) -> list[Document]:
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
return documents
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
raise ValueError("Could not download PDF")
        # The PDF was written to a temporary file above; load it from that path with the directory reader
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
for document in documents:
document.extra_info = {"URL": url}
        # Delete the temporary file now that its contents have been loaded
        os.remove(f.name)
return documents
async def get_links(self, query, search_scope=2):
"""Search the web for a query"""
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://www.googleapis.com/customsearch/v1?key={self.google_search_api_key}&cx={self.google_search_engine_id}&q={query}"
) as response:
if response.status == 200:
data = await response.json()
                    # Return the top `search_scope` links, plus the full result list as a fallback pool
return (
[item["link"] for item in data["items"][:search_scope]],
[item["link"] for item in data["items"]],
)
else:
raise ValueError(
"Error while retrieving links, the response returned "
+ str(response.status)
+ " with the message "
+ str(await response.text())
)
async def try_edit(self, message, embed):
try:
await message.edit(embed=embed)
except Exception:
traceback.print_exc()
pass
async def try_delete(self, message):
try:
await message.delete()
except Exception:
traceback.print_exc()
pass
async def search(
self,
ctx: discord.ApplicationContext,
query,
user_api_key,
search_scope,
nodes,
deep,
response_mode,
model,
multistep=False,
redo=None,
):
DEFAULT_SEARCH_NODES = 1
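        # Use the server's OpenAI key unless the user supplied their own; the key is set on the
        # environment so downstream libraries (openai, llama_index, langchain) pick it up.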
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
# Initialize the search cost
price = 0
if ctx:
in_progress_message = (
await ctx.respond(embed=self.build_search_started_embed())
if not redo
else await ctx.channel.send(embed=self.build_search_started_embed())
)
try:
llm_predictor_presearch = OpenAI(
max_tokens=50,
temperature=0.4,
presence_penalty=0.65,
model_name="text-davinci-003",
)
# Refine a query to send to google custom search API
prompt = f"You are to be given a search query for google. Change the query such that putting it into the Google Custom Search API will return the most relevant websites to assist in answering the original query. If the original query is inferring knowledge about the current day, insert the current day into the refined prompt. If the original query is inferring knowledge about the current month, insert the current month and year into the refined prompt. If the original query is inferring knowledge about the current year, insert the current year into the refined prompt. Generally, if the original query is inferring knowledge about something that happened recently, insert the current month into the refined query. Avoid inserting a day, month, or year for queries that purely ask about facts and about things that don't have much time-relevance. The current date is {str(datetime.now().date())}. Do not insert the current date if not neccessary. Respond with only the refined query for the original query. Donβt use punctuation or quotation marks.\n\nExamples:\n---\nOriginal Query: βWho is Harald Baldr?β\nRefined Query: βHarald Baldr biographyβ\n---\nOriginal Query: βWhat happened today with the Ohio train derailment?β\nRefined Query: βOhio train derailment details {str(datetime.now().date())}β\n---\nOriginal Query: βIs copper in drinking water bad for you?β\nRefined Query: βcopper in drinking water adverse effectsβ\n---\nOriginal Query: What's the current time in Mississauga?\nRefined Query: current time Mississauga\nNow, refine the user input query.\nOriginal Query: {query}\nRefined Query:"
query_refined = await llm_predictor_presearch.agenerate(
prompts=[prompt],
)
query_refined_text = query_refined.generations[0][0].text
await self.usage_service.update_usage(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
price += await self.usage_service.get_price(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
except Exception as e:
traceback.print_exc()
query_refined_text = query
if ctx:
await self.try_edit(
in_progress_message, self.build_search_refined_embed(query_refined_text)
)
# Get the links for the query
links, all_links = await self.get_links(
query_refined_text, search_scope=search_scope
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_links_retrieved_embed(query_refined_text),
)
if all_links is None:
raise ValueError("The Google Search API returned an error.")
# For each link, crawl the page and get all the text that's not HTML garbage.
# Concatenate all the text for a given website into one string and save it into an array:
documents = []
for link in links:
            # First, attempt a connection to the link with a short timeout; if it fails or times out,
            # skip document loading for this link.
pdf = False
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=1) as response:
                        # Follow redirects first (they would otherwise be treated as failed requests below)
                        if response.status in [301, 302, 303, 307, 308]:
                            try:
                                links.append(response.url)
                                continue
                            except:
                                continue
                        # Add another entry to links from all_links if the link is not already in it to compensate for the failed request
                        elif response.status not in [200, 203, 202, 204]:
                            for link2 in all_links:
                                if link2 not in links:
                                    links.append(link2)
                                    break
                            continue
                        else:
                            # Detect if the link is a PDF, if it is, we load it differently
                            if response.headers["Content-Type"] == "application/pdf":
                                pdf = True
except:
try:
# Try to add a link from all_links, this is kind of messy.
for link2 in all_links:
if link2 not in links:
links.append(link2)
break
except:
pass
continue
try:
if not pdf:
document = await self.loop.run_in_executor(
None, partial(self.index_webpage, link)
)
else:
document = await self.index_pdf(link)
                documents.extend(document)
except Exception as e:
traceback.print_exc()
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_webpages_retrieved_embed(query_refined_text),
)
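        # Build the real service context: OpenAI embeddings, the chat LLM predictor, and a token
        # counter so usage can be billed after the query runs.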
embedding_model = OpenAIEmbedding()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_model,
callback_manager=callback_manager,
)
# Check price
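        # A MockEmbedding plus a token-counting callback estimates the embedding cost of indexing
        # these documents without calling the embedding API.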
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager_mock = CallbackManager([token_counter_mock])
embed_model_mock = MockEmbedding(embed_dim=1536)
service_context_mock = ServiceContext.from_defaults(
embed_model=embed_model_mock, callback_manager=callback_manager_mock
)
        await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
if total_usage_price > 1.00:
raise ValueError(
"Doing this search would be prohibitively expensive. Please try a narrower search scope."
)
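        # Shallow search: index the documents into a plain vector store; deep search builds a
        # QA/summary query graph instead.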
if not deep:
index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents,
service_context=service_context,
use_async=True,
),
)
# save the index to disk if not a redo
if not redo:
self.add_search_index(
index,
ctx.user.id
if isinstance(ctx, discord.ApplicationContext)
else ctx.author.id,
query,
)
else:
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_determining_price_embed(query_refined_text),
)
graph_builder = QASummaryQueryEngineBuilder(service_context=service_context)
index = await self.loop.run_in_executor(
None,
partial(
graph_builder.build_from_documents,
documents,
),
)
if ctx:
await self.try_edit(
in_progress_message, self.build_search_indexed_embed(query_refined_text)
)
########################################
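        # Assemble the retrieval pipeline: a vector retriever, a response synthesizer using the
        # custom QA prompt, and an optional multi-step query-decomposition engine.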
if not deep:
step_decompose_transform = StepDecomposeQueryTransform(
service_context.llm_predictor
)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=nodes or DEFAULT_SEARCH_NODES,
)
response_synthesizer = ResponseSynthesizer.from_args(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
text_qa_template=self.qaprompt,
optimizer=SentenceEmbeddingOptimizer(threshold_cutoff=0.7),
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=step_decompose_transform,
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = await self.loop.run_in_executor(
None,
partial(multistep_query_engine.query, query),
)
else:
response = await self.loop.run_in_executor(
None,
partial(query_engine.query, query),
)
else:
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {"similarity_top_k": 1},
},
{
"index_struct_type": "list",
"query_mode": "default",
"query_kwargs": {
"response_mode": "tree_summarize",
"use_async": True,
"verbose": True,
},
},
{
"index_struct_type": "tree",
"query_mode": "default",
"query_kwargs": {
"verbose": True,
"use_async": True,
"child_branch_factor": 2,
},
},
]
response = await self.loop.run_in_executor(
None,
partial(
index.query,
query,
),
)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
price += await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
) + await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_final_embed(query_refined_text, str(round(price, 6))),
)
return response, query_refined_text
| SwarmsDiscord-main | swarmsdiscord/models/search_model.py |
import asyncio
import traceback
import os
from functools import partial
from pathlib import Path
from yt_dlp import YoutubeDL
import discord
from discord.ext import pages
from models.deepl_model import TranslationModel
from models.embed_statics_model import EmbedStatics
from models.check_model import UrlCheck
from services.environment_service import EnvService
from services.text_service import TextService
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
class TranscribeService(discord.Cog, name="TranscribeService"):
"""Cog containing translation commands and retrieval of transcribe services"""
def __init__(
self,
bot,
model,
usage_service,
):
super().__init__()
self.bot = bot
self.model = model
self.usage_service = usage_service
# Make the "audiotemp" folder if it doesn't exist, using pathlib
Path("audiotemp").mkdir(parents=True, exist_ok=True)
async def transcribe_link_command(
self, ctx: discord.ApplicationContext, link: str, temperature: float
):
        # Download the audio from the provided link (YouTube only for now) and send it for transcription
await ctx.defer()
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
if await UrlCheck.check_youtube_link(link):
# We need to download the youtube video and save it to a temporary file
options = {
"format": "bestaudio/best",
"outtmpl": os.path.join(
"audiotemp/", f"{ctx.user.id}temp.%(ext)s"
), # save file as the videos title
"quiet": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
# Delete audiotemp/{str(ctx.user.id)}temp.mp3 if it already exists
if Path("audiotemp/{}temp.mp3".format(str(ctx.user.id))).exists():
Path("audiotemp/{}temp.mp3".format(str(ctx.user.id))).unlink()
def download_video(url, options):
with YoutubeDL(options) as ydl:
ydl.download([url])
try:
await asyncio.get_running_loop().run_in_executor(
None,
partial(
download_video,
link,
options,
),
)
except Exception as e:
traceback.print_exc()
await ctx.respond(
"Failed to download youtube video. Please try again later. "
+ str(e)
)
return
else:
await ctx.respond(
"Please upload a valid youtube link. Other links are not implemented yet"
)
return
# Load the file object from the file_path
file_path = Path("audiotemp/{}temp.mp3".format(str(ctx.user.id)))
file = discord.File(file_path)
response_message = await ctx.respond(
embed=EmbedStatics.build_transcribe_progress_embed()
)
try:
response = await self.model.send_transcription_request(
file, temperature, user_api_key
)
print(response)
if len(response) > 4080:
# Chunk the response into 2048 character chunks, each an embed page
chunks = [response[i : i + 2048] for i in range(0, len(response), 2048)]
embed_pages = []
for chunk in chunks:
embed_pages.append(
discord.Embed(
title="Transcription Page {}".format(len(embed_pages) + 1),
description=chunk,
)
)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await paginator.respond(ctx.interaction)
await response_message.delete()
return
await response_message.edit(
embed=EmbedStatics.build_transcribe_success_embed(response)
)
except Exception as e:
await response_message.edit(
embed=EmbedStatics.build_transcribe_failed_embed(str(e))
)
async def transcribe_file_command(
self,
ctx: discord.ApplicationContext,
file: discord.Attachment,
temperature: float,
):
# Check if this discord file is an instance of mp3, mp4, mpeg, mpga, m4a, wav, or webm.
await ctx.defer()
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
if not file.filename.endswith(
(".mp3", ".mp4", ".mpeg", ".mpga", ".m4a", ".wav", ".webm")
):
await ctx.respond("Please upload a valid audio/video file.")
return
# Also check the file metadata in case it is actually an audio/video file but with a weird ending
if not file.content_type.startswith(("audio/", "video/")):
await ctx.respond("Please upload a valid audio/video file.")
return
response_message = await ctx.respond(
embed=EmbedStatics.build_transcribe_progress_embed()
)
try:
response = await self.model.send_transcription_request(
file, temperature, user_api_key
)
if len(response) > 4080:
# Chunk the response into 2048 character chunks, each an embed page
chunks = [response[i : i + 2048] for i in range(0, len(response), 2048)]
embed_pages = []
for chunk in chunks:
embed_pages.append(
discord.Embed(
title="Transcription Page {}".format(len(embed_pages) + 1),
description=chunk,
)
)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await paginator.respond(ctx.interaction)
await response_message.edit(
embed=EmbedStatics.build_transcribe_success_embed(response)
)
except Exception as e:
traceback.print_exc()
await response_message.edit(
embed=EmbedStatics.build_transcribe_failed_embed(str(e))
)
| SwarmsDiscord-main | swarmsdiscord/cogs/transcription_service_cog.py |
import datetime
import io
import json
import os
import sys
import tempfile
import traceback
from typing import Optional, Dict, Any
import aiohttp
import re
import discord
import openai
from bs4 import BeautifulSoup
from discord.ext import pages
from langchain import (
GoogleSearchAPIWrapper,
WolframAlphaAPIWrapper,
FAISS,
InMemoryDocstore,
LLMChain,
ConversationChain,
)
from langchain.agents import (
Tool,
initialize_agent,
AgentType,
ZeroShotAgent,
AgentExecutor,
)
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory, CombinedMemory
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
MessagesPlaceholder,
HumanMessagePromptTemplate,
)
from langchain.requests import TextRequestsWrapper, Requests
from llama_index import (
GPTVectorStoreIndex,
Document,
SimpleDirectoryReader,
ServiceContext,
OpenAIEmbedding,
ResponseSynthesizer,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from pydantic import Extra, BaseModel
import tiktoken
from models.embed_statics_model import EmbedStatics
from models.search_model import Search
from services.deletion_service import Deletion
from services.environment_service import EnvService
from services.moderations_service import Moderation
from services.text_service import TextService
from models.openai_model import Models
from contextlib import redirect_stdout
from langchain.agents.conversational_chat.output_parser import ConvoOutputParser
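# Monkey-patch LangChain's conversational output parser so agent replies containing stray code
# fences or missing JSON are still parsed, falling back to treating the text as a final answer.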
original_parse = ConvoOutputParser.parse
def my_parse(self, text):
    # Strip plain ``` fence pairs from the input (but leave ```json fences alone),
    # keeping the text between the fences intact.
text_without_triple_backticks = re.sub(
r"```(?!json)(.*?)```", r"\1", text, flags=re.DOTALL
)
# Call the original parse() method with the modified input
try:
result = original_parse(self, text_without_triple_backticks)
except Exception:
traceback.print_exc()
# Take the text and format it like
# {
# "action": "Final Answer",
# "action_input": text
# }
# This will cause the bot to respond with the text as if it were a final answer.
if "action_input" not in text_without_triple_backticks:
text_without_triple_backticks = f'{{"action": "Final Answer", "action_input": {json.dumps(text_without_triple_backticks)}}}'
result = original_parse(self, text_without_triple_backticks)
else:
# Insert "```json" before the opening curly brace
text_without_triple_backticks = re.sub(
r"({)", r"```json \1", text_without_triple_backticks
)
# Insert "```" after the closing curly brace
text_without_triple_backticks = re.sub(
r"(})", r"\1 ```", text_without_triple_backticks
)
result = original_parse(self, text_without_triple_backticks)
return result
# Replace the original parse function with the new one
ConvoOutputParser.parse = my_parse
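# Helper that temporarily redirects stdout to a buffer so the agent's verbose tool trace can be
# captured and inspected alongside the coroutine's result.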
class CaptureStdout:
def __enter__(self):
self.buffer = io.StringIO()
self.original_stdout = sys.stdout
sys.stdout = self.buffer
return self.buffer
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self.original_stdout
async def capture_stdout(func, *args, **kwargs):
with CaptureStdout() as buffer:
result = await func(*args, **kwargs)
captured_output = buffer.getvalue()
return result, captured_output
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
GOOGLE_API_KEY = EnvService.get_google_search_api_key()
GOOGLE_SEARCH_ENGINE_ID = EnvService.get_google_search_engine_id()
OPENAI_API_KEY = EnvService.get_openai_token()
# Set the environment
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
openai.api_key = os.environ["OPENAI_API_KEY"]
WOLFRAM_API_KEY = EnvService.get_wolfram_api_key()
vector_stores = {}
class RedoSearchUser:
def __init__(self, ctx, query, search_scope, nodes, response_mode):
self.ctx = ctx
self.query = query
self.search_scope = search_scope
self.nodes = nodes
self.response_mode = response_mode
class CustomTextRequestWrapper(BaseModel):
"""Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
"""
headers: Optional[Dict[str, str]] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def __init__(self, **data: Any):
super().__init__(**data)
@property
def requests(self) -> Requests:
return Requests(headers=self.headers, aiosession=self.aiosession)
def get(self, url: str, **kwargs: Any) -> str:
# the "url" field is actuall some input from the LLM, it is a comma separated string of the url and a boolean value and the original query
try:
url, model, original_query = url.split(",")
url = url.strip()
model = model.strip()
original_query = original_query.strip()
except:
url = url
model = "gpt-3.5-turbo"
original_query = "No Original Query Provided"
"""GET the URL and return the text."""
text = self.requests.get(url, **kwargs).text
# Load this text into BeautifulSoup, clean it up and only retain text content within <p> and <title> and <h1> type tags, get rid of all javascript and css too.
soup = BeautifulSoup(text, "html.parser")
# Decompose script, style, head, and meta tags
for tag in soup(["script", "style", "head", "meta"]):
tag.decompose()
# Get remaining text from the soup object
text = soup.get_text()
# Clean up white spaces
text = re.sub(r"\s+", " ", text).strip()
        # If the page text exceeds the model's context budget, index it and answer the original query from the index instead of returning the raw text
enc = tiktoken.encoding_for_model(model)
tokens = len(enc.encode(text))
if len(text) < 5:
return "This website could not be scraped. I cannot answer this question."
if (
model in Models.CHATGPT_MODELS
and tokens > Models.get_max_tokens(model) - 1000
) or (
model in Models.GPT4_MODELS and tokens > Models.get_max_tokens(model) - 1000
):
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
f.write(text)
f.close()
document = SimpleDirectoryReader(input_files=[f.name]).load_data()
embed_model = OpenAIEmbedding()
service_context = ServiceContext.from_defaults(embed_model=embed_model)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
retriever = VectorIndexRetriever(
index=index, similarity_top_k=4, service_context=service_context
)
response_synthesizer = ResponseSynthesizer.from_args(
response_mode="compact",
refine_template=CHAT_REFINE_PROMPT,
service_context=service_context,
use_async=True,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
response_text = query_engine.query(original_query)
return response_text
return text
class SearchService(discord.Cog, name="SearchService"):
"""Cog containing translation commands and retrieval of translation services"""
def __init__(
self,
bot,
gpt_model,
usage_service,
deletion_service,
converser_cog,
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = Search(gpt_model, usage_service)
self.EMBED_CUTOFF = 2000
self.redo_users = {}
self.chat_agents = {}
self.thread_awaiting_responses = []
self.converser_cog = converser_cog
# Make a mapping of all the country codes and their full country names:
async def paginate_embed(
self, response_text, user: discord.Member, original_link=None
):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title="Search Results"
if not original_link
else "Follow-up results",
description=chunk,
url=original_link,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
url=original_link,
)
if user.avatar:
page.set_footer(
text=f"Requested by {user.name}", icon_url=user.avatar.url
)
else:
page.set_footer(
text=f"Requested by {user.name}", icon_url=user.default_avatar.url
)
pages.append(page)
return pages
async def paginate_chat_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
            response_text[i : i + 3500] for i in range(0, len(response_text), 3500)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"{count}",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"{count}",
description=chunk,
)
pages.append(page)
return pages
@discord.Cog.listener()
async def on_message(self, message):
# Check if the message is from a bot.
if message.author.id == self.bot.user.id:
return
# Check if the message is from a guild.
if not message.guild:
return
if message.content.strip().startswith("~"):
return
# if we are still awaiting a response from the agent, then we don't want to process the message.
if message.channel.id in self.thread_awaiting_responses:
resp_message = await message.reply(
"Please wait for the agent to respond to a previous message first!"
)
deletion_time = datetime.datetime.now() + datetime.timedelta(seconds=5)
deletion_time = deletion_time.timestamp()
original_deletion_message = Deletion(message, deletion_time)
deletion_message = Deletion(resp_message, deletion_time)
await self.converser_cog.deletion_queue.put(deletion_message)
await self.converser_cog.deletion_queue.put(original_deletion_message)
return
# Pre moderation
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(message.content, message):
await message.delete()
return
prompt = message.content.strip()
# If the message channel is in self.chat_agents, then we delegate the message to the agent.
if message.channel.id in self.chat_agents:
if prompt.lower() in ["stop", "end", "quit", "exit"]:
await message.reply("Ending chat session.")
self.chat_agents.pop(message.channel.id)
# close the thread
thread = await self.bot.fetch_channel(message.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return
self.thread_awaiting_responses.append(message.channel.id)
try:
await message.channel.trigger_typing()
except:
pass
agent = self.chat_agents[message.channel.id]
used_tools = []
try:
# Start listening to STDOUT before this call. We wanna track all the output for this specific call below
response, stdout_output = await capture_stdout(
self.bot.loop.run_in_executor, None, agent.run, prompt
)
response = str(response)
try:
print(stdout_output)
except:
traceback.print_exc()
stdout_output = ""
if "Wolfram-Tool" in stdout_output:
used_tools.append("Wolfram Alpha")
if "Search-Tool" in stdout_output:
used_tools.append("Google Search")
if "Web-Crawling-Tool" in stdout_output:
used_tools.append("Web Crawler")
except Exception as e:
response = f"Error: {e}"
traceback.print_exc()
await message.reply(
embed=EmbedStatics.get_internet_chat_failure_embed(response)
)
self.thread_awaiting_responses.remove(message.channel.id)
return
if len(response) > 2000:
embed_pages = await self.paginate_chat_embed(response)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await paginator.respond(message)
else:
response = response.replace("\\n", "\n")
# Build a response embed
response_embed = discord.Embed(
title="",
description=response,
color=0x808080,
)
if len(used_tools) > 0:
response_embed.set_footer(
text="Used tools: " + ", ".join(used_tools)
)
await message.reply(embed=response_embed)
self.thread_awaiting_responses.remove(message.channel.id)
async def search_chat_command(
self, ctx: discord.ApplicationContext, model, search_scope=2
):
embed_title = f"{ctx.user.name}'s internet-connected conversation with GPT"
message_embed = discord.Embed(
title=embed_title,
description=f"The agent will visit and browse **{search_scope}** link(s) every time it needs to access the internet.\nCrawling is enabled, send the bot a link for it to access it!\nModel: {model}\n\nType `end` to stop the conversation",
color=0xBA6093,
)
message_embed.set_thumbnail(url="https://i.imgur.com/lt5AYJ9.png")
message_embed.set_footer(
text="Internet Chat", icon_url="https://i.imgur.com/lt5AYJ9.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s internet-connected conversation with GPT",
auto_archive_duration=60,
)
await ctx.respond("Conversation started.")
print("The search scope is " + str(search_scope) + ".")
# Make a new agent for this user to chat.
search = GoogleSearchAPIWrapper(
google_api_key=GOOGLE_API_KEY,
google_cse_id=GOOGLE_SEARCH_ENGINE_ID,
k=search_scope,
)
requests = CustomTextRequestWrapper()
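        # Tools available to the agent: Google Custom Search and a web-crawling tool backed by the
        # custom requests wrapper defined above.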
tools = [
Tool(
name="Search-Tool",
func=search.run,
description="useful when you need to answer questions about current events or retrieve information about a topic that may require the internet. The input to this tool is a search query to ask google. Search queries should be less than 8 words. For example, an input could be 'What is the weather like in New York?' and the tool input would be 'weather new york'.",
),
# The requests tool
Tool(
name="Web-Crawling-Tool",
func=requests.get,
description=f"Useful for when the user provides you with a website link, use this tool to crawl the website and retrieve information from it. The input to this tool is a comma separated list of three values, the first value is the link to crawl for, and the second value is {model} and is the GPT model used, and the third value is the original question that the user asked. For example, an input could be 'https://google.com', gpt-3.5-turbo, 'What is this webpage?'. This tool should only be used if a direct link is provided and not in conjunction with other tools.",
),
]
# Try to add wolfram tool
try:
wolfram = WolframAlphaAPIWrapper(wolfram_alpha_appid=WOLFRAM_API_KEY)
tools.append(
Tool(
name="Wolfram-Tool",
func=wolfram.run,
description="useful when you need to answer questions about math, solve equations, do proofs, mathematical science questions, science questions, and when asked to do numerical based reasoning.",
)
)
print("Wolfram tool added to internet-connected conversation agent.")
except Exception:
traceback.print_exc()
print("Wolfram tool not added to internet-connected conversation agent.")
memory = ConversationBufferMemory(
memory_key="chat_history", return_messages=True
)
llm = ChatOpenAI(model=model, temperature=0, openai_api_key=OPENAI_API_KEY)
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory,
max_execution_time=120,
max_iterations=4,
early_stopping_method="generate",
)
self.chat_agents[thread.id] = agent_chain
async def search_command(
self,
ctx: discord.ApplicationContext,
query,
search_scope,
nodes,
deep,
response_mode,
model,
multistep=False,
redo=None,
from_followup=None,
followup_user=None,
):
"""Command handler for the search command"""
        if not redo:
            await ctx.defer()
# Check the opener for bad content.
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(query, ctx):
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
if (
not EnvService.get_google_search_api_key()
or not EnvService.get_google_search_engine_id()
):
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(
str("The search service is not enabled on this server.")
),
)
return
try:
response, refined_text = await self.model.search(
ctx,
query,
user_api_key,
search_scope,
nodes,
deep,
response_mode,
model,
multistep,
)
except ValueError as e:
traceback.print_exc()
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(str(e)),
ephemeral=True,
)
return
except Exception as e:
await ctx.respond(
embed=EmbedStatics.get_search_failure_embed(str(e)), ephemeral=True
)
traceback.print_exc()
return
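        # Extract and deduplicate source URLs from the response so they can be listed under the answer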
url_extract_pattern = "https?:\\/\\/(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b(?:[-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)"
urls = re.findall(
url_extract_pattern,
str(response.get_formatted_sources(length=200)),
flags=re.IGNORECASE,
)
urls = "\n".join(f"<{url}>" for url in urls)
# Deduplicate the urls
urls = "\n".join(dict.fromkeys(urls.split("\n")))
if from_followup:
original_link, followup_question = (
from_followup.original_link,
from_followup.followup_question,
)
query_response_message = f"**Question:**\n\n`{followup_question}`\n\n**Google Search Query**\n\n`{refined_text.strip()}`\n\n**Final Answer:**\n\n{response.response.strip()}\n\n**Sources:**\n{urls}"
else:
query_response_message = f"**Question:**\n\n`{query.strip()}`\n\n**Google Search Query**\n\n`{refined_text.strip()}`\n\n**Final Answer:**\n\n{response.response.strip()}\n\n**Sources:**\n{urls}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
query_response_message = query_response_message.replace(
"Answer to original:\n", ""
)
query_response_message = query_response_message.replace(
"Answer to follow-up:\n", ""
)
# If the response is too long, lets paginate using the discord pagination
# helper
embed_pages = await self.paginate_embed(
query_response_message,
ctx.user if not followup_user else followup_user,
original_link if from_followup else None,
)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
custom_view=SearchView(ctx, self, query_response_message),
)
self.redo_users[ctx.user.id] = RedoSearchUser(
ctx, query, search_scope, nodes, response_mode
)
await paginator.respond(ctx.interaction)
class SearchView(discord.ui.View):
def __init__(
self,
ctx,
search_cog,
response_text,
):
super().__init__(timeout=None) # No timeout
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
self.add_item(RedoButton(self.ctx, self.search_cog))
self.add_item(FollowupButton(self.ctx, self.search_cog, self.response_text))
# A view for a follow-up button
class FollowupButton(discord.ui.Button["SearchView"]):
def __init__(self, ctx, search_cog, response_text):
super().__init__(label="Follow Up", style=discord.ButtonStyle.green)
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
async def callback(self, interaction: discord.Interaction):
"""Send the followup modal"""
await interaction.response.send_modal(
modal=FollowupModal(self.ctx, self.search_cog, self.response_text)
)
# A view for a redo button
class RedoButton(discord.ui.Button["SearchView"]):
def __init__(self, ctx, search_cog):
super().__init__(
style=discord.ButtonStyle.danger,
label="Redo",
custom_id="redo_search_button",
)
self.ctx = ctx
self.search_cog = search_cog
async def callback(self, interaction: discord.Interaction):
"""Redo the search"""
await interaction.response.send_message(
embed=EmbedStatics.get_search_redo_progress_embed(),
ephemeral=True,
delete_after=15,
)
await self.search_cog.search_command(
self.search_cog.redo_users[self.ctx.user.id].ctx,
self.search_cog.redo_users[self.ctx.user.id].query,
self.search_cog.redo_users[self.ctx.user.id].search_scope,
self.search_cog.redo_users[self.ctx.user.id].nodes,
deep=False,
redo=True,
response_mode=self.search_cog.redo_users[self.ctx.user.id].response_mode,
)
class FollowupData:
def __init__(self, original_link, followup_question):
self.original_link = original_link
self.followup_question = followup_question
# The modal for following up
class FollowupModal(discord.ui.Modal):
def __init__(self, ctx, search_cog, response_text) -> None:
super().__init__(title="Search Follow-up")
# Get the argument named "user_key_db" and save it as USER_KEY_DB
self.search_cog = search_cog
self.ctx = ctx
self.response_text = response_text
self.add_item(
discord.ui.InputText(
label="What other questions do you have?",
placeholder="",
)
)
async def callback(self, interaction: discord.Interaction):
await interaction.response.defer()
query = self.search_cog.redo_users[self.ctx.user.id].query
# In the response text, get only the text between "**Final Answer:**" and "**Sources:**"
self.response_text = self.response_text.split("**Final Answer:**")[1].split(
"**Sources:**"
)[0]
# Build the context
context_text = (
"Original question: "
+ query
+ "\n"
+ "Answer to original: "
+ self.response_text
+ "\n"
+ "Follow-up question: "
+ self.children[0].value
)
# Get the link of the message that the user interacted on
message_link = f"https://discord.com/channels/{interaction.guild_id}/{interaction.channel_id}/{interaction.message.id}"
await self.search_cog.search_command(
self.search_cog.redo_users[self.ctx.user.id].ctx,
context_text,
self.search_cog.redo_users[self.ctx.user.id].search_scope,
self.search_cog.redo_users[self.ctx.user.id].nodes,
deep=False,
redo=True,
from_followup=FollowupData(message_link, self.children[0].value),
response_mode=self.search_cog.redo_users[self.ctx.user.id].response_mode,
followup_user=interaction.user,
)
| SwarmsDiscord-main | swarmsdiscord/cogs/search_service_cog.py |
 | SwarmsDiscord-main | swarmsdiscord/cogs/__init__.py
import asyncio
import discord
from sqlitedict import SqliteDict
from services.environment_service import EnvService
from services.moderations_service import Moderation, ThresholdSet
MOD_DB = None
try:
print("Attempting to retrieve the General and Moderations DB")
MOD_DB = SqliteDict(
EnvService.find_shared_file("main_db.sqlite"),
tablename="moderations",
autocommit=True,
)
except Exception as e:
print("Failed to retrieve the General and Moderations DB")
raise e
class ModerationsService(discord.Cog, name="ModerationsService"):
"""Cog containing moderation tools and features"""
def __init__(
self,
bot,
usage_service,
model,
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = model
# Moderation service data
self.moderation_queues = {}
self.moderation_alerts_channel = EnvService.get_moderations_alert_channel()
self.moderation_enabled_guilds = []
self.moderation_tasks = {}
self.moderations_launched = []
# Defaults
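        # Each ThresholdSet holds per-category scores in the order used by config_command:
        # hate, hate/threatening, self-harm, sexual, sexual/minors, violence, violence/graphic.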
self.default_warn_set = ThresholdSet(0.01, 0.05, 0.05, 0.91, 0.1, 0.45, 0.1)
self.default_delete_set = ThresholdSet(0.26, 0.26, 0.1, 0.95, 0.03, 0.85, 0.4)
@discord.Cog.listener()
async def on_ready(self):
"""Check moderation service for each guild"""
for guild in self.bot.guilds:
self.get_or_set_warn_set(guild.id)
self.get_or_set_delete_set(guild.id)
await self.check_and_launch_moderations(guild.id)
print("The moderation service is ready.")
def check_guild_moderated(self, guild_id):
"""Given guild id, return bool of moderation status"""
return guild_id in MOD_DB and MOD_DB[guild_id]["moderated"]
def get_moderated_alert_channel(self, guild_id):
"""Given guild id, return alert channel"""
return MOD_DB[guild_id]["alert_channel"]
def set_moderated_alert_channel(self, guild_id, channel_id):
"""Given guild id and channel id, set channel to recieve alerts"""
MOD_DB[guild_id] = {"moderated": True, "alert_channel": channel_id}
MOD_DB.commit()
def get_or_set_warn_set(self, guild_id):
"""Get warn_set set for the guild, if not set them from default values"""
guild_id = str(guild_id)
key = guild_id + "_warn_set"
if key not in MOD_DB:
MOD_DB[key] = zip(
self.default_warn_set.keys, self.default_warn_set.thresholds
)
MOD_DB.commit()
return dict(MOD_DB[key])
def get_or_set_delete_set(self, guild_id):
"""Get delete_set set for the guild, if not set them from default values"""
guild_id = str(guild_id)
key = guild_id + "_delete_set"
if key not in MOD_DB:
MOD_DB[key] = zip(
self.default_delete_set.keys, self.default_delete_set.thresholds
)
MOD_DB.commit()
return dict(MOD_DB[key])
def set_warn_set(self, guild_id, threshold_set):
"""Set threshold for warning a message"""
guild_id = str(guild_id)
key = guild_id + "_warn_set"
MOD_DB[key] = zip(threshold_set.keys, threshold_set.thresholds)
MOD_DB.commit()
def set_delete_set(self, guild_id, threshold_set):
"""Set threshold for deleting a message"""
guild_id = str(guild_id)
key = guild_id + "_delete_set"
MOD_DB[key] = zip(threshold_set.keys, threshold_set.thresholds)
MOD_DB.commit()
def set_guild_moderated(self, guild_id, status=True):
"""Set the guild to moderated or not"""
if guild_id not in MOD_DB:
MOD_DB[guild_id] = {"moderated": status, "alert_channel": 0}
MOD_DB.commit()
return
MOD_DB[guild_id] = {
"moderated": status,
"alert_channel": self.get_moderated_alert_channel(guild_id),
}
MOD_DB.commit()
async def check_and_launch_moderations(self, guild_id, alert_channel_override=None):
"""Create the moderation service"""
if self.check_guild_moderated(guild_id):
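            # Create a per-guild moderation queue and launch a background task that applies the
            # stored warn/delete thresholds to incoming messages.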
Moderation.moderation_queues[guild_id] = asyncio.Queue()
moderations_channel = await self.bot.fetch_channel(
self.get_moderated_alert_channel(guild_id)
if not alert_channel_override
else alert_channel_override
)
warn_set_nums = self.get_or_set_warn_set(guild_id).values()
delete_set_nums = self.get_or_set_delete_set(guild_id).values()
warn_set = ThresholdSet(*warn_set_nums)
delete_set = ThresholdSet(*delete_set_nums)
Moderation.moderation_tasks[guild_id] = asyncio.ensure_future(
Moderation.process_moderation_queue(
Moderation.moderation_queues[guild_id],
0.25,
0.25,
moderations_channel,
warn_set,
delete_set,
)
)
print("Launched the moderations service for guild " + str(guild_id))
Moderation.moderations_launched.append(guild_id)
return moderations_channel
return None
async def moderations_command(
self, ctx: discord.ApplicationContext, status: str, alert_channel_id: str
):
"""command handler for toggling moderation and setting an alert channel"""
await ctx.defer()
try:
if alert_channel_id:
int(alert_channel_id)
except ValueError:
# the alert_channel_id was passed in as a channel NAME instead of an ID, fetch the ID.
alert_channel = discord.utils.get(ctx.guild.channels, name=alert_channel_id)
alert_channel_id = alert_channel.id
if status == "on":
# Check if the current guild is already in the database and if so, if the moderations is on
if self.check_guild_moderated(ctx.guild_id):
await ctx.respond("Moderations is already enabled for this guild")
return
# Create the moderations service.
await self.start_moderations_service(
guild_id=ctx.guild_id, alert_channel_id=alert_channel_id
)
await ctx.respond("Moderations is now enabled for this guild")
elif status == "off":
# Cancel the moderations service.
await self.stop_moderations_service(ctx.guild_id)
await ctx.respond(
"Moderations is now disabled for this guild", ephemeral=True
)
async def stop_moderations_service(self, guild_id):
"""Remove guild moderation status and stop the service"""
self.set_guild_moderated(guild_id, False)
Moderation.moderation_tasks[guild_id].cancel()
Moderation.moderation_tasks[guild_id] = None
Moderation.moderation_queues[guild_id] = None
Moderation.moderations_launched.remove(guild_id)
async def start_moderations_service(self, guild_id, alert_channel_id=None):
"""Set guild moderation and start the service"""
self.set_guild_moderated(guild_id)
moderations_channel = await self.check_and_launch_moderations(
guild_id,
Moderation.moderation_alerts_channel
if not alert_channel_id
else alert_channel_id,
)
self.set_moderated_alert_channel(guild_id, moderations_channel.id)
async def restart_moderations_service(self, ctx):
"""restarts the moderation of the guild it's run in"""
if not self.check_guild_moderated(ctx.guild_id):
await ctx.respond(
"Moderations are not enabled, can't restart",
ephemeral=True,
delete_after=30,
)
return
await ctx.respond(
"The moderations service is being restarted...",
ephemeral=True,
delete_after=30,
)
await self.stop_moderations_service(ctx.guild_id)
await ctx.send_followup(
"The moderations service was stopped..", ephemeral=True, delete_after=30
)
await self.start_moderations_service(
ctx.guild_id, self.get_moderated_alert_channel(ctx.guild_id)
)
await ctx.send_followup(
"The moderations service was restarted successfully.",
ephemeral=True,
delete_after=30,
)
async def build_moderation_settings_embed(self, category, mod_set):
embed = discord.Embed(
title="Moderation Settings",
description="The moderation settings for this guild for the type: "
+ category,
            color=discord.Color.yellow() if category == "warn" else discord.Color.red(),
)
# Add each key_value pair in the mod_set to the embed, make them fairly small
for key, value in mod_set.items():
embed.add_field(name=key, value=value, inline=False)
return embed
async def config_command(
self,
ctx: discord.ApplicationContext,
config_type: str,
hate,
hate_threatening,
self_harm,
sexual,
sexual_minors,
violence,
violence_graphic,
):
"""command handler for assigning threshold values for warn or delete"""
all_args = [
hate,
hate_threatening,
self_harm,
sexual,
sexual_minors,
violence,
violence_graphic,
]
await ctx.defer(ephemeral=True)
# Case for printing the current config
if not any(all_args) and config_type != "reset":
await ctx.respond(
ephemeral=True,
embed=await self.build_moderation_settings_embed(
config_type,
self.get_or_set_warn_set(ctx.guild_id)
if config_type == "warn"
else self.get_or_set_delete_set(ctx.guild_id),
),
)
return
if config_type == "warn":
            # Fill in any unspecified thresholds from the guild's existing warn set
warn_set = self.get_or_set_warn_set(ctx.guild_id)
new_warn_set = ThresholdSet(
hate if hate else warn_set["hate"],
hate_threatening if hate_threatening else warn_set["hate/threatening"],
self_harm if self_harm else warn_set["self-harm"],
sexual if sexual else warn_set["sexual"],
sexual_minors if sexual_minors else warn_set["sexual/minors"],
violence if violence else warn_set["violence"],
violence_graphic if violence_graphic else warn_set["violence/graphic"],
)
self.set_warn_set(ctx.guild_id, new_warn_set)
await self.restart_moderations_service(ctx)
elif config_type == "delete":
delete_set = self.get_or_set_delete_set(ctx.guild_id)
new_delete_set = ThresholdSet(
hate if hate else delete_set["hate"],
hate_threatening
if hate_threatening
else delete_set["hate/threatening"],
self_harm if self_harm else delete_set["self-harm"],
sexual if sexual else delete_set["sexual"],
sexual_minors if sexual_minors else delete_set["sexual/minors"],
violence if violence else delete_set["violence"],
violence_graphic
if violence_graphic
else delete_set["violence/graphic"],
)
self.set_delete_set(ctx.guild_id, new_delete_set)
await self.restart_moderations_service(ctx)
elif config_type == "reset":
self.set_delete_set(ctx.guild_id, self.default_delete_set)
self.set_warn_set(ctx.guild_id, self.default_warn_set)
await self.restart_moderations_service(ctx)
async def moderations_test_command(
self, ctx: discord.ApplicationContext, prompt: str
):
"""command handler for checking moderation values of a given input"""
await ctx.defer()
response = await self.model.send_moderations_request(prompt)
await ctx.respond(response["results"][0]["category_scores"])
await ctx.send_followup(response["results"][0]["flagged"])
| SwarmsDiscord-main | swarmsdiscord/cogs/moderations_service_cog.py |
import asyncio
import datetime
import pickle
import re
import traceback
import sys
from pathlib import Path
import aiofiles
import json
import discord
from discord.ext import pages
from models.deepl_model import TranslationModel
from models.embed_statics_model import EmbedStatics
from models.image_understanding_model import ImageUnderstandingModel
from models.openai_model import Override
from services.environment_service import EnvService
from services.message_queue_service import Message
from services.moderations_service import Moderation
from models.user_model import Thread, EmbeddedConversationItem, Instruction
from collections import defaultdict
from sqlitedict import SqliteDict
from services.pickle_service import Pickler
from services.sharegpt_service import ShareGPTService
from services.text_service import SetupModal, TextService
original_message = {}
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
if sys.platform == "win32":
separator = "\\"
else:
separator = "/"
#
# Get the user key service if it is enabled.
#
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
CHAT_BYPASS_ROLES = EnvService.get_bypass_roles()
PRE_MODERATE = EnvService.get_premoderate()
FORCE_ENGLISH = EnvService.get_force_english()
BOT_TAGGABLE = EnvService.get_bot_is_taggable()
CHANNEL_CHAT_ROLES = EnvService.get_channel_chat_roles()
BOT_TAGGABLE_ROLES = EnvService.get_gpt_roles()
CHANNEL_INSTRUCTION_ROLES = EnvService.get_channel_instruction_roles()
image_understanding_model = ImageUnderstandingModel()
#
# Obtain the Moderation table and the General table, these are two SQLite tables that contain
# information about the server that are used for persistence and to auto-restart the moderation service.
#
MOD_DB = None
GENERAL_DB = None
try:
print("Attempting to retrieve the General and Moderations DB")
MOD_DB = SqliteDict(
EnvService.find_shared_file("main_db.sqlite"),
tablename="moderations",
autocommit=True,
)
GENERAL_DB = SqliteDict(
EnvService.find_shared_file("main_db.sqlite"),
tablename="general",
autocommit=True,
)
print("Retrieved the General and Moderations DB")
except Exception as e:
print("Failed to retrieve the General and Moderations DB. The bot is terminating.")
raise e
BOT_NAME = EnvService.get_custom_bot_name()
class SWARMSComCon(discord.Cog, name="SWARMSComCon"):
def __init__(
self,
bot,
usage_service,
model,
message_queue,
deletion_queue,
DEBUG_GUILD,
DEBUG_CHANNEL,
data_path: Path,
pinecone_service,
pickle_queue,
):
super().__init__()
self.GLOBAL_COOLDOWN_TIME = 0.25
# Environment
self.data_path = data_path
self.debug_channel = None
# Services and models
self.bot = bot
self.usage_service = usage_service
self.model = model
self.translation_model = TranslationModel()
self.deletion_queue = deletion_queue
# Data specific to all text based GPT interactions
self.users_to_interactions = defaultdict(list)
self.redo_users = {}
# Pickle queue
self.pickle_queue = pickle_queue
# Conversations-specific data
self.END_PROMPTS = [
"end",
"end conversation",
"end the conversation",
"that's all",
"that'll be all",
]
self.awaiting_responses = []
self.awaiting_thread_responses = []
self.conversation_threads = {}
self.full_conversation_history = defaultdict(list)
self.instructions = defaultdict(list)
self.summarize = self.model.summarize_conversations
# Pinecone data
self.pinecone_service = pinecone_service
# Sharing service
self.sharegpt_service = ShareGPTService()
try:
conversation_file_path = EnvService.find_shared_file(
"conversation_starter_pretext.txt"
)
# Attempt to read a conversation starter text string from the file.
with conversation_file_path.open("r") as f:
self.CONVERSATION_STARTER_TEXT = f.read()
print(
f"Conversation starter text loaded from {conversation_file_path}."
)
assert self.CONVERSATION_STARTER_TEXT is not None
language_detect_file_path = EnvService.find_shared_file(
"language_detection_pretext.txt"
)
# Attempt to read a conversation starter text string from the file.
with language_detect_file_path.open("r") as f:
self.LANGUAGE_DETECT_STARTER_TEXT = f.read()
print(
f"Language detection starter text loaded from {language_detect_file_path}."
)
assert self.LANGUAGE_DETECT_STARTER_TEXT is not None
conversation_file_path_minimal = EnvService.find_shared_file(
"conversation_starter_pretext_minimal.txt"
)
with conversation_file_path_minimal.open("r") as f:
self.CONVERSATION_STARTER_TEXT_MINIMAL = f.read()
print(
f"Conversation starter text loaded from {conversation_file_path_minimal}."
)
assert self.CONVERSATION_STARTER_TEXT_MINIMAL is not None
except Exception:
            self.CONVERSATION_STARTER_TEXT = self.CONVERSATION_STARTER_TEXT_MINIMAL = (
                "You are an artificial intelligence that is able to do anything and answer any question. "
                "I want you to be my personal assistant and help me with some tasks. "
                "I want you to make well-informed decisions using the data that you have been trained on, "
                "and be sure to be mindful of the previous conversation history and be consistent with your answers."
            )
self.DEBUG_GUILD = DEBUG_GUILD
self.DEBUG_CHANNEL = DEBUG_CHANNEL
print(
f"The debug channel and guild IDs are {self.DEBUG_GUILD} and {self.DEBUG_CHANNEL}"
)
self.TEXT_CUTOFF = 1900
self.EMBED_CUTOFF = 3900
self.message_queue = message_queue
self.conversation_thread_owners = defaultdict(list)
async def load_file(self, file, ctx):
"""Take filepath, return content or respond if not found"""
try:
async with aiofiles.open(file, "r") as f:
return await f.read()
except Exception as e:
traceback.print_exc()
await ctx.respond(
"Error loading file. Please check that it is correctly placed in the bot's root file directory."
)
raise e
@discord.Cog.listener()
async def on_member_join(self, member):
"""When members join send welcome message if enabled"""
if self.model.welcome_message_enabled:
query = f"Please generate a welcome message for {member.name} who has just joined the server."
try:
welcome_message_response = await self.model.send_request(
query,
tokens=self.usage_service.count_tokens(query),
is_chatgpt_request=True
if "turbo" in str(self.model.model)
else False,
)
welcome_message = str(welcome_message_response["choices"][0]["text"])
except Exception:
welcome_message = None
if not welcome_message:
welcome_message = EnvService.get_welcome_message()
welcome_embed = discord.Embed(
title=f"Welcome, {member.name}!", description=welcome_message
)
welcome_embed.add_field(
name="Just so you know...",
value="> My commands are invoked with a forward slash (/)\n> Use /help to see my help message(s).",
)
await member.send(content=None, embed=welcome_embed)
@discord.Cog.listener()
async def on_ready(self):
"""When ready to recieve data set debug channel and sync commands"""
self.debug_channel = self.bot.get_guild(self.DEBUG_GUILD).get_channel(
self.DEBUG_CHANNEL
)
print("The debug channel was acquired")
print("Attempting to load from pickles")
# Try to load self.full_conversation_history, self.conversation_threads, and self.conversation_thread_owners from the `pickles` folder
try:
with open(
EnvService.save_path() / "pickles" / "full_conversation_history.pickle",
"rb",
) as f:
self.full_conversation_history = pickle.load(f)
print("Loaded full_conversation_history")
with open(
EnvService.save_path() / "pickles" / "conversation_threads.pickle", "rb"
) as f:
self.conversation_threads = pickle.load(f)
print("Loaded conversation_threads")
with open(
EnvService.save_path()
/ "pickles"
/ "conversation_thread_owners.pickle",
"rb",
) as f:
self.conversation_thread_owners = pickle.load(f)
print("Loaded conversation_thread_owners")
with open(
EnvService.save_path() / "pickles" / "instructions.pickle",
"rb",
) as f:
self.instructions = pickle.load(f)
print("Loaded instructions")
# Fail if all three weren't loaded
            assert self.full_conversation_history != {}
            assert self.conversation_threads != {}
            assert self.conversation_thread_owners != defaultdict(list)
except Exception:
print("Failed to load existing pickles")
self.full_conversation_history = defaultdict(list)
self.conversation_threads = {}
self.conversation_thread_owners = defaultdict(list)
print("Set empty dictionaries, pickles will be saved in the future")
print("Syncing commands...")
await self.bot.sync_commands(
commands=None,
method="individual",
force=True,
guild_ids=ALLOWED_GUILDS,
register_guild_commands=True,
check_guilds=[],
delete_existing=True,
)
print("Commands synced")
# Start an inline async loop that runs every 10 seconds to save the conversation history to a pickle file
print("Starting pickle loop")
while True:
await asyncio.sleep(15)
await self.pickle_queue.put(
Pickler(
self.full_conversation_history,
self.conversation_threads,
self.conversation_thread_owners,
self.instructions,
)
)
def check_conversing(self, channel_id, message_content):
'''given channel id and a message, return true if it's a conversation thread, false if not, or if the message starts with "~"'''
cond1 = channel_id in self.conversation_threads
# If the trimmed message starts with a Tilde, then we want to not contribute this to the conversation
try:
cond2 = not message_content.strip().startswith("~")
except Exception as e:
print(e)
cond2 = False
return (cond1) and cond2
async def end_conversation(
self, ctx, opener_user_id=None, conversation_limit=False
):
"""end the thread of the user interacting with the bot, if the conversation has reached the limit close it for the owner"""
normalized_user_id = opener_user_id if opener_user_id else ctx.author.id
# Check if the channel is an instance of a thread
thread = False
if isinstance(ctx.channel, discord.Thread):
thread = True
if (
conversation_limit
): # if we reach the conversation limit we want to close from the channel it was maxed out in
channel_id = ctx.channel.id
else:
try:
channel_ids = self.conversation_thread_owners[normalized_user_id]
if ctx.channel.id not in channel_ids:
await ctx.reply(
"This is not a conversation thread that you own!",
delete_after=5,
)
return
if normalized_user_id in self.awaiting_responses:
await ctx.reply(
embed=discord.Embed(
title=f"Please wait for a response before ending the conversation.",
color=0x808080,
)
)
return
except Exception:
traceback.print_exc()
await ctx.delete(delay=5)
await ctx.reply(
"Only the conversation starter can end this.", delete_after=5
)
return
# TODO Possible bug here, if both users have a conversation active and one user tries to end the other, it may
# allow them to click the end button on the other person's thread and it will end their own convo.
self.conversation_threads.pop(ctx.channel.id)
if isinstance(
ctx, discord.ApplicationContext
): # When the conversation is ended from the slash command
await ctx.respond(
"You have ended the conversation with GPT. Start a conversation with /gpt converse",
ephemeral=True,
delete_after=10,
)
elif isinstance(
ctx, discord.Interaction
): # When the user ends the conversation from the button
await ctx.response.send_message(
"You have ended the conversation with GPT. Start a conversation with /gpt converse",
ephemeral=True,
delete_after=10,
)
else: # The case for when the user types "end" in the channel
await ctx.reply(
"You have ended the conversation with GPT. Start a conversation with /gpt converse",
delete_after=10,
)
await ctx.channel.send(
embed=EmbedStatics.generate_end_embed(),
view=ShareView(self, ctx.channel.id) if thread else None,
)
# Close all conversation threads for the user
# If at conversation limit then fetch the owner and close the thread for them
if conversation_limit:
try:
owner_id = [
owner
for owner, threads in self.conversation_thread_owners.items()
if channel_id in threads
][0]
self.conversation_thread_owners[owner_id].remove(ctx.channel.id)
# Attempt to close and lock the thread.
if thread:
try:
thread = await self.bot.fetch_channel(channel_id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
except Exception:
traceback.print_exc()
except Exception:
traceback.print_exc()
else:
if normalized_user_id in self.conversation_thread_owners:
thread_id = ctx.channel.id
self.conversation_thread_owners[normalized_user_id].remove(
ctx.channel.id
)
# Attempt to close and lock the thread.
if thread:
try:
thread = await self.bot.fetch_channel(thread_id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
except Exception:
traceback.print_exc()
async def send_settings_text(self, ctx):
"""compose and return the settings menu to the interacting user"""
embed = discord.Embed(
title="SWARMSBot Settings",
description="The current settings of the model",
color=0x00FF00,
)
# Create a two-column embed to display the settings, use \u200b to create a blank space
embed.add_field(
name="Setting",
value="\n".join(
[
key
for key in self.model.__dict__.keys()
if key not in self.model._hidden_attributes
]
),
inline=True,
)
embed.add_field(
name="Value",
value="\n".join(
[
str(value)
for key, value in self.model.__dict__.items()
if key not in self.model._hidden_attributes
]
),
inline=True,
)
await ctx.respond(embed=embed, ephemeral=True)
async def process_settings(self, ctx, parameter, value):
"""Given a parameter and value set the corresponding parameter in storage to the value"""
# Check if the parameter is a valid parameter
if hasattr(self.model, parameter):
# Check if the value is a valid value
try:
# Set the parameter to the value
setattr(self.model, parameter, value)
await ctx.respond(
"Successfully set the parameter " + parameter + " to " + value
)
if parameter == "mode":
await ctx.send_followup(
"The mode has been set to "
+ value
+ ". This has changed the temperature top_p to the mode defaults of "
+ str(self.model.temp)
+ " and "
+ str(self.model.top_p)
)
except ValueError as e:
await ctx.respond(e)
else:
await ctx.respond("The parameter is not a valid parameter")
def generate_debug_message(self, prompt, response):
"""create a debug message with a prompt and a response field"""
debug_message = "----------------------------------------------------------------------------------\n"
debug_message += "Prompt:\n```\n" + prompt + "\n```\n"
debug_message += "Response:\n```\n" + json.dumps(response, indent=4) + "\n```\n"
return debug_message
async def paginate_and_send(self, response_text, ctx):
"""paginate a response to a text cutoff length and send it in chunks"""
from_context = isinstance(ctx, discord.ApplicationContext)
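# Split the response into TEXT_CUTOFF-sized chunks so each piece fits in a single Discord message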
response_text = [
response_text[i : i + self.TEXT_CUTOFF]
for i in range(0, len(response_text), self.TEXT_CUTOFF)
]
# Send each chunk as a message
response_message = None
first = False
for chunk in response_text:
if not first:
if from_context:
await ctx.send_followup(chunk)
else:
await ctx.reply(chunk)
first = True
else:
if from_context:
response_message = await ctx.send_followup(chunk)
else:
response_message = await ctx.channel.send(chunk)
return response_message
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
# Build one embed page per chunk
for count, chunk in enumerate(response_text, start=1):
page = discord.Embed(
title=f"Page {count}",
description=chunk,
)
pages.append(page)
return pages
async def queue_debug_message(self, debug_message, debug_channel):
"""Put a message into the debug queue"""
await self.message_queue.put(Message(debug_message, debug_channel))
async def queue_debug_chunks(self, debug_message, debug_channel):
"""Put a message as chunks into the debug queue"""
debug_message_chunks = [
debug_message[i : i + self.TEXT_CUTOFF]
for i in range(0, len(debug_message), self.TEXT_CUTOFF)
]
backticks_encountered = 0
for i, chunk in enumerate(debug_message_chunks):
# Count the number of backticks in the chunk
backticks_encountered += chunk.count("```")
# If it's the first chunk, append a "\n```\n" to the end
if i == 0:
chunk += "\n```\n"
# If it's an interior chunk, append a "```\n" to the end, and a "\n```\n" to the beginning
elif i < len(debug_message_chunks) - 1:
chunk = "\n```\n" + chunk + "```\n"
# If it's the last chunk, append a "```\n" to the beginning
else:
chunk = "```\n" + chunk
await self.message_queue.put(Message(chunk, debug_channel))
async def send_debug_message(self, debug_message, debug_channel):
"""process a debug message and put directly into queue or chunk it"""
# Send the debug message
try:
if len(debug_message) > self.TEXT_CUTOFF:
await self.queue_debug_chunks(debug_message, debug_channel)
else:
await self.queue_debug_message(debug_message, debug_channel)
except Exception as e:
traceback.print_exc()
await self.message_queue.put(
Message("Error sending debug message: " + str(e), debug_channel)
)
async def check_conversation_limit(self, message):
"""Check if a conversation has reached the set limit and end it if it has"""
# After each response, check if the user has reached the conversation limit in terms of messages or time.
if message.channel.id in self.conversation_threads:
# If the user has reached the max conversation length, end the conversation
if (
self.conversation_threads[message.channel.id].count
>= self.model.max_conversation_length
):
await message.reply(
"You have reached the maximum conversation length. You have ended the conversation with GPT, and it has ended."
)
await self.end_conversation(message, conversation_limit=True)
return True
return False
async def summarize_conversation(self, message, prompt):
"""Takes a conversation history filled prompt and summarizes it to then start a new history with it as the base"""
response = await self.model.send_summary_request(prompt)
summarized_text = response["choices"][0]["text"]
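# Rebuild the history: the conversation starter, the generated summary, and the most recent exchange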
new_conversation_history = []
new_conversation_history.append(
EmbeddedConversationItem(self.CONVERSATION_STARTER_TEXT, 0)
)
new_conversation_history.append(
EmbeddedConversationItem(
f"\nThis conversation has some context from earlier, which has been summarized as follows: {summarized_text} \nContinue the conversation, paying very close attention to things <username> told you, such as their name, and personal details.",
0,
)
)
# Get the last entry from the thread's conversation history
new_conversation_history.append(
EmbeddedConversationItem(
self.conversation_threads[message.channel.id].history[-1].text + "\n", 0
)
)
self.conversation_threads[message.channel.id].history = new_conversation_history
# A listener for message edits to redo prompts if they are edited
@discord.Cog.listener()
async def on_message_edit(self, before, after):
"""When a message is edited run moderation if enabled, and process if it a prompt that should be redone"""
if after.author.id == self.bot.user.id:
return
# Moderation
if not isinstance(after.channel, discord.DMChannel):
if (
after.guild.id in Moderation.moderation_queues
and Moderation.moderation_queues[after.guild.id] is not None
):
# Create a timestamp that is 0.25 seconds from now
timestamp = (
datetime.datetime.now() + datetime.timedelta(seconds=0.25)
).timestamp()
await Moderation.moderation_queues[after.guild.id].put(
Moderation(after, timestamp)
) # TODO Don't proceed if message was deleted!
await TextService.process_conversation_edit(self, after, original_message)
@discord.Cog.listener()
async def on_message(self, message):
"""On a new message check if it should be moderated then process it for conversation"""
if message.author == self.bot.user:
return
# Check if the message is a discord system message
if message.type != discord.MessageType.default:
return
# Moderations service is done here.
if (
hasattr(message, "guild")
and message.guild.id in Moderation.moderation_queues
and Moderation.moderation_queues[message.guild.id] is not None
):
# Don't moderate if there is no "roles" attribute for the author
if not hasattr(message.author, "roles"):
pass
# Verify that the user is not in a role that can bypass moderation
elif CHAT_BYPASS_ROLES == [None] or not any(
role.name.lower() in CHAT_BYPASS_ROLES for role in message.author.roles
):
# Create a timestamp that is 0.5 seconds from now
timestamp = (
datetime.datetime.now() + datetime.timedelta(seconds=0.5)
).timestamp()
await Moderation.moderation_queues[message.guild.id].put(
Moderation(message, timestamp)
)
# Language check
if FORCE_ENGLISH and len(message.content.split(" ")) > 3:
if not await Moderation.force_english_and_respond(
message.content, self.LANGUAGE_DETECT_STARTER_TEXT, message
):
await message.delete()
return
# Get the first file in the message if there is one
file = message.attachments[0] if len(message.attachments) > 0 else None
# Process the message if the user is in a conversation
if await TextService.process_conversation_message(
self, message, USER_INPUT_API_KEYS, USER_KEY_DB, file=file
):
original_message[message.author.id] = message.id
# If the user tagged the bot and the tag wasn't an @here or @everyone, retrieve the message
if f"<@{self.bot.user.id}>" in message.content and not (
"@everyone" in message.content or "@here" in message.content
):
if not BOT_TAGGABLE:
return
# Check if any of the message author's role names are in BOT_TAGGABLE_ROLES, if not, return
if BOT_TAGGABLE_ROLES != [None] and not any(
role.name.lower() in BOT_TAGGABLE_ROLES for role in message.author.roles
):
return
# Remove the mention from the message
prompt = message.content.replace(self.bot.user.mention, "")
# If the message is empty, don't process it
if len(prompt) < 5:
await message.reply(
"This is too short of a prompt to think about. Please be more specific."
)
return
await self.ask_command(
message,
prompt=prompt,
from_message_context=True,
)
def cleanse_response(self, response_text):
"""Cleans history tokens from response"""
response_text = response_text.replace("<yourname>:", "")
response_text = response_text.replace("You:", "")
response_text = response_text.replace(BOT_NAME.replace(" ", ""), "")
response_text = response_text.replace(BOT_NAME, "")
response_text = response_text.replace("<|endofstatement|>", "")
return response_text
def remove_awaiting(
self, author_id, channel_id, from_ask_command, from_edit_command
):
"""Remove user from ask/edit command response wait, if not any of those then process the id to remove user from thread response wait"""
if author_id in self.awaiting_responses:
self.awaiting_responses.remove(author_id)
if not from_ask_command and not from_edit_command:
if channel_id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(channel_id)
async def mention_to_username(self, ctx, message):
"""replaces discord mentions with their server nickname in text, if the user is not found keep the mention as is"""
if not discord.utils.raw_mentions(message):
return message
for mention in discord.utils.raw_mentions(message):
try:
user = await discord.utils.get_or_fetch(ctx.guild, "member", mention)
message = message.replace(f"<@{str(mention)}>", user.display_name)
except Exception:
pass
return message
# COMMANDS
async def help_command(self, ctx):
"""Command handler. Generates a help message and sends it to the user"""
await ctx.defer()
embed = discord.Embed(
title="SWARMSBot Help", description="The current commands", color=0xC730C7
)
embed.add_field(
name="/search",
value="AI-Assisted google search!",
inline=False,
)
embed.add_field(
name="/index",
value="Indexing commands for document knowledge and querying",
inline=False,
)
embed.add_field(
name="/gpt ask",
value="Ask GPT something. Be clear, long, and concise in your prompt. Don't waste tokens.",
inline=False,
)
embed.add_field(
name="/gpt edit",
value="Use GPT to edit a piece of text given an instruction",
inline=False,
)
embed.add_field(
name="/gpt converse", value="Start a conversation with GPT", inline=False
)
embed.add_field(
name="/gpt end",
value="End a conversation with GPT. You can also type `end` in the conversation.",
inline=False,
)
embed.add_field(
name="/dalle draw <image prompt>",
value="Use DALL-E2 to draw an image based on a text prompt",
inline=False,
)
embed.add_field(
name="/dalle optimize <image prompt>",
value="Optimize an image prompt for use with DALL-E2, Midjourney, SD, etc.",
inline=False,
)
embed.add_field(
name="/system settings",
value="Print the current settings of the model",
inline=False,
)
embed.add_field(
name="/system settings <model parameter> <value>",
value="Change the parameter of the model named by <model parameter> to new value <value>",
inline=False,
)
embed.add_field(
name="/mod",
value="The automatic moderations service",
inline=False,
)
embed.add_field(
name="/translate",
value="Translate from one language to another",
inline=False,
)
embed.add_field(name="/help", value="See this help text", inline=False)
await ctx.respond(embed=embed, ephemeral=False)
async def set_usage_command(
self, ctx: discord.ApplicationContext, usage_amount: float
):
"""Command handler. Sets the usage file to the given value"""
await ctx.defer()
# Attempt to convert the input usage value into a float
try:
usage = float(usage_amount)
await self.usage_service.set_usage(usage)
await ctx.respond(f"Set the usage to {usage}")
except Exception:
await ctx.respond("The usage value must be a valid float.")
return
async def delete_all_conversation_threads_command(
self, ctx: discord.ApplicationContext
):
"""Command handler. Deletes all threads made by the bot in the current guild"""
await ctx.defer()
for thread in ctx.guild.threads:
thread_name = thread.name.lower()
if "with gpt" in thread_name or "closed-gpt" in thread_name:
try:
await thread.delete()
except Exception:
pass
await ctx.respond("All conversation threads in this server have been deleted.")
async def usage_command(self, ctx):
"""Command handler. Responds with the current usage of the bot"""
await ctx.defer()
embed = discord.Embed(
title="SWARMSBot Usage", description="The current usage", color=0x00FF00
)
# 1000 tokens costs 0.02 USD, so we can calculate the total tokens used from the price that we have stored
embed.add_field(
name="Total tokens used",
value=str(int((await self.usage_service.get_usage() / 0.02)) * 1000),
inline=False,
)
embed.add_field(
name="Total price",
value="$" + str(round(await self.usage_service.get_usage(), 2)),
inline=False,
)
await ctx.respond(embed=embed)
async def instruction_command(
self,
ctx: discord.ApplicationContext,
mode: str,
type: str,
instruction: str,
instruction_file: discord.Attachment,
private: bool,
):
"""Command to let users set their own system prompt or add one to the channel"""
await ctx.defer(ephemeral=private)
if mode == "set" and not (instruction or instruction_file):
await ctx.respond(
"You must include either an **instruction** or an **instruction file**"
)
return
# Check if any of the message author's role names are in CHANNEL_INSTRUCTION_ROLES, if not, continue as user
if type == "channel" and mode in ["set", "clear"]:
if CHANNEL_INSTRUCTION_ROLES != [None] and not any(
role.name.lower() in CHANNEL_INSTRUCTION_ROLES
for role in ctx.author.roles
):
await ctx.respond(
"You don't have permisson to set the channel instruction. Defaulting to setting a user instruction"
)
type = "user"
if instruction_file:
bytestring = await instruction_file.read()
file_instruction = bytestring.decode("utf-8")
if instruction and instruction_file:
instruction = f"{file_instruction}\n\n{instruction}"
elif instruction_file:
instruction = file_instruction
# If premoderation is enabled, check
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(instruction, ctx):
return
if type == "channel":
set_id = ctx.channel.id
else:
set_id = ctx.user.id
if mode == "set":
self.instructions[set_id] = Instruction(set_id, instruction)
await ctx.respond(f"The system instruction is set for **{type}**")
elif mode == "get":
try:
instruction = self.instructions[set_id].prompt
embed_pages = await self.paginate_embed(instruction)
paginator = pages.Paginator(
pages=embed_pages, timeout=None, author_check=False
)
await paginator.respond(ctx.interaction)
except Exception:
await ctx.respond("There is no instruction set")
elif mode == "clear":
self.instructions.pop(set_id)
await ctx.respond(f"The instruction has been removed for **{type}**")
async def ask_command(
self,
ctx: discord.ApplicationContext,
prompt: str,
private: bool = False,
temperature: float = None,
top_p: float = None,
frequency_penalty: float = None,
presence_penalty: float = None,
from_ask_action=None,
from_other_action=None,
from_message_context=None,
prompt_file: discord.Attachment = None,
model=None,
):
"""Command handler. Requests and returns a generation with no extras to the completion endpoint
Args:
ctx (discord.ApplicationContext): Command interaction
prompt (str): A prompt to use for generation
temperature (float): Sets the temperature override
top_p (float): Sets the top p override
frequency_penalty (float): Sets the frequency penalty override
presence_penalty (float): Sets the presence penalty override
from_ask_action (optional): Set when the request comes from the "ask" message action. Defaults to None.
"""
is_context = isinstance(ctx, discord.ApplicationContext)
user = ctx.user if is_context else ctx.author
if not (prompt or prompt_file):
await ctx.respond(
"You must include either a **prompt** or a **prompt file**"
)
return
if prompt_file:
bytestring = await prompt_file.read()
file_prompt = bytestring.decode("utf-8")
if prompt and prompt_file:
prompt = f"{file_prompt}\n\n{prompt}"
elif prompt_file:
prompt = file_prompt
prompt = await self.mention_to_username(ctx, prompt.strip())
if len(prompt) < self.model.prompt_min_length:
alias = ctx.respond if is_context else ctx.send
await alias(
f"Prompt must be greater than {self.model.prompt_min_length} characters, it is currently: {len(prompt)} characters"
)
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(user.id, ctx, USER_KEY_DB)
if not user_api_key:
return
await ctx.defer(ephemeral=private) if is_context else None
# If premoderation is enabled, check
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(prompt, ctx):
return
overrides = Override(temperature, top_p, frequency_penalty, presence_penalty)
await TextService.encapsulated_send(
self,
user.id,
prompt,
ctx,
overrides=overrides,
from_ask_command=True,
custom_api_key=user_api_key,
from_ask_action=from_ask_action,
from_other_action=from_other_action,
from_message_context=from_message_context,
model=model,
)
async def edit_command(
self,
ctx: discord.ApplicationContext,
instruction: str,
text: str,
private: bool,
temperature: float,
top_p: float,
):
"""Command handler. Requests and returns a generation with no extras to the edit endpoint
Args:
ctx (discord.ApplicationContext): Command interaction
instruction (str): The modification instructions
text (str): The text that should be modified
temperature (float): Sets the temperature override
top_p (float): Sets the top p override
"""
user = ctx.user
text = await self.mention_to_username(ctx, text.strip())
instruction = await self.mention_to_username(ctx, instruction.strip())
# Validate that all the parameters are in a good state before we send the request
if len(instruction) < self.model.prompt_min_length:
await ctx.respond(
f"Instruction must be at least {self.model.prompt_min_length} characters long"
)
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(user.id, ctx, USER_KEY_DB)
if not user_api_key:
return
await ctx.defer(ephemeral=private)
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(instruction + text, ctx):
return
overrides = Override(temperature, top_p, 0, 0)
await TextService.encapsulated_send(
self,
user.id,
prompt=text,
ctx=ctx,
overrides=overrides,
instruction=instruction,
from_edit_command=True,
custom_api_key=user_api_key,
)
async def private_test_command(self, ctx: discord.ApplicationContext):
"""Command handler. Creates a private thread in the current channel"""
await ctx.defer(ephemeral=True)
await ctx.respond("Your private test thread")
thread = await ctx.channel.create_thread(
name=ctx.user.name + "'s private test conversation",
auto_archive_duration=60,
)
await thread.send(
f"<@{str(ctx.user.id)}> This is a private thread for testing. Only you and server admins can see this thread."
)
async def converse_command(
self,
ctx: discord.ApplicationContext,
opener: str,
opener_file: str,
private: bool,
minimal: bool,
model: str,
temperature: float,
top_p: float,
frequency_penalty: float,
presence_penalty: float,
use_threads: bool = True,  # Whether to hold the conversation in a thread or directly in the channel
):
"""Command handler. Starts a conversation with the bot
Args:
ctx (discord.ApplicationContext): Command interaction
opener (str): The first prompt to send in the conversation
opener_file (str): A .txt or .json file which is appended before the opener
private (bool): If the thread should be private
minimal (bool): If a minimal starter should be used
model (str): The openai model that should be used
temperature (float): Sets the temperature override
top_p (float): Sets the top p override
frequency_penalty (float): Sets the frequency penalty override
presence_penalty (float): Sets the presence penalty override
"""
user = ctx.user
# If we are in user input api keys mode, check if the user has entered their api key before letting them continue
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(user.id, ctx, USER_KEY_DB)
if not user_api_key:
return
if private:
await ctx.defer(ephemeral=True)
else:
await ctx.defer()
# Check the opener for bad content.
if PRE_MODERATE and opener is not None:
if await Moderation.simple_moderate_and_respond(opener, ctx):
return
if use_threads:
if private:
embed_title = f"{user.name}'s private conversation with GPT"
thread = await ctx.channel.create_thread(
name=embed_title,
auto_archive_duration=60,
)
target = thread
else:
embed_title = f"{user.name}'s conversation with GPT"
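# For public conversations, post an embed in the channel and attach the conversation thread to it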
message_embed = discord.Embed(
title=embed_title,
description=f"**Model**: {self.model.model if not model else model}",
color=0x808080,
)
message_embed.set_thumbnail(url="https://i.imgur.com/asA13vI.png")
footer_text = (
"Regular Chat"
if not image_understanding_model.get_is_usable()
else "Regular Chat, Multi-Modal"
)
message_embed.set_footer(
text=footer_text, icon_url="https://i.imgur.com/asA13vI.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=user.name + "'s conversation with GPT",
auto_archive_duration=60,
)
await ctx.respond("Conversation started.")
target = thread
else:
# Check if this current channel is already in a conversation
if ctx.channel.id in self.conversation_threads:
await ctx.respond(
"There is already a conversation in this channel. Please finish that conversation before starting a new one."
)
return
# Check if the user is permitted to start a conversation in full channels
# check if any of the user role names match CHANNEL_CHAT_ROLES
if CHANNEL_CHAT_ROLES and CHANNEL_CHAT_ROLES != [None]:
if not any(
role.name.lower() in CHANNEL_CHAT_ROLES for role in ctx.user.roles
):
await ctx.respond(
"You are not permitted to start a conversation in this channel."
)
return
target = ctx.channel
if private:
embed_title = f"{user.name}'s private conversation with GPT"
else:
embed_title = f"{user.name}'s conversation with GPT"
embed = discord.Embed(title=embed_title, color=0x808080)
await ctx.respond(embed=embed)
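# Register the new conversation against the target channel/thread and record which model it should use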
self.conversation_threads[target.id] = Thread(target.id)
self.conversation_threads[target.id].model = (
self.model.model if not model else model
)
# Set the overrides for the conversation
self.conversation_threads[target.id].set_overrides(
temperature, top_p, frequency_penalty, presence_penalty
)
if opener or opener_file:
user_id_normalized = ctx.author.id
else:
user_id_normalized = user.id
if opener_file:
if not opener_file.endswith((".txt", ".json")):
opener_file = (
None # Just start a regular thread if the file fails to load
)
else:
# Load the file and read it into opener
try:
opener_file = re.sub(
".+(?=[\\//])", "", opener_file
) # remove paths from the opener file
opener_file = EnvService.find_shared_file(
f"openers{separator}{opener_file}"
)
opener_file = await self.load_file(opener_file, ctx)
try: # Try opening as json, if it fails it'll just pass the whole txt or json to the opener
opener_file = json.loads(opener_file)
temperature = opener_file.get("temperature", None)
top_p = opener_file.get("top_p", None)
frequency_penalty = opener_file.get("frequency_penalty", None)
presence_penalty = opener_file.get("presence_penalty", None)
self.conversation_threads[target.id].set_overrides(
temperature, top_p, frequency_penalty, presence_penalty
)
if (
not opener
): # if we only use opener_file then only pass on opener_file for the opening prompt
opener = opener_file.get("text", "error getting text")
else:
opener = (
opener_file.get("text", "error getting text") + opener
)
except Exception: # Parse as just regular text
if not opener:
opener = opener_file
else:
opener = opener_file + opener
except Exception:
opener_file = (
None # Just start a regular thread if the file fails to load
)
# Append the starter text for gpt to the user's history so it gets concatenated with the prompt later
if minimal or opener_file or opener:
self.conversation_threads[target.id].history.append(
EmbeddedConversationItem(self.CONVERSATION_STARTER_TEXT_MINIMAL, 0)
)
elif not minimal:
self.conversation_threads[target.id].history.append(
EmbeddedConversationItem(self.CONVERSATION_STARTER_TEXT, 0)
)
# Set user as thread owner before sending anything that can error and leave the thread unowned
self.conversation_thread_owners[user_id_normalized].append(target.id)
overrides = self.conversation_threads[target.id].get_overrides()
await target.send(f"<@{str(ctx.user.id)}> is the thread owner.")
await target.send(
embed=EmbedStatics.generate_conversation_embed(
self.conversation_threads, target, opener, overrides
)
)
# send opening
if opener:
self.conversation_threads[target.id].has_opener = True
opener = await self.mention_to_username(ctx, opener)
target_message = await target.send(
embed=EmbedStatics.generate_opener_embed(opener)
)
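# Mark the user and thread as awaiting a response and seed the history with the opener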
if target.id in self.conversation_threads:
self.awaiting_responses.append(user_id_normalized)
if not self.pinecone_service:
self.conversation_threads[target.id].history.append(
EmbeddedConversationItem(
f"\n{ctx.author.display_name}: {opener} <|endofstatement|>\n",
0,
)
)
self.awaiting_thread_responses.append(target.id)
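# Rebuild an Override object from the per-thread override values before sending the opener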
overrides = Override(
overrides["temperature"],
overrides["top_p"],
overrides["frequency_penalty"],
overrides["presence_penalty"],
)
await TextService.encapsulated_send(
self,
target.id,
opener
if target.id not in self.conversation_threads or self.pinecone_service
else "".join(
[item.text for item in self.conversation_threads[target.id].history]
),
target_message,
overrides=overrides,
user=user,
model=self.conversation_threads[target.id].model,
custom_api_key=user_api_key,
)
self.awaiting_responses.remove(user_id_normalized)
if target.id in self.awaiting_thread_responses:
self.awaiting_thread_responses.remove(target.id)
async def end_command(self, ctx: discord.ApplicationContext):
"""Command handler. Gets the user's thread and ends it"""
await ctx.defer(ephemeral=True)
user_id = ctx.user.id
if ctx.channel.id in self.conversation_threads:
try:
await self.end_conversation(ctx)
except Exception as e:
print(e)
traceback.print_exc()
else:
await ctx.respond(
"This is not a conversation channel.", ephemeral=True, delete_after=10
)
async def setup_command(self, ctx: discord.ApplicationContext):
"""Command handler. Opens the setup modal"""
if not USER_INPUT_API_KEYS:
await ctx.respond(
"This server doesn't support user input API keys.",
ephemeral=True,
delete_after=30,
)
return
modal = SetupModal(user_key_db=USER_KEY_DB)
await ctx.send_modal(modal)
async def settings_command(
self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None
):
"""Command handler. Returns current settings or sets new values"""
await ctx.defer()
if parameter is None and value is None:
await self.send_settings_text(ctx)
return
# If only one of the options are set, then this is invalid.
if (
parameter is None
and value is not None
or parameter is not None
and value is None
):
await ctx.respond(
"Invalid settings command. Please use `/settings <parameter> <value>` to change a setting"
)
return
# Otherwise, process the settings change
await self.process_settings(ctx, parameter, value)
async def settings_reset_command(self, ctx: discord.ApplicationContext):
"""Command handler. Resets all settings to default"""
await ctx.defer()
self.model.reset_settings()
await ctx.respond("Settings reset to default")
#
# Text-based context menu commands from here
#
async def ask_gpt_action(self, ctx, message: discord.Message):
"""Message command. Return the message"""
prompt = await self.mention_to_username(ctx, message.content)
await self.ask_command(
ctx,
prompt=prompt,
from_ask_action=prompt,
)
async def paraphrase_action(self, ctx, message: discord.Message):
"""Message command. paraphrase the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = prompt + "\nParaphrased:"
# Construct the paraphrase prompt
prompt = f"Paraphrase the following text. Maintain roughly the same text length after paraphrasing and the same tone of voice: {prompt} \nParaphrased:"
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 1000:
await ctx.respond(
f"This message is too long to paraphrase.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(
ctx,
prompt=prompt,
from_other_action=from_other_action,
)
async def elaborate_action(self, ctx, message: discord.Message):
"""Message command. elaborate on the subject of the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = prompt + "\nElaboration:"
# Construct the paraphrase prompt
prompt = f"Elaborate with more information about the subject of the following message. Be objective and detailed and respond with elaborations only about the subject(s) of the message: {prompt} \n\nElaboration:"
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 500:
await ctx.respond(
f"This message is too long to elaborate on.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(
ctx,
prompt=prompt,
from_other_action=from_other_action,
)
async def summarize_action(self, ctx, message: discord.Message):
"""Message command. elaborate on the subject of the current message content"""
user = ctx.user
prompt = await self.mention_to_username(ctx, message.content)
from_other_action = (
"Message at message link: " + message.jump_url + "\nSummarized:"
)
# Construct the paraphrase prompt
prompt = f"Summarize the following message, be as short and concise as possible: {prompt} \n\nSummary:"
tokens = self.model.usage_service.count_tokens(prompt)
if tokens > self.model.max_tokens - 300:
await ctx.respond(
f"Your prompt is too long. It has {tokens} tokens, but the maximum is {self.model.max_tokens-300}.",
ephemeral=True,
delete_after=10,
)
return
await self.ask_command(
ctx,
prompt=prompt,
from_other_action=from_other_action,
)
class ShareView(discord.ui.View):
def __init__(
self,
converser_cog,
conversation_id,
):
super().__init__(timeout=3600) # 1 hour interval to share the conversation.
self.converser_cog = converser_cog
self.conversation_id = conversation_id
self.add_item(ShareButton(converser_cog, conversation_id))
async def on_timeout(self):
# Remove the button from the view/message
self.clear_items()
class ShareButton(discord.ui.Button["ShareView"]):
def __init__(self, converser_cog, conversation_id):
super().__init__(
style=discord.ButtonStyle.green,
label="Share Conversation",
custom_id="share_conversation",
)
self.converser_cog = converser_cog
self.conversation_id = conversation_id
async def callback(self, interaction: discord.Interaction):
# Format the full conversation history and upload it to ShareGPT
try:
id = await self.converser_cog.sharegpt_service.format_and_share(
self.converser_cog.full_conversation_history[self.conversation_id],
self.converser_cog.bot.user.default_avatar.url
if not self.converser_cog.bot.user.avatar
else self.converser_cog.bot.user.avatar.url,
)
url = f"https://shareg.pt/{id}"
await interaction.response.send_message(
embed=EmbedStatics.get_conversation_shared_embed(url)
)
except ValueError as e:
traceback.print_exc()
await interaction.response.send_message(
embed=EmbedStatics.get_conversation_share_failed_embed(
"The ShareGPT API returned an error: " + str(e)
),
ephemeral=True,
delete_after=15,
)
return
except Exception as e:
traceback.print_exc()
await interaction.response.send_message(
embed=EmbedStatics.get_conversation_share_failed_embed(str(e)),
ephemeral=True,
delete_after=15,
)
return
| SwarmsDiscord-main | swarmsdiscord/cogs/text_service_cog.py |
import datetime
import traceback
from pathlib import Path
import discord
import os
from models.embed_statics_model import EmbedStatics
from services.deletion_service import Deletion
from services.environment_service import EnvService
from services.moderations_service import Moderation
from services.text_service import TextService
from models.index_model import Index_handler
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
GITHUB_TOKEN = EnvService.get_github_token()
if GITHUB_TOKEN:
os.environ["GITHUB_TOKEN"] = GITHUB_TOKEN
class IndexService(discord.Cog, name="IndexService"):
"""Cog containing gpt-index commands"""
def __init__(
self,
bot,
usage_service,
deletion_queue,
):
super().__init__()
self.bot = bot
self.index_handler = Index_handler(bot, usage_service)
self.thread_awaiting_responses = []
self.deletion_queue = deletion_queue
@discord.Cog.listener()
async def on_message(self, message):
# Check for self
if message.author == self.bot.user:
return
# Check if the message is from a guild.
if not message.guild:
return
if message.content.strip().startswith("~"):
return
if message.channel.id in self.thread_awaiting_responses:
resp_message = await message.reply(
"Please wait for the agent to respond to a previous message first!"
)
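# Queue both the user's message and the reminder reply for deletion after a short delay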
deletion_time = datetime.datetime.now() + datetime.timedelta(seconds=5)
deletion_time = deletion_time.timestamp()
original_deletion_message = Deletion(message, deletion_time)
deletion_message = Deletion(resp_message, deletion_time)
await self.deletion_queue.put(deletion_message)
await self.deletion_queue.put(original_deletion_message)
return
# Pre moderation
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(message.content, message):
await message.delete()
return
prompt = message.content.strip()
if await self.index_handler.get_is_in_index_chat(message):
self.thread_awaiting_responses.append(message.channel.id)
try:
await message.channel.trigger_typing()
except Exception:
pass
chat_result = await self.index_handler.execute_index_chat_message(
message, prompt
)
if chat_result:
await message.channel.send(chat_result)
self.thread_awaiting_responses.remove(message.channel.id)
async def index_chat_command(self, ctx, user_index, search_index, model):
"""Command handler to start a chat session against a user or search index"""
if not user_index and not search_index:
await ctx.respond("Please provide a valid user index or search index")
return
await self.index_handler.start_index_chat(ctx, search_index, user_index, model)
async def rename_user_index_command(self, ctx, user_index, new_name):
"""Command handler to rename a user index"""
if not new_name:
await ctx.respond(
await EmbedStatics.get_index_rename_failure_embed(
user_index.split("/")[-1],
"None",
"Please provide a new name for this index",
)
)
return
if await self.index_handler.rename_index(
ctx,
f"indexes/{ctx.user.id}/{user_index}",
f"indexes/{ctx.user.id}/{new_name}",
):
await ctx.respond(
embed=EmbedStatics.get_index_rename_success_embed(
user_index.split("/")[-1], new_name
)
)
else:
await ctx.respond(
embed=EmbedStatics.get_index_rename_failure_embed(
user_index.split("/")[-1],
new_name,
"Please check the server console for more details.",
)
)
async def rename_server_index_command(self, ctx, server_index, new_name):
"""Command handler to rename a user index"""
if not new_name:
await ctx.respond(
await EmbedStatics.get_index_rename_failure_embed(
server_index.split("/")[-1],
"None",
"Please provide a new name for this index",
)
)
return
if await self.index_handler.rename_index(
ctx,
f"indexes/{ctx.guild.id}/{server_index}",
f"indexes/{ctx.guild.id}/{new_name}",
):
await ctx.respond(
embed=EmbedStatics.get_index_rename_success_embed(
server_index.split("/")[-1], new_name
)
)
else:
await ctx.respond(
embed=EmbedStatics.get_index_rename_failure_embed(
server_index.split("/")[-1],
new_name,
"Please check the server console for more details.",
)
)
async def rename_search_index_command(self, ctx, search_index, new_name):
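"""Command handler to rename a search index"""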
if not new_name:
await ctx.respond(
await EmbedStatics.get_index_rename_failure_embed(
search_index.split("/")[-1],
"None",
"Please provide a new name for this index",
)
)
return
if await self.index_handler.rename_index(
ctx,
f"indexes/{ctx.user.id}_search/{search_index}",
f"indexes/{ctx.user.id}_search/{new_name}",
):
await ctx.respond(
embed=EmbedStatics.get_index_rename_success_embed(
search_index.split("/")[-1], new_name
)
)
else:
await ctx.respond(
embed=EmbedStatics.get_index_rename_failure_embed(
search_index.split("/")[-1],
new_name,
"Please check the server console for more details.",
)
)
async def set_index_link_recurse_command(
self, ctx, link: str = None, depth: int = 1
):
"""Command handler to set a link as your personal index, recursing into linked pages up to the given depth"""
await ctx.defer()
if not link:
await ctx.respond("Please provide a link")
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await self.index_handler.set_link_index_recurse(
ctx, link, depth, user_api_key=user_api_key
)
async def set_index_command(
self, ctx, file: discord.Attachment = None, link: str = None
):
"""Command handler to set a file or link as your personal index"""
await ctx.defer()
if not file and not link:
await ctx.respond("Please provide a file or a link")
return
if file and link:
await ctx.respond(
"Please provide only one file or link. Only one or the other."
)
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
if file:
await self.index_handler.set_file_index(
ctx, file, user_api_key=user_api_key
)
elif link:
await self.index_handler.set_link_index(
ctx, link, user_api_key=user_api_key
)
async def set_discord_command(
self, ctx, channel: discord.TextChannel = None, message_limit: int = 2500
):
"""Command handler to set a channel as your personal index"""
await ctx.defer()
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await self.index_handler.set_discord_index(
ctx, channel, user_api_key=user_api_key, message_limit=message_limit
)
async def reset_command(self, ctx):
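"""Command handler to reset your user indexes"""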
await ctx.defer()
try:
self.index_handler.reset_indexes(ctx.user.id)
await ctx.respond("Your indexes have been reset")
except Exception:
traceback.print_exc()
await ctx.respond(
"Something went wrong while resetting your indexes. Contact the server admin."
)
async def discord_backup_command(self, ctx, message_limit: int = 2500):
"""Command handler to backup the entire server"""
await ctx.defer()
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await self.index_handler.backup_discord(
ctx, user_api_key=user_api_key, message_limit=message_limit
)
async def load_index_command(self, ctx, user_index, server_index, search_index):
"""Command handler to load indexes"""
if not user_index and not server_index and not search_index:
await ctx.respond("Please provide a user or server or search index")
return
if (
user_index
and server_index
or user_index
and search_index
or server_index
and search_index
):
await ctx.respond(
"Please only try to load one type of index. Either a user index, a server index or a search index."
)
return
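# Work out which of the three index types was supplied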
search = False
if server_index:
index = server_index
server = True
elif user_index:
index = user_index
server = False
else:
index = search_index
server = False
search = True
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await self.index_handler.load_index(ctx, index, server, search, user_api_key)
async def query_command(
self,
ctx,
query,
nodes,
response_mode,
child_branch_factor,
model,
multistep,
):
"""Command handler to query your index"""
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
# Check the opener for bad content.
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(query, ctx):
return
await self.index_handler.query(
ctx,
query,
response_mode,
nodes,
user_api_key,
child_branch_factor,
model,
multistep,
)
async def compose_command(self, ctx, name):
"""Command handler to compose from your index"""
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await self.index_handler.compose(ctx, name, user_api_key)
| SwarmsDiscord-main | swarmsdiscord/cogs/index_service_cog.py |
import asyncio
import os
import traceback
import discord
# We don't use the converser cog here because we want to be able to redo for the last images and text prompts at the same time
from sqlitedict import SqliteDict
from services.environment_service import EnvService
from services.image_service import ImageService
from services.moderations_service import Moderation
from services.text_service import TextService
users_to_interactions = {}
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
class DrawDallEService(discord.Cog, name="DrawDallEService"):
"""Cog containing a draw commands and file management for saved images"""
def __init__(
self, bot, usage_service, model, message_queue, deletion_queue, converser_cog
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = model
self.message_queue = message_queue
self.deletion_queue = deletion_queue
self.converser_cog = converser_cog
print("Draw service initialized")
self.redo_users = {}
async def draw_command(
self, ctx: discord.ApplicationContext, prompt: str, from_action=False
):
"""With an ApplicationContext and prompt, send a dalle image to the invoked channel. Ephemeral if from an action"""
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await ctx.defer()
# Check the opener for bad content.
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(prompt, ctx):
return
user = ctx.user
if user == self.bot.user:
return
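# Kick off the image generation as a background task so the command handler is not blocked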
try:
asyncio.ensure_future(
ImageService.encapsulated_send(
self, user.id, prompt, ctx, custom_api_key=user_api_key
)
)
except Exception as e:
print(e)
traceback.print_exc()
await ctx.respond(
"Something went wrong. Please try again later.", ephemeral=from_action
)
await ctx.send_followup(e, ephemeral=from_action)
async def draw_action(self, ctx, message):
"""decoupler to handle context actions for the draw command"""
await self.draw_command(ctx, message.content, from_action=True)
async def local_size_command(self, ctx: discord.ApplicationContext):
"""Get the folder size of the image folder"""
await ctx.defer()
image_path = self.model.IMAGE_SAVE_PATH
total_size = 0
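# Walk the image directory and sum the sizes of all saved files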
for dirpath, dirnames, filenames in os.walk(image_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
# Format the size to be in MB and send.
total_size = total_size / 1000000
await ctx.respond(f"The size of the local images folder is {total_size} MB.")
async def clear_local_command(self, ctx):
"""Delete all local images"""
await ctx.defer()
image_path = self.model.IMAGE_SAVE_PATH
for dirpath, dirnames, filenames in os.walk(image_path):
for f in filenames:
try:
fp = os.path.join(dirpath, f)
os.remove(fp)
except Exception as e:
print(e)
await ctx.respond("Local images cleared.")
| SwarmsDiscord-main | swarmsdiscord/cogs/image_service_cog.py |
import traceback
import aiohttp
import discord
from models.deepl_model import TranslationModel
from services.environment_service import EnvService
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
def build_translation_embed(
text,
translated_text,
translated_language,
detected_language,
requestor: discord.User,
):
"""Build an embed for the translation"""
embed_description = (
f"**Original Text:** \n\n{text}\n\n **Translated Text:** \n\n{translated_text}"
)
embed = discord.Embed(
title=f"Translation from {detected_language} to {translated_language}",
description=embed_description,
color=0x311432,
)
embed.set_footer(
text=f"Requested by {requestor.name}#{requestor.discriminator}",
icon_url=requestor.avatar.url
if requestor.avatar
else requestor.default_avatar.url,
)
return embed
class TranslationService(discord.Cog, name="TranslationService"):
"""Cog containing translation commands and retrieval of translation services"""
def __init__(
self,
bot,
translation_model,
):
super().__init__()
self.bot = bot
self.translation_model = translation_model
def build_supported_language_embed(self):
"""Build an embed for the translation"""
embed = discord.Embed(
title="Translator supported languages",
color=0x311432,
)
# Add the list of supported languages in a nice format
embed.add_field(
name="Languages",
value=", ".join(
[f"{name}" for name in TranslationModel.get_all_country_names()]
),
inline=False,
)
return embed
async def translate_command(self, ctx, text, target_language, formality):
"""Command handler for the translation command"""
await ctx.defer()
# TODO Add pagination!
if (
target_language.lower().strip()
not in TranslationModel.get_all_country_names(lower=True)
):
await ctx.respond(
f"The language {target_language} is not recognized or supported. Please use `/languages` to see the list of supported languages."
)
return
try:
(
response,
detected_language,
) = await self.translation_model.send_translate_request(
text,
TranslationModel.get_country_code_from_name(target_language),
formality,
)
except aiohttp.ClientResponseError as e:
await ctx.respond(f"There was an error with the DeepL API: {e.message}")
return
await ctx.respond(
embed=build_translation_embed(
text,
response,
target_language,
TranslationModel.get_country_name_from_code(detected_language),
ctx.user,
)
)
async def translate_action(self, ctx: discord.ApplicationContext, message):
await ctx.defer(ephemeral=True)
# If the message is only an embed and there's no content, don't translate.
if message.content == "" and len(message.embeds) > 0:
await ctx.respond(
"Cannot translate an embed.", ephemeral=True, delete_after=30
)
return
if len(message.content) > 2000:
await ctx.respond(
"Message is too long to translate.", ephemeral=True, delete_after=30
)
return
selection_message = await ctx.respond(
"Select language", ephemeral=True, delete_after=60
)
await selection_message.edit(
view=TranslateView(
self.translation_model, message, selection_message, ctx.user
)
)
async def languages_command(self, ctx):
"""Show all languages supported for translation"""
await ctx.defer()
await ctx.respond(embed=self.build_supported_language_embed())
class TranslateView(discord.ui.View):
def __init__(self, translation_model, message, selection_message, requestor):
super().__init__()
self.language_long = None
self.language = None
self.translation_model = translation_model
self.message = message
self.selection_message = selection_message
self.formality = None
self.requestor = requestor
@discord.ui.select( # the decorator that lets you specify the properties of the select menu
placeholder="Language", # the placeholder text that will be displayed if nothing is selected
min_values=1, # the minimum number of values that must be selected by the users
max_values=1, # the maximum number of values that can be selected by the users
options=[ # the list of options from which users can choose, a required field
discord.SelectOption(
label=name,
)
for name in TranslationModel.get_all_country_names()
],
)
async def select_callback(
self, select, interaction
): # the function called when the user is done selecting options
try:
self.language = TranslationModel.get_country_code_from_name(
select.values[0]
)
self.language_long = select.values[0]
await interaction.response.defer()
except:
traceback.print_exc()
@discord.ui.select(
placeholder="Formality (optional)",
min_values=1,
max_values=1,
options=[
discord.SelectOption(label="Prefer more", value="prefer_more"),
discord.SelectOption(label="default", value="default"),
discord.SelectOption(label="Prefer less", value="prefer_less"),
],
)
async def formality_callback(self, select, interaction):
try:
self.formality = select.values[0]
await interaction.response.defer()
except aiohttp.ClientResponseError as e:
await interaction.response.send_message(
f"There was an error with the DeepL API: {e.message}",
ephemeral=True,
delete_after=15,
)
return
except Exception as e:
await interaction.response.send_message(
f"There was an error: {e}", ephemeral=True, delete_after=15
)
return
# A button "Translate"
@discord.ui.button(label="Translate", style=discord.ButtonStyle.green)
async def button_callback(self, button, interaction):
if not self.language or not self.language_long:
await interaction.response.send_message(
"Please select a language first.", ephemeral=True, delete_after=15
)
return
try:
(
response,
detected_language,
) = await self.translation_model.send_translate_request(
self.message.content,
self.language,
self.formality,
)
await self.message.reply(
mention_author=False,
embed=build_translation_embed(
self.message.content,
response,
self.language_long,
TranslationModel.get_country_name_from_code(detected_language),
self.requestor,
),
)
await self.selection_message.delete()
except aiohttp.ClientResponseError as e:
await interaction.response.send_message(
f"There was an error with the DeepL API: {e.message}",
ephemeral=True,
delete_after=15,
)
return
except discord.errors.HTTPException as e:
if e.code == 50035:
await interaction.response.send_message(
"Message was too long to translate.",
ephemeral=True,
delete_after=15,
)
return
except Exception as e:
await interaction.response.send_message(
f"There was an error: {e}", ephemeral=True, delete_after=15
)
traceback.print_exc()
return
| SwarmsDiscord-main | swarmsdiscord/cogs/translation_service_cog.py |
import re
import traceback
import discord
from sqlitedict import SqliteDict
from models.openai_model import Override, Models
from services.environment_service import EnvService
from models.user_model import RedoUser
from services.image_service import ImageService
from services.moderations_service import Moderation
from services.text_service import TextService
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
USER_INPUT_API_KEYS = EnvService.get_user_input_api_keys()
USER_KEY_DB = EnvService.get_api_db()
PRE_MODERATE = EnvService.get_premoderate()
class ImgPromptOptimizer(discord.Cog, name="ImgPromptOptimizer"):
"""cog containing the optimizer command"""
_OPTIMIZER_PRETEXT = "Optimize the following text for DALL-E image generation to have the most detailed and realistic image possible. Prompt:"
def __init__(
self,
bot,
usage_service,
model,
message_queue,
deletion_queue,
converser_cog,
image_service_cog,
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = model
self.message_queue = message_queue
self.OPTIMIZER_PRETEXT = self._OPTIMIZER_PRETEXT
self.converser_cog = converser_cog
self.image_service_cog = image_service_cog
self.deletion_queue = deletion_queue
try:
image_pretext_path = EnvService.find_shared_file(
"image_optimizer_pretext.txt"
)
# Try to read the image optimizer pretext from
# the file system
with image_pretext_path.open("r") as file:
self.OPTIMIZER_PRETEXT = file.read()
print(f"Loaded image optimizer pretext from {image_pretext_path}")
except Exception:
traceback.print_exc()
self.OPTIMIZER_PRETEXT = self._OPTIMIZER_PRETEXT
async def optimize_command(self, ctx: discord.ApplicationContext, prompt: str):
"""Command handler. Given a string it generates an output that's fitting for image generation"""
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
ctx.user.id, ctx, USER_KEY_DB
)
if not user_api_key:
return
await ctx.defer()
user = ctx.user
final_prompt = self.OPTIMIZER_PRETEXT
# replace mentions with nicknames for the prompt
final_prompt += await self.converser_cog.mention_to_username(ctx, prompt)
# If the prompt doesn't end in a period, terminate it.
if not final_prompt.endswith("."):
final_prompt += "."
# Check the opener for bad content.
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(prompt, ctx):
return
# Get the token amount for the prompt
# tokens = self.usage_service.count_tokens(final_prompt)
try:
response = await self.model.send_request(
final_prompt,
tokens=60,
top_p_override=1.0,
temp_override=0.9,
presence_penalty_override=0.5,
best_of_override=1,
max_tokens_override=60,
custom_api_key=user_api_key,
is_chatgpt_request="turbo" in str(self.model.model)
or "gpt-4" in str(self.model.model),
)
# THIS USES MORE TOKENS THAN A NORMAL REQUEST! The long optimizer pretext means this call can use
# roughly 4000 tokens, so it is relatively expensive even though only a single completion is
# requested (best_of_override=1).
response_text = (
str(response["choices"][0]["text"])
if not (
self.model.model in Models.CHATGPT_MODELS
or self.model.model in Models.GPT4_MODELS
)
else response["choices"][0]["message"]["content"]
)
# escape any mentions
response_text = discord.utils.escape_mentions(response_text)
            # If the response text is longer than 75 words, truncate it to the last 70 words.
            # TODO Temporary workaround until the prompt is adjusted to make the optimized prompts shorter.
try:
if len(response_text.split()) > 75:
response_text = " ".join(response_text.split()[-70:])
except Exception:
pass
response_message = await ctx.respond(
response_text.replace("Optimized Prompt:", "")
.replace("Output Prompt:", "")
.replace("Output:", "")
)
self.converser_cog.users_to_interactions[user.id] = []
self.converser_cog.users_to_interactions[user.id].append(
response_message.id
)
self.converser_cog.redo_users[user.id] = RedoUser(
prompt=final_prompt,
message=ctx,
ctx=ctx,
response=response_message,
instruction=None,
paginator=None,
)
self.converser_cog.redo_users[user.id].add_interaction(response_message.id)
await response_message.edit(
view=OptimizeView(
self.converser_cog,
self.image_service_cog,
self.deletion_queue,
custom_api_key=user_api_key,
)
)
# Catch the value errors raised by the Model object
except ValueError as e:
await ctx.respond(e)
return
# Catch all other errors, we want this to keep going if it errors out.
except Exception as e:
await ctx.respond("Something went wrong, please try again later")
await ctx.send_followup(e)
# print a stack trace
traceback.print_exc()
return
class OptimizeView(discord.ui.View):
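    """View attached to an optimized prompt, exposing Retry and Draw follow-up buttons."""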
def __init__(
self, converser_cog, image_service_cog, deletion_queue, custom_api_key=None
):
super().__init__(timeout=None)
self.cog = converser_cog
self.image_service_cog = image_service_cog
self.deletion_queue = deletion_queue
self.custom_api_key = custom_api_key
self.add_item(
RedoButton(
self.cog,
self.image_service_cog,
self.deletion_queue,
custom_api_key=self.custom_api_key,
)
)
self.add_item(
DrawButton(
self.cog,
self.image_service_cog,
self.deletion_queue,
custom_api_key=self.custom_api_key,
)
)
class DrawButton(discord.ui.Button["OptimizeView"]):
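    """Sends the optimized prompt from the interacted message to the image service to be drawn."""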
def __init__(
self, converser_cog, image_service_cog, deletion_queue, custom_api_key
):
super().__init__(
style=discord.ButtonStyle.green,
label="Draw",
custom_id="draw_button_optimizer",
)
self.converser_cog = converser_cog
self.image_service_cog = image_service_cog
self.deletion_queue = deletion_queue
self.custom_api_key = custom_api_key
async def callback(self, interaction: discord.Interaction):
user_id = interaction.user.id
interaction_id = interaction.message.id
if (
interaction_id not in self.converser_cog.users_to_interactions[user_id]
or interaction_id not in self.converser_cog.redo_users[user_id].interactions
):
await interaction.response.send_message(
content="You can only draw for prompts that you generated yourself!",
ephemeral=True,
)
return
msg = await interaction.response.send_message(
"Drawing this prompt...", ephemeral=False
)
self.converser_cog.users_to_interactions[interaction.user.id].append(msg.id)
self.converser_cog.users_to_interactions[interaction.user.id].append(
interaction.id
)
self.converser_cog.users_to_interactions[interaction.user.id].append(
interaction.message.id
)
# get the text content of the message that was interacted with
prompt = interaction.message.content
        # Use a regex to loosely strip the "Optimized Prompt:" prefix, if present.
        # This ensures the prompt is formatted correctly before drawing.
prompt = re.sub(r"Optimized Prompt: ?", "", prompt)
# Call the image service cog to draw the image
await ImageService.encapsulated_send(
self.image_service_cog,
user_id,
prompt,
interaction,
msg,
True,
True,
custom_api_key=self.custom_api_key,
)
class RedoButton(discord.ui.Button["OptimizeView"]):
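    """Re-runs the original optimization request for the user who generated it."""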
def __init__(
self, converser_cog, image_service_cog, deletion_queue, custom_api_key=None
):
super().__init__(
style=discord.ButtonStyle.danger,
label="Retry",
custom_id="redo_button_optimizer",
)
self.converser_cog = converser_cog
self.image_service_cog = image_service_cog
self.deletion_queue = deletion_queue
self.custom_api_key = custom_api_key
async def callback(self, interaction: discord.Interaction):
interaction_id = interaction.message.id
# Get the user
user_id = interaction.user.id
if user_id in self.converser_cog.redo_users and self.converser_cog.redo_users[
user_id
].in_interaction(interaction_id):
# Get the message and the prompt and call encapsulated_send
ctx = self.converser_cog.redo_users[user_id].ctx
# message = self.converser_cog.redo_users[user_id].message
prompt = self.converser_cog.redo_users[user_id].prompt
response_message = self.converser_cog.redo_users[user_id].response
await interaction.response.send_message(
"Redoing your original request...", ephemeral=True, delete_after=20
)
overrides = Override(1.0, 0.9, 0.5)
await TextService.encapsulated_send(
self.converser_cog,
id=user_id,
prompt=prompt,
overrides=overrides,
ctx=ctx,
response_message=response_message,
custom_api_key=self.custom_api_key,
)
else:
await interaction.response.send_message(
content="You can only redo for prompts that you generated yourself!",
ephemeral=True,
delete_after=10,
)
| SwarmsDiscord-main | swarmsdiscord/cogs/prompt_optimizer_cog.py |
import discord
from pycord.multicog import add_to_group
from services.environment_service import EnvService
from models.check_model import Check
from models.autocomplete_model import (
Settings_autocompleter,
File_autocompleter,
Translations_autocompleter,
)
ALLOWED_GUILDS = EnvService.get_allowed_guilds()
class Commands(discord.Cog, name="Commands"):
"""Cog containing all slash and context commands as one-liners"""
def __init__(
self,
bot,
usage_service,
model,
message_queue,
deletion_queue,
converser_cog,
image_draw_cog,
image_service_cog,
moderations_cog,
index_cog,
translations_cog=None,
search_cog=None,
transcribe_cog=None,
):
super().__init__()
self.bot = bot
self.usage_service = usage_service
self.model = model
self.message_queue = message_queue
self.deletion_queue = deletion_queue
self.converser_cog = converser_cog
self.image_draw_cog = image_draw_cog
self.image_service_cog = image_service_cog
self.moderations_cog = moderations_cog
self.index_cog = index_cog
self.translations_cog = translations_cog
self.search_cog = search_cog
self.transcribe_cog = transcribe_cog
# Create slash command groups
dalle = discord.SlashCommandGroup(
name="dalle",
description="Dalle related commands",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_dalle_roles()],
)
gpt = discord.SlashCommandGroup(
name="gpt",
description="GPT related commands",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_gpt_roles()],
)
system = discord.SlashCommandGroup(
name="system",
description="Admin/System settings for the bot",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_admin_roles()],
)
mod = discord.SlashCommandGroup(
name="mod",
description="AI-Moderation commands for the bot",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_admin_roles()],
)
index = discord.SlashCommandGroup(
name="index",
description="Custom index commands for the bot",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_index_roles()],
)
transcribe = discord.SlashCommandGroup(
name="transcribe",
description="Transcription services using OpenAI Whisper",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_index_roles()], # TODO new role checker for transcribe
)
    internet = discord.SlashCommandGroup(
        name="internet",
        description="Internet-connected chat and search commands",
        guild_ids=ALLOWED_GUILDS,
        checks=[Check.check_index_roles()],  # TODO new role checker for internet commands
    )
#
# System commands
#
@add_to_group("system")
@discord.slash_command(
name="settings",
description="Get settings for swarmsdiscord",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="parameter",
description="The setting to change",
required=False,
autocomplete=Settings_autocompleter.get_settings,
)
@discord.option(
name="value",
description="The value to set the setting to",
required=False,
autocomplete=Settings_autocompleter.get_value,
)
@discord.guild_only()
async def settings(
self, ctx: discord.ApplicationContext, parameter: str = None, value: str = None
):
await self.converser_cog.settings_command(ctx, parameter, value)
@add_to_group("system")
@discord.slash_command(
name="settings-reset",
description="Reset all settings for swarmsdiscord",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def settings_reset(self, ctx: discord.ApplicationContext):
await self.converser_cog.settings_reset_command(ctx)
@add_to_group("system")
@discord.slash_command(
name="local-size",
description="Get the size of the dall-e images folder that we have on the current system",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def local_size(self, ctx: discord.ApplicationContext):
await self.image_draw_cog.local_size_command(ctx)
@add_to_group("system")
@discord.slash_command(
name="clear-local",
description="Clear the local dalleimages folder on system.",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def clear_local(self, ctx: discord.ApplicationContext):
await self.image_draw_cog.clear_local_command(ctx)
@add_to_group("system")
@discord.slash_command(
name="usage",
description="Get usage statistics for swarmsdiscord",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def usage(self, ctx: discord.ApplicationContext):
await self.converser_cog.usage_command(ctx)
@add_to_group("system")
@discord.slash_command(
name="set-usage",
description="Set the current OpenAI usage (in dollars)",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="usage_amount",
description="The current usage amount in dollars and cents (e.g 10.24)",
type=float,
)
async def set_usage(self, ctx: discord.ApplicationContext, usage_amount: float):
await self.converser_cog.set_usage_command(ctx, usage_amount)
@add_to_group("system")
@discord.slash_command(
name="delete-conversation-threads",
description="Delete all conversation threads across the bot servers.",
guild_ids=ALLOWED_GUILDS,
)
async def delete_all_conversation_threads(self, ctx: discord.ApplicationContext):
await self.converser_cog.delete_all_conversation_threads_command(ctx)
# """
# Moderation commands
# """
@add_to_group("mod")
@discord.slash_command(
name="test",
description="Used to test a prompt and see what threshold values are returned by the moderations endpoint",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="prompt",
description="The prompt to test",
required=True,
)
@discord.guild_only()
async def moderations_test(self, ctx: discord.ApplicationContext, prompt: str):
await self.moderations_cog.moderations_test_command(ctx, prompt)
@add_to_group("mod")
@discord.slash_command(
name="set",
description="Turn the moderations service on and off",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="status",
description="Enable or disable the moderations service for the current guild (on/off)",
required=True,
choices=["on", "off"],
)
@discord.option(
name="alert_channel_id",
description="The channel ID to send moderation alerts to",
required=False,
autocomplete=Settings_autocompleter.get_value_alert_id_channel,
)
@discord.guild_only()
async def moderations(
self, ctx: discord.ApplicationContext, status: str, alert_channel_id: str
):
await self.moderations_cog.moderations_command(ctx, status, alert_channel_id)
@add_to_group("mod")
@discord.slash_command(
name="config",
description="Configure the moderations service for the current guild. Lower # = more strict",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="type",
description="The type of moderation to configure",
required=True,
autocomplete=Settings_autocompleter.get_value_moderations,
)
@discord.option(
name="hate",
description="The threshold for hate speech",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="hate_threatening",
description="The threshold for hate/threatening speech",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="self_harm",
description="The threshold for self_harm speech",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="sexual",
description="The threshold for sexual speech",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="sexual_minors",
description="The threshold for sexual speech with minors in context",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="violence",
description="The threshold for violent speech",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="violence_graphic",
description="The threshold for violent and graphic speech",
required=False,
min_value=0,
max_value=1,
)
@discord.guild_only()
async def config(
self,
ctx: discord.ApplicationContext,
type: str,
hate: float,
hate_threatening: float,
self_harm: float,
sexual: float,
sexual_minors: float,
violence: float,
violence_graphic: float,
):
await self.moderations_cog.config_command(
ctx,
type,
hate,
hate_threatening,
self_harm,
sexual,
sexual_minors,
violence,
violence_graphic,
)
#
# GPT commands
#
@add_to_group("gpt")
@discord.slash_command(
name="instruction",
description="Set your own system instruction",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="mode",
description="Set/Get/Clear prompt",
choices=["set", "get", "clear"],
required=True,
)
@discord.option(
name="type",
description="Enable for channel or for user",
choices=["user", "channel"],
required=True,
)
@discord.option(
name="instruction", description="The instruction to set", required=False
)
@discord.option(
name="instruction_file",
description="The instruction to set from a txt file",
input_type=discord.SlashCommandOptionType.attachment,
required=False,
)
@discord.option(
name="private", description="Will only be visible to you", required=False
)
@discord.guild_only()
async def instruction(
self,
ctx: discord.ApplicationContext,
mode: str,
type: str,
instruction: str,
instruction_file: discord.Attachment,
private: bool,
):
await self.converser_cog.instruction_command(
ctx, mode, type, instruction, instruction_file, private
)
@add_to_group("gpt")
@discord.slash_command(
name="ask",
description="Ask the bot something!",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="prompt", description="The prompt to send to the model", required=False
)
@discord.option(
name="prompt_file",
description="The prompt file to send to the model. Is added before the prompt, both can be combined",
required=False,
input_type=discord.SlashCommandOptionType.attachment,
)
@discord.option(
name="model",
description="The model to use for the request",
required=False,
autocomplete=Settings_autocompleter.get_models,
)
@discord.option(
name="private", description="Will only be visible to you", required=False
)
@discord.option(
name="temperature",
        description="Higher values mean the model will take more risks",
required=False,
min_value=0,
max_value=2,
)
@discord.option(
name="top_p",
        description="1 considers the full distribution, 0.1 means only the top 10% of probability mass",
required=False,
min_value=0,
max_value=1,
)
@discord.option(
name="frequency_penalty",
description="Decreasing the model's likelihood to repeat the same line verbatim",
required=False,
min_value=-2,
max_value=2,
)
@discord.option(
name="presence_penalty",
description="Increasing the model's likelihood to talk about new topics",
required=False,
min_value=-2,
max_value=2,
)
@discord.guild_only()
async def ask(
self,
ctx: discord.ApplicationContext,
prompt: str,
prompt_file: discord.Attachment,
model: str,
private: bool,
temperature: float,
top_p: float,
frequency_penalty: float,
presence_penalty: float,
):
await self.converser_cog.ask_command(
ctx,
prompt,
private,
temperature,
top_p,
frequency_penalty,
presence_penalty,
prompt_file=prompt_file,
model=model,
)
@add_to_group("gpt")
@discord.slash_command(
name="edit",
description="Ask the bot to edit some text!",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="instruction",
description="How you want the bot to edit the text",
required=True,
)
@discord.option(
name="text",
description="The text you want to edit, can be empty",
required=False,
default="",
)
@discord.option(
name="private", description="Will only be visible to you", required=False
)
@discord.option(
name="temperature",
        description="Higher values mean the model will take more risks",
required=False,
input_type=float,
min_value=0,
max_value=2,
)
@discord.option(
name="top_p",
        description="1 considers the full distribution, 0.1 means only the top 10% of probability mass",
required=False,
input_type=float,
min_value=0,
max_value=1,
)
@discord.guild_only()
async def edit(
self,
ctx: discord.ApplicationContext,
instruction: str,
text: str,
private: bool,
temperature: float,
top_p: float,
):
await self.converser_cog.edit_command(
ctx, instruction, text, private, temperature, top_p
)
@add_to_group("gpt")
@discord.slash_command(
name="converse",
description="Have a conversation with GPT",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="opener",
description="Which sentence to start with, added after the file",
required=False,
)
@discord.option(
name="opener_file",
description="Which file to start with, added before the opener, sets minimal starter",
required=False,
autocomplete=File_autocompleter.get_openers,
)
@discord.option(
name="private",
description="Converse in a private thread",
required=False,
default=False,
)
@discord.option(
name="minimal",
description="Use minimal starter text, saves tokens and has a more open personality",
required=False,
default=False,
)
@discord.option(
name="model",
description="Which model to use with the bot",
required=False,
default=False,
autocomplete=Settings_autocompleter.get_converse_models,
)
@discord.option(
name="temperature",
        description="Higher values mean the model will take more risks",
required=False,
input_type=float,
min_value=0,
max_value=2,
)
@discord.option(
name="top_p",
        description="1 considers the full distribution, 0.1 means only the top 10%",
required=False,
input_type=float,
min_value=0,
max_value=1,
)
@discord.option(
name="frequency_penalty",
description="Decreasing the model's likelihood to repeat the same line verbatim",
required=False,
input_type=float,
min_value=-2,
max_value=2,
)
@discord.option(
name="presence_penalty",
description="Increasing the model's likelihood to talk about new topics",
required=False,
input_type=float,
min_value=-2,
max_value=2,
)
@discord.option(
name="use_threads",
description="Set this to false to start a channel conversation",
required=False,
default=True,
)
@discord.guild_only()
async def converse(
self,
ctx: discord.ApplicationContext,
opener: str,
opener_file: str,
private: bool,
minimal: bool,
model: str,
temperature: float,
top_p: float,
frequency_penalty: float,
presence_penalty: float,
use_threads: bool,
):
await self.converser_cog.converse_command(
ctx,
opener,
opener_file,
private,
minimal,
model,
temperature,
top_p,
frequency_penalty,
presence_penalty,
use_threads=use_threads,
)
@add_to_group("gpt")
@discord.slash_command(
name="end",
description="End a conversation with GPT",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def end(self, ctx: discord.ApplicationContext):
await self.converser_cog.end_command(ctx)
#
# Index commands
#
@add_to_group("index")
@discord.slash_command(
name="rename-user",
description="Select one of your saved indexes to rename",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="user_index",
description="Which user index to rename",
required=True,
autocomplete=File_autocompleter.get_user_indexes,
)
@discord.option(
name="new_name",
description="The new name",
required=True,
type=discord.SlashCommandOptionType.string,
)
async def rename_user_index(
self,
ctx: discord.ApplicationContext,
user_index: str,
new_name: str,
):
await ctx.defer()
await self.index_cog.rename_user_index_command(ctx, user_index, new_name)
@add_to_group("index")
@discord.slash_command(
name="rename-server",
description="Select one of your saved server indexes to rename",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="server_index",
description="Which server index to rename",
required=True,
autocomplete=File_autocompleter.get_server_indexes,
)
@discord.option(
name="new_name",
description="The new name",
required=True,
type=discord.SlashCommandOptionType.string,
)
async def rename_server_index(
self,
ctx: discord.ApplicationContext,
server_index: str,
new_name: str,
):
await ctx.defer()
await self.index_cog.rename_server_index_command(ctx, server_index, new_name)
@add_to_group("index")
@discord.slash_command(
name="rename-search",
description="Select one of your saved search indexes to rename",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="search_index",
description="Which search index to rename",
required=True,
autocomplete=File_autocompleter.get_user_search_indexes,
)
@discord.option(
name="new_name",
description="The new name",
required=True,
type=discord.SlashCommandOptionType.string,
)
async def rename_search_index(
self,
ctx: discord.ApplicationContext,
search_index: str,
new_name: str,
):
await ctx.defer()
await self.index_cog.rename_search_index_command(ctx, search_index, new_name)
@add_to_group("index")
@discord.slash_command(
name="load",
description="Select one of your saved indexes to query from",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="user_index",
description="Which user file to load the index from",
required=False,
autocomplete=File_autocompleter.get_user_indexes,
)
@discord.option(
name="server_index",
description="Which server file to load the index from",
required=False,
autocomplete=File_autocompleter.get_server_indexes,
)
@discord.option(
name="search_index",
description="Which search index file to load the index from",
required=False,
autocomplete=File_autocompleter.get_user_search_indexes,
)
async def load_index(
self,
ctx: discord.ApplicationContext,
user_index: str,
server_index: str,
search_index: str,
):
await ctx.defer()
await self.index_cog.load_index_command(
ctx, user_index, server_index, search_index
)
@add_to_group("index")
@discord.slash_command(
name="talk",
description="Select one of your saved indexes to talk to",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="user_index",
description="Which user file to load the index from",
required=False,
autocomplete=File_autocompleter.get_user_indexes,
)
@discord.option(
name="search_index",
description="Which search index file to load the index from",
required=False,
autocomplete=File_autocompleter.get_user_search_indexes,
)
@discord.option(
name="model",
description="The model to use for the conversation",
required=False,
default="gpt-3.5-turbo",
autocomplete=Settings_autocompleter.get_index_and_search_models,
)
async def talk(
self,
ctx: discord.ApplicationContext,
user_index: str,
search_index: str,
model: str,
):
await ctx.defer()
await self.index_cog.index_chat_command(ctx, user_index, search_index, model)
@add_to_group("index")
@discord.slash_command(
name="add", description="Add an index to query from", guild_ids=ALLOWED_GUILDS
)
@discord.guild_only()
@discord.option(
name="file",
description="A file to create the index from",
required=False,
input_type=discord.SlashCommandOptionType.attachment,
)
@discord.option(
name="link",
        description="A link to a file or a webpage",
required=False,
input_type=str,
)
async def set_file(
self, ctx: discord.ApplicationContext, file: discord.Attachment, link: str
):
await self.index_cog.set_index_command(ctx, file, link)
@add_to_group("index")
@discord.slash_command(
name="recurse-link",
description="Recursively index a link",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="link",
description="A link to create the index from",
required=True,
input_type=discord.SlashCommandOptionType.string,
)
@discord.option(
name="depth",
description="How deep to recurse",
required=False,
input_type=discord.SlashCommandOptionType.integer,
min_value=1,
max_value=5,
)
async def set_recurse_link(
self, ctx: discord.ApplicationContext, link: str, depth: int
):
await self.index_cog.set_index_link_recurse_command(ctx, link, depth)
@add_to_group("index")
@discord.slash_command(
name="reset",
description="Reset (delete) all of your saved indexes",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def reset(self, ctx: discord.ApplicationContext):
await self.index_cog.reset_command(ctx)
@add_to_group("index")
@discord.slash_command(
name="compose",
description="Combine multiple indexes together",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="name",
description="The name of the new index",
required=False,
input_type=discord.SlashCommandOptionType.string,
)
@discord.guild_only()
async def compose(self, ctx: discord.ApplicationContext, name: str):
await self.index_cog.compose_command(ctx, name)
@add_to_group("index")
@discord.slash_command(
name="add_discord",
        description="Set an index from a discord channel",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="channel",
description="A channel to create the index from",
required=False,
input_type=discord.SlashCommandOptionType.channel,
)
@discord.option(
name="message_limit",
description="The number of messages to index",
required=False,
input_type=discord.SlashCommandOptionType.integer,
)
async def set_discord(
self,
ctx: discord.ApplicationContext,
channel: discord.TextChannel,
message_limit: int,
):
await self.index_cog.set_discord_command(
ctx, channel, message_limit=message_limit
)
@add_to_group("index")
@discord.slash_command(
name="discord_backup",
description="Save an index made from the whole server",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_admin_roles(), Check.check_index_roles()],
)
@discord.option(
name="message_limit",
description="The number of messages to index per channel",
required=False,
input_type=discord.SlashCommandOptionType.integer,
)
@discord.guild_only()
async def discord_backup(self, ctx: discord.ApplicationContext, message_limit: int):
await self.index_cog.discord_backup_command(ctx, message_limit=message_limit)
@add_to_group("index")
@discord.slash_command(
name="query", description="Query from your index", guild_ids=ALLOWED_GUILDS
)
@discord.guild_only()
@discord.option(name="query", description="What to query the index", required=True)
@discord.option(
name="nodes",
description="How many nodes should the response be queried from, only non-deep indexes",
required=False,
default=1,
min_value=1,
max_value=5,
input_type=discord.SlashCommandOptionType.integer,
)
@discord.option(
name="response_mode",
description="Response mode, doesn't work on deep composed indexes",
guild_ids=ALLOWED_GUILDS,
required=False,
default="refine",
choices=["refine", "compact", "tree_summarize"],
)
@discord.option(
name="child_branch_factor",
description="Only for deep indexes, how deep to go, higher is expensive.",
required=False,
default=1,
min_value=1,
max_value=3,
input_type=discord.SlashCommandOptionType.integer,
)
@discord.option(
name="model",
description="The model to use for the request (querying, not composition)",
required=False,
default="gpt-3.5-turbo",
autocomplete=Settings_autocompleter.get_index_and_search_models,
)
@discord.option(
name="multistep",
        description="Do a more intensive, multi-step query",
required=False,
default=False,
input_type=discord.SlashCommandOptionType.boolean,
)
async def query(
self,
ctx: discord.ApplicationContext,
query: str,
nodes: int,
response_mode: str,
child_branch_factor: int,
model: str,
multistep: bool,
):
await ctx.defer()
await self.index_cog.query_command(
ctx,
query,
nodes,
response_mode,
child_branch_factor,
model,
multistep,
)
#
# DALLE commands
#
@add_to_group("dalle")
@discord.slash_command(
name="draw",
description="Draw an image from a prompt",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(name="prompt", description="The prompt to draw from", required=True)
async def draw(self, ctx: discord.ApplicationContext, prompt: str):
await self.image_draw_cog.draw_command(ctx, prompt)
@add_to_group("dalle")
@discord.slash_command(
name="optimize",
description="Optimize a text prompt for DALL-E/MJ/SD image generation.",
guild_ids=ALLOWED_GUILDS,
)
@discord.option(
name="prompt", description="The text prompt to optimize.", required=True
)
@discord.guild_only()
async def optimize(self, ctx: discord.ApplicationContext, prompt: str):
await self.image_service_cog.optimize_command(ctx, prompt)
#
# Other commands
#
@discord.slash_command(
name="private-test",
description="Private thread for testing. Only visible to you and server admins.",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def private_test(self, ctx: discord.ApplicationContext):
await self.converser_cog.private_test_command(ctx)
@discord.slash_command(
name="help", description="Get help for swarmsdiscord", guild_ids=ALLOWED_GUILDS
)
@discord.guild_only()
async def help(self, ctx: discord.ApplicationContext):
await self.converser_cog.help_command(ctx)
@discord.slash_command(
name="setup",
description="Setup your API key for use with swarmsdiscord",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
async def setup(self, ctx: discord.ApplicationContext):
await self.converser_cog.setup_command(ctx)
#
# Text-based context menu commands from here
#
@discord.message_command(
name="Ask GPT", guild_ids=ALLOWED_GUILDS, checks=[Check.check_gpt_roles()]
)
async def ask_gpt_action(self, ctx, message: discord.Message):
await self.converser_cog.ask_gpt_action(ctx, message)
#
# Image-based context menu commands from here
#
@discord.message_command(
name="Draw", guild_ids=ALLOWED_GUILDS, checks=[Check.check_dalle_roles()]
)
async def draw_action(self, ctx, message: discord.Message):
await self.image_draw_cog.draw_action(ctx, message)
"""
Translation commands and actions
"""
@discord.slash_command(
name="translate",
description="Translate text to a given language",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_translator_roles()],
)
@discord.option(name="text", description="The text to translate", required=True)
@discord.option(
name="target_language",
description="The language to translate to",
required=True,
autocomplete=Translations_autocompleter.get_languages,
)
@discord.option(
name="formality",
description="Formal/Informal tone of translation",
required=False,
autocomplete=Translations_autocompleter.get_formality_values,
)
@discord.guild_only()
async def translate(
self,
ctx: discord.ApplicationContext,
text: str,
target_language: str,
formality: str,
):
if self.translations_cog:
await self.translations_cog.translate_command(
ctx, text, target_language, formality
)
else:
await ctx.respond(
"Translations are disabled on this server.", ephemeral=True
)
@discord.slash_command(
name="languages",
description="View the supported languages for translation",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_translator_roles()],
)
@discord.guild_only()
async def languages(self, ctx: discord.ApplicationContext):
if self.translations_cog:
await self.translations_cog.languages_command(ctx)
else:
await ctx.respond(
"Translations are disabled on this server.", ephemeral=True
)
@discord.message_command(
name="Translate",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_translator_roles()],
)
async def translate_action(self, ctx, message: discord.Message):
if self.translations_cog:
await self.translations_cog.translate_action(ctx, message)
else:
await ctx.respond(
"Translations are disabled on this server.", ephemeral=True
)
# @discord.message_command(
# name="Paraphrase",
# guild_ids=ALLOWED_GUILDS,
# checks=[Check.check_gpt_roles()],
# )
# async def paraphrase_action(self, ctx, message: discord.Message):
# await self.converser_cog.paraphrase_action(ctx, message)
@discord.message_command(
name="Elaborate",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_gpt_roles()],
)
async def elaborate_action(self, ctx, message: discord.Message):
await self.converser_cog.elaborate_action(ctx, message)
@discord.message_command(
name="Summarize",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_gpt_roles()],
)
async def summarize_action(self, ctx, message: discord.Message):
await self.converser_cog.summarize_action(ctx, message)
@add_to_group("internet")
@discord.slash_command(
name="chat",
description="Chat with GPT connected to the internet!",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_search_roles()],
)
@discord.option(
name="search_scope",
description="How many top links to use for context",
required=False,
input_type=discord.SlashCommandOptionType.integer,
max_value=6,
min_value=1,
default=2,
)
@discord.option(
name="model",
description="The model to use for the request (querying, not composition)",
required=False,
default="gpt-3.5-turbo",
autocomplete=Settings_autocompleter.get_index_and_search_models,
)
async def chat(
self,
ctx: discord.ApplicationContext,
model: str,
search_scope: int = 2,
):
await self.search_cog.search_chat_command(
ctx, search_scope=search_scope, model=model
)
# Search slash commands
@add_to_group("internet")
@discord.slash_command(
name="search",
description="Search google alongside GPT for something",
guild_ids=ALLOWED_GUILDS,
checks=[Check.check_search_roles()],
)
@discord.option(name="query", description="The query to search", required=True)
@discord.option(
name="scope",
description="How many top links to use for context",
required=False,
input_type=discord.SlashCommandOptionType.integer,
max_value=16,
min_value=1,
)
@discord.option(
name="nodes",
description="The higher the number, the more accurate the results, but more expensive",
required=False,
input_type=discord.SlashCommandOptionType.integer,
max_value=8,
min_value=1,
)
@discord.option(
name="deep",
description="Do a more intensive, long-running search",
required=False,
input_type=discord.SlashCommandOptionType.boolean,
)
@discord.option(
name="response_mode",
description="Response mode, doesn't work on deep searches",
guild_ids=ALLOWED_GUILDS,
required=False,
default="refine",
choices=["refine", "compact", "tree_summarize"],
)
@discord.option(
name="model",
description="The model to use for the request (querying, not composition)",
required=False,
default="gpt-3.5-turbo",
autocomplete=Settings_autocompleter.get_index_and_search_models,
)
@discord.option(
name="multistep",
        description="Do a more intensive, multi-step query",
required=False,
default=False,
input_type=discord.SlashCommandOptionType.boolean,
)
@discord.guild_only()
async def search(
self,
ctx: discord.ApplicationContext,
query: str,
scope: int,
nodes: int,
deep: bool,
response_mode: str,
model: str,
multistep: bool,
):
await self.search_cog.search_command(
ctx,
query,
scope,
nodes,
deep,
response_mode,
model,
multistep,
)
# Transcribe commands
@add_to_group("transcribe")
@discord.slash_command(
name="file",
description="Transcribe an audio or video file",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="file",
description="A file to transcribe",
required=True,
input_type=discord.SlashCommandOptionType.attachment,
)
@discord.option(
name="temperature",
description="The higher the value, the riskier the model will be",
required=False,
input_type=discord.SlashCommandOptionType.number,
max_value=1,
min_value=0,
)
async def transcribe_file(
self,
ctx: discord.ApplicationContext,
file: discord.Attachment,
temperature: float,
):
await self.transcribe_cog.transcribe_file_command(ctx, file, temperature)
@add_to_group("transcribe")
@discord.slash_command(
name="link",
description="Transcribe a file link or youtube link",
guild_ids=ALLOWED_GUILDS,
)
@discord.guild_only()
@discord.option(
name="link",
description="A link to transcribe",
required=True,
input_type=discord.SlashCommandOptionType.string,
)
@discord.option(
name="temperature",
description="The higher the value, the riskier the model will be",
required=False,
input_type=discord.SlashCommandOptionType.number,
max_value=1,
min_value=0,
)
async def transcribe_link(
self, ctx: discord.ApplicationContext, link: str, temperature: float
):
await self.transcribe_cog.transcribe_link_command(ctx, link, temperature)
| SwarmsDiscord-main | swarmsdiscord/cogs/commands.py |
import asyncio
import traceback
from datetime import datetime
import discord
class Deletion:
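    """A queued deletion: a message (or interaction response) plus the timestamp after which it should be deleted."""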
def __init__(self, message, timestamp):
self.message = message
self.timestamp = timestamp
# This function will be called by the bot to process the message queue
@staticmethod
async def process_deletion_queue(
deletion_queue, PROCESS_WAIT_TIME, EMPTY_WAIT_TIME
):
while True:
try:
# If the queue is empty, sleep for a short time before checking again
if deletion_queue.empty():
await asyncio.sleep(EMPTY_WAIT_TIME)
continue
# Get the next message from the queue
deletion = await deletion_queue.get()
# Check if the current timestamp is greater than the deletion timestamp
if datetime.now().timestamp() > deletion.timestamp:
# If the deletion timestamp has passed, delete the message
# check if deletion.message is of type discord.Message
if isinstance(deletion.message, discord.Message):
await deletion.message.delete()
else:
await deletion.message.delete_original_response()
else:
await deletion_queue.put(deletion)
# Sleep for a short time before processing the next message
# This will prevent the bot from spamming messages too quickly
await asyncio.sleep(PROCESS_WAIT_TIME)
except Exception:
traceback.print_exc()
| SwarmsDiscord-main | swarmsdiscord/services/deletion_service.py |
import json
import aiohttp
class ShareGPTService:
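    """Minimal client for the ShareGPT API, used to publish a conversation and return its id."""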
def __init__(self):
self.API_URL = "https://sharegpt.com/api/conversations"
def format_conversation(
self, conversation_history, avatar_url="https://i.imgur.com/SpuAF0v.png"
):
        # ShareGPT expects: { 'avatarUrl': <url>, 'items': [ { 'from': 'human', 'value': <text> }, { 'from': 'gpt', 'value': <text> } ] }
        # The conversation history is not in this format, it's just simple alternating human and bot conversation snippets
conversation = {"avatarUrl": avatar_url, "items": []}
# The conversation history alternates between human and bot
# So we need to add the human and bot items to the conversation
for i in range(len(conversation_history)):
if i % 2 == 0:
conversation["items"].append(
{"from": "human", "value": conversation_history[i]}
)
else:
conversation["items"].append(
{"from": "gpt", "value": conversation_history[i]}
)
return json.dumps(conversation)
async def format_and_share(self, conversation_history, avatar_url=None):
conversation = self.format_conversation(conversation_history, avatar_url)
print(conversation)
headers = {"Content-Type": "application/json"}
async with aiohttp.ClientSession() as session:
async with session.post(
self.API_URL, data=conversation, headers=headers
) as response:
if response.status == 200:
response_json = await response.json()
return response_json["id"]
else:
raise ValueError(
f"ShareGPT returned an invalid response: {await response.text()}"
)
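# format_and_share returns the ShareGPT conversation id on success and raises ValueError when the
# API responds with a non-200 status.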
| SwarmsDiscord-main | swarmsdiscord/services/sharegpt_service.py |
import asyncio
import os
import random
import traceback
from datetime import datetime, timedelta
from pathlib import Path
import discord
from models.openai_model import Model
from services.environment_service import EnvService
from services.usage_service import UsageService
usage_service = UsageService(Path(os.environ.get("DATA_DIR", os.getcwd())))
model = Model(usage_service)
class ModerationResult:
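    """Possible outcomes of a moderation check."""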
WARN = "warn"
DELETE = "delete"
NONE = "none"
class ModerationOptions:
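    """String constants for the available moderation actions."""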
WARN = "warn"
DELETE = "delete"
RESET = "reset"
OPTIONS = [WARN, DELETE, RESET]
class ThresholdSet:
def __init__(self, h_t, hv_t, sh_t, s_t, sm_t, v_t, vg_t):
"""A set of thresholds for the OpenAI moderation endpoint
Args:
h_t (float): hate
            hv_t (float): hate/threatening
sh_t (float): self-harm
s_t (float): sexual
sm_t (float): sexual/minors
v_t (float): violence
vg_t (float): violence/graphic
"""
self.keys = [
"hate",
"hate/threatening",
"self-harm",
"sexual",
"sexual/minors",
"violence",
"violence/graphic",
]
self.thresholds = [
h_t,
hv_t,
sh_t,
s_t,
sm_t,
v_t,
vg_t,
]
# The string representation is just the keys alongside the threshold values
    def __str__(self):
        """Render the thresholds as a '"key": value' list."""
return ", ".join([f"{k}: {v}" for k, v in zip(self.keys, self.thresholds)])
def moderate(self, text, response_message):
category_scores = response_message["results"][0]["category_scores"]
flagged = response_message["results"][0]["flagged"]
for category, threshold in zip(self.keys, self.thresholds):
threshold = float(threshold)
if category_scores[category] > threshold:
return (True, flagged)
return (False, flagged)
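# Example: the pre-moderation threshold set used by Moderation.simple_moderate_and_respond below is
# ThresholdSet(0.26, 0.26, 0.1, 0.95, 0.03, 0.95, 0.4), i.e. a "sexual/minors" score above 0.03 is
# enough to flag a prompt.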
class Moderation:
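    """Queue-based moderation service that scores messages with the OpenAI moderation endpoint.
    Instances are also used as the queue item type (a message plus the timestamp to act on it).
    """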
# Moderation service data
moderation_queues = {}
moderation_alerts_channel = EnvService.get_moderations_alert_channel()
moderation_enabled_guilds = []
moderation_tasks = {}
moderations_launched = []
def __init__(self, message, timestamp):
self.message = message
self.timestamp = timestamp
@staticmethod
def build_moderation_embed():
# Create a discord embed to send to the user when their message gets moderated
embed = discord.Embed(
title="Your message was moderated",
description="Our automatic moderation systems detected that your message was inappropriate and has been deleted. Please review the rules.",
colour=discord.Colour.red(),
)
# Set the embed thumbnail
embed.set_thumbnail(url="https://i.imgur.com/2oL8JSp.png")
embed.set_footer(
text="If you think this was a mistake, please contact the server admins."
)
return embed
@staticmethod
def build_safety_blocked_message():
# Create a discord embed to send to the user when their message gets moderated
embed = discord.Embed(
title="Your request was blocked by the safety system",
description="Our automatic moderation systems detected that your request was inappropriate and it has not been sent. Please review the usage guidelines.",
colour=discord.Colour.red(),
)
# Set the embed thumbnail
embed.set_thumbnail(url="https://i.imgur.com/2oL8JSp.png")
embed.set_footer(
text="If you think this was a mistake, please contact the server admins."
)
return embed
@staticmethod
def build_non_english_message():
# Create a discord embed to send to the user when their message gets moderated
embed = discord.Embed(
title="Your message was moderated",
description="Our automatic moderation systems detected that your message was not in English and has been deleted. Please review the rules.",
colour=discord.Colour.red(),
)
# Set the embed thumbnail
embed.set_thumbnail(url="https://i.imgur.com/2oL8JSp.png")
embed.set_footer(
text="If you think this was a mistake, please contact the server admins."
)
return embed
@staticmethod
async def force_english_and_respond(text, pretext, ctx):
response = await model.send_language_detect_request(text, pretext)
response_text = response["choices"][0]["text"]
if "false" in response_text.lower().strip():
if isinstance(ctx, discord.Message):
await ctx.reply(embed=Moderation.build_non_english_message())
else:
await ctx.respond(embed=Moderation.build_non_english_message())
return False
return True
@staticmethod
async def simple_moderate(text):
return await model.send_moderations_request(text)
@staticmethod
async def simple_moderate_and_respond(text, ctx):
pre_mod_set = ThresholdSet(0.26, 0.26, 0.1, 0.95, 0.03, 0.95, 0.4)
response = await Moderation.simple_moderate(text)
print(response)
flagged = (
True
if Moderation.determine_moderation_result(
text, response, pre_mod_set, pre_mod_set
)
== ModerationResult.DELETE
else False
)
if flagged:
if isinstance(ctx, discord.Message):
await ctx.reply(embed=Moderation.build_safety_blocked_message())
else:
await ctx.respond(embed=Moderation.build_safety_blocked_message())
return True
return False
@staticmethod
def build_admin_warning_message(
moderated_message, deleted_message=None, timed_out=None
):
embed = discord.Embed(
title="Potentially unwanted message in the "
+ moderated_message.guild.name
+ " server",
description=f"**Message from {moderated_message.author.mention}:** {moderated_message.content}",
colour=discord.Colour.yellow(),
)
link = f"https://discord.com/channels/{moderated_message.guild.id}/{moderated_message.channel.id}/{moderated_message.id}"
embed.add_field(name="Message link", value=link, inline=False)
if deleted_message:
embed.add_field(
name="Message deleted by: ", value=deleted_message, inline=False
)
if timed_out:
embed.add_field(name="User timed out by: ", value=timed_out, inline=False)
return embed
@staticmethod
def build_admin_moderated_message(
moderated_message, response_message, user_kicked=None, timed_out=None
):
direct_message_object = isinstance(moderated_message, discord.Message)
moderated_message = (
moderated_message if direct_message_object else moderated_message.message
)
# Create a discord embed to send to the user when their message gets moderated
embed = discord.Embed(
title="A message was moderated in the "
+ moderated_message.guild.name
+ " server",
description=f"Message from {moderated_message.author.mention} was moderated: {moderated_message.content}",
colour=discord.Colour.red(),
)
# Get the link to the moderated message
link = f"https://discord.com/channels/{response_message.guild.id}/{response_message.channel.id}/{response_message.id}"
# set the link of the embed
embed.add_field(name="Moderated message link", value=link, inline=False)
if user_kicked:
embed.add_field(name="User kicked by", value=user_kicked, inline=False)
if timed_out:
embed.add_field(name="User timed out by: ", value=timed_out, inline=False)
return embed
@staticmethod
def determine_moderation_result(text, response, warn_set, delete_set):
warn_result, flagged_warn = warn_set.moderate(text, response)
delete_result, flagged_delete = delete_set.moderate(text, response)
if delete_result:
return ModerationResult.DELETE
if warn_result:
return ModerationResult.WARN
return ModerationResult.NONE
# This function will be called by the bot to process the message queue
@staticmethod
async def process_moderation_queue(
moderation_queue,
PROCESS_WAIT_TIME,
EMPTY_WAIT_TIME,
moderations_alert_channel,
warn_set,
delete_set,
):
while True:
try:
# If the queue is empty, sleep for a short time before checking again
if moderation_queue.empty():
await asyncio.sleep(EMPTY_WAIT_TIME)
continue
# Get the next message from the queue
to_moderate = await moderation_queue.get()
# Check if the current timestamp is greater than the deletion timestamp
if datetime.now().timestamp() > to_moderate.timestamp:
response = await model.send_moderations_request(
to_moderate.message.content
)
moderation_result = Moderation.determine_moderation_result(
to_moderate.message.content, response, warn_set, delete_set
)
if moderation_result == ModerationResult.DELETE:
# Take care of the flagged message
response_message = await to_moderate.message.reply(
embed=Moderation.build_moderation_embed()
)
# Do the same response as above but use an ephemeral message
await to_moderate.message.delete()
# Send to the moderation alert channel
if moderations_alert_channel:
response_message = await moderations_alert_channel.send(
embed=Moderation.build_admin_moderated_message(
to_moderate, response_message
)
)
await response_message.edit(
view=ModerationAdminView(
to_moderate.message,
response_message,
True,
True,
True,
)
)
elif moderation_result == ModerationResult.WARN:
response_message = await moderations_alert_channel.send(
embed=Moderation.build_admin_warning_message(
to_moderate.message
),
)
# Attempt to react to the to_moderate.message with a warning icon
try:
await to_moderate.message.add_reaction("β οΈ")
except discord.errors.Forbidden:
pass
await response_message.edit(
view=ModerationAdminView(
to_moderate.message, response_message
)
)
else:
await moderation_queue.put(to_moderate)
# Sleep for a short time before processing the next message
# This will prevent the bot from spamming messages too quickly
await asyncio.sleep(PROCESS_WAIT_TIME)
except Exception:
traceback.print_exc()
class ModerationAdminView(discord.ui.View):
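    """View attached to moderation alerts, giving admins timeout, delete, approve and (when relevant) kick buttons."""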
def __init__(
self,
message,
moderation_message,
nodelete=False,
deleted_message=False,
source_deleted=False,
):
        super().__init__(timeout=None)  # No timeout; the view stays active until an admin acts on it.
component_number = 0
self.message = message
self.moderation_message = (moderation_message,)
self.add_item(
TimeoutUserButton(
self.message,
self.moderation_message,
component_number,
1,
nodelete,
source_deleted,
)
)
component_number += 1
self.add_item(
TimeoutUserButton(
self.message,
self.moderation_message,
component_number,
6,
nodelete,
source_deleted,
)
)
component_number += 1
self.add_item(
TimeoutUserButton(
self.message,
self.moderation_message,
component_number,
12,
nodelete,
source_deleted,
)
)
component_number += 1
self.add_item(
TimeoutUserButton(
self.message,
self.moderation_message,
component_number,
24,
nodelete,
source_deleted,
)
)
component_number += 1
if not nodelete:
self.add_item(
DeleteMessageButton(
self.message, self.moderation_message, component_number
)
)
component_number += 1
self.add_item(
ApproveMessageButton(
self.message, self.moderation_message, component_number
)
)
component_number += 1
if deleted_message:
self.add_item(
KickUserButton(self.message, self.moderation_message, component_number)
)
class ApproveMessageButton(discord.ui.Button["ModerationAdminView"]):
def __init__(self, message, moderation_message, current_num):
super().__init__(
style=discord.ButtonStyle.green, label="Approve", custom_id="approve_button"
)
self.message = message
self.moderation_message = moderation_message
self.current_num = current_num
async def callback(self, interaction: discord.Interaction):
# Remove reactions on the message, delete the moderation message
await self.message.clear_reactions()
await self.moderation_message[0].delete()
class DeleteMessageButton(discord.ui.Button["ModerationAdminView"]):
def __init__(self, message, moderation_message, current_num):
super().__init__(
style=discord.ButtonStyle.danger,
label="Delete Message",
custom_id="delete_button",
)
self.message = message
self.moderation_message = moderation_message
self.current_num = current_num
async def callback(self, interaction: discord.Interaction):
# Get the user
await self.message.delete()
await interaction.response.send_message(
"This message was deleted", ephemeral=True, delete_after=10
)
while isinstance(self.moderation_message, tuple):
self.moderation_message = self.moderation_message[0]
await self.moderation_message.edit(
embed=Moderation.build_admin_warning_message(
self.message, deleted_message=interaction.user.mention
),
view=ModerationAdminView(
self.message, self.moderation_message, nodelete=True
),
)
class KickUserButton(discord.ui.Button["ModerationAdminView"]):
def __init__(self, message, moderation_message, current_num):
super().__init__(
style=discord.ButtonStyle.danger, label="Kick User", custom_id="kick_button"
)
self.message = message
self.moderation_message = moderation_message
self.current_num = current_num
async def callback(self, interaction: discord.Interaction):
# Get the user and kick the user
try:
await self.message.author.kick(
reason="You broke the server rules. Please rejoin and review the rules."
)
except Exception:
pass
        await interaction.response.send_message(
            "An attempt was made to kick this user", ephemeral=True, delete_after=10
        )
while isinstance(self.moderation_message, tuple):
self.moderation_message = self.moderation_message[0]
await self.moderation_message.edit(
embed=Moderation.build_admin_moderated_message(
self.message,
self.moderation_message,
user_kicked=interaction.user.mention,
),
view=ModerationAdminView(
self.message,
self.moderation_message,
nodelete=True,
deleted_message=False,
source_deleted=True,
),
)
class TimeoutUserButton(discord.ui.Button["ModerationAdminView"]):
def __init__(
self, message, moderation_message, current_num, hours, nodelete, source_deleted
):
super().__init__(
style=discord.ButtonStyle.danger,
label=f"Timeout {hours}h",
custom_id="timeout_button" + str(random.randint(100000, 999999)),
)
self.message = message
self.moderation_message = moderation_message
self.hours = hours
self.nodelete = nodelete
self.current_num = current_num
self.source_deleted = source_deleted
async def callback(self, interaction: discord.Interaction):
# Get the user id
try:
await self.message.delete()
except Exception:
pass
try:
await self.message.author.timeout(
until=discord.utils.utcnow() + timedelta(hours=self.hours),
reason="Breaking the server chat rules",
)
except Exception:
traceback.print_exc()
await interaction.response.send_message(
f"This user was timed out for {self.hours} hour(s)",
ephemeral=True,
delete_after=10,
)
while isinstance(self.moderation_message, tuple):
self.moderation_message = self.moderation_message[0]
if not self.source_deleted:
await self.moderation_message.edit(
embed=Moderation.build_admin_warning_message(
self.message,
deleted_message=interaction.user.mention,
timed_out=interaction.user.mention,
),
view=ModerationAdminView(
self.message, self.moderation_message, nodelete=True
),
)
else:
await self.moderation_message.edit(
embed=Moderation.build_admin_moderated_message(
self.message,
self.moderation_message,
timed_out=interaction.user.mention,
),
view=ModerationAdminView(
self.message,
self.moderation_message,
nodelete=True,
deleted_message=True,
source_deleted=True,
),
)
| SwarmsDiscord-main | swarmsdiscord/services/moderations_service.py |
import pinecone
class PineconeService:
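    """Thin wrapper around a Pinecone index for storing and querying conversation embeddings."""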
def __init__(self, index: pinecone.Index):
self.index = index
def upsert_basic(self, text, embeddings):
self.index.upsert([(text, embeddings)])
def get_all_for_conversation(self, conversation_id: int):
response = self.index.query(
top_k=100, filter={"conversation_id": conversation_id}
)
return response
async def upsert_conversation_embedding(
self, model, conversation_id: int, text, timestamp, custom_api_key=None
):
        # If the text is longer than 500 characters, split it up into multiple entries.
        first_embedding = None
        if len(text) > 500:
            # Split the text into 500 character chunks
            chunks = [text[i : i + 500] for i in range(0, len(text), 500)]
for chunk in chunks:
# Create an embedding for the split chunk
embedding = await model.send_embedding_request(
chunk, custom_api_key=custom_api_key
)
if not first_embedding:
first_embedding = embedding
                self.index.upsert(
                    [
                        (
                            chunk,
                            embedding,
                            {
                                "conversation_id": conversation_id,
                                "timestamp": timestamp,
                            },
                        )
                    ]
                )
return first_embedding
embedding = await model.send_embedding_request(
text, custom_api_key=custom_api_key
)
self.index.upsert(
[
(
text,
embedding,
{"conversation_id": conversation_id, "timestamp": timestamp},
)
]
)
return embedding
def get_n_similar(self, conversation_id: int, embedding, n=10):
response = self.index.query(
vector=embedding,
top_k=n,
include_metadata=True,
filter={"conversation_id": conversation_id},
)
# print(response)
relevant_phrases = [
(match["id"], match["metadata"]["timestamp"])
for match in response["matches"]
]
# Sort the relevant phrases based on the timestamp
relevant_phrases.sort(key=lambda x: x[1])
return relevant_phrases
    def get_all_conversation_items(self, conversation_id: int):
        response = self.index.query(
            vector=[0] * 1536,
            top_k=1000,
            include_metadata=True,
            filter={"conversation_id": conversation_id},
        )
        phrases = [
            (match["id"], match["metadata"]["timestamp"])
            for match in response["matches"]
        ]
        # Sort on timestamp so the phrases come back in chronological order
        phrases.sort(key=lambda x: x[1])
        return [phrase for phrase, _ in phrases]
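# Rough usage sketch (the index name here is hypothetical):
#   service = PineconeService(pinecone.Index("conversation-embeddings"))
#   matches = service.get_n_similar(conversation_id, embedding, n=5)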
| SwarmsDiscord-main | swarmsdiscord/services/pinecone_service.py |
import datetime
import traceback
from flask import Flask
from multiprocessing import Process
app = Flask(__name__)
start_time = datetime.datetime.now()
@app.route("/healthz")
def health():
# Find the difference between the current time and start_time in seconds
uptime = (datetime.datetime.now() - start_time).total_seconds()
# Set the response status
status = 200
return {"status": "ok", "uptime": uptime, "uptime_unit": "seconds"}, status
def run_target(host, port):
try:
app.run(host=host, port=port, debug=False, use_reloader=False)
    except Exception:
        # Ignore startup failures (e.g. the port is already in use) so the bot keeps running.
        pass
class HealthService:
"""
Service for health checks, for cloud services like Azure App Service.
"""
def __init__(self, host="0.0.0.0", port=8181):
self.host = host
self.port = port
print("Starting the health check service..")
self.process = Process(target=lambda: run_target(self.host, self.port))
self.process.start()
print("Health check service started!")
def get_process(self):
return self.process
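# Once started, a GET to /healthz on the configured host/port returns
# {"status": "ok", "uptime": <seconds>, "uptime_unit": "seconds"}.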
| SwarmsDiscord-main | swarmsdiscord/services/health_service.py |
SwarmsDiscord-main | swarmsdiscord/services/__init__.py |
|
import os
import sys
import traceback
from pathlib import Path
from typing import Union
from dotenv import load_dotenv
from sqlitedict import SqliteDict
def app_root_path():
app_path = Path(sys.argv[0]).resolve()
try:
        if app_path.parent.name == "bin":  # Installed in unixy hierarchy
return app_path.parents[1]
except IndexError:
pass
return app_path.parent
# None will let direnv do its thing
env_paths = [Path(".env"), app_root_path() / "etc/environment", None]
for env_path in env_paths:
print("Loading environment from " + str(env_path))
load_dotenv(dotenv_path=env_path)
class EnvService:
# To be expanded upon later!
def __init__(self):
self.env = {}
@staticmethod
def environment_path_with_fallback(env_name, relative_fallback=None):
directory = os.getenv(env_name)
if directory is not None:
return Path(directory).resolve()
if relative_fallback:
app_relative = (app_root_path() / relative_fallback).resolve()
if app_relative.exists():
return app_relative
return Path.cwd()
@staticmethod
def save_path():
share_dir = os.getenv("SHARE_DIR")
if share_dir is not None:
return Path(share_dir)
return app_root_path()
@staticmethod
def find_shared_file(file_name):
share_file_paths = []
share_dir = os.getenv("SHARE_DIR")
if share_dir is not None:
share_file_paths.append(Path(share_dir) / file_name)
share_file_paths.extend(
[
app_root_path() / "share" / file_name,
app_root_path() / file_name,
Path(file_name),
]
)
for share_file_path in share_file_paths:
if share_file_path.exists():
return share_file_path.resolve()
raise ValueError(f"Unable to find shared data file {file_name}")
@staticmethod
def get_allowed_guilds():
# ALLOWED_GUILDS is a comma separated list of guild ids
# It can also just be one guild ID
# Read these allowed guilds and return as a list of ints
try:
allowed_guilds = os.getenv("ALLOWED_GUILDS")
except Exception:
allowed_guilds = None
if allowed_guilds is None:
raise ValueError(
"ALLOWED_GUILDS is not defined properly in the environment file!"
"Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file."
'For example a line should look like: `ALLOWED_GUILDS="971268468148166697"`'
)
allowed_guilds = (
allowed_guilds.split(",") if "," in allowed_guilds else [allowed_guilds]
)
allowed_guilds = [int(guild) for guild in allowed_guilds]
return allowed_guilds
@staticmethod
def get_admin_roles():
# ADMIN_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
admin_roles = os.getenv("ADMIN_ROLES")
except Exception:
admin_roles = None
if admin_roles is None:
print(
"ADMIN_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into ADMIN_ROLES in the .env file."
'For example a line should look like: `ADMIN_ROLES="Admin"`'
)
print("Defaulting to allowing all users to use admin commands...")
return [None]
admin_roles = (
admin_roles.lower().split(",")
if "," in admin_roles
else [admin_roles.lower()]
)
return admin_roles
@staticmethod
def get_dalle_roles():
# DALLE_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
dalle_roles = os.getenv("DALLE_ROLES")
except Exception:
dalle_roles = None
if dalle_roles is None:
print(
"DALLE_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into DALLE_ROLES in the .env file."
'For example a line should look like: `DALLE_ROLES="Dalle"`'
)
print("Defaulting to allowing all users to use Dalle commands...")
return [None]
dalle_roles = (
dalle_roles.lower().split(",")
if "," in dalle_roles
else [dalle_roles.lower()]
)
return dalle_roles
@staticmethod
def get_translator_roles():
# TRANSLATOR_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
translator_roles = os.getenv("TRANSLATOR_ROLES")
except Exception:
translator_roles = None
if translator_roles is None:
print(
"TRANSLATOR_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into TRANSLATOR in the .env file."
'For example a line should look like: `TRANSLATOR_ROLES="Translate"`'
)
print("Defaulting to allowing all users to use Translator commands...")
return [None]
translator_roles = (
translator_roles.lower().split(",")
if "," in translator_roles
else [translator_roles.lower()]
)
return translator_roles
@staticmethod
def get_search_roles():
# SEARCH_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
search_roles = os.getenv("SEARCH_ROLES")
except Exception:
search_roles = None
if search_roles is None:
print(
"SEARCH_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into SEARCH in the .env file."
'For example a line should look like: `SEARCH_ROLES="Translate"`'
)
print("Defaulting to allowing all users to use Search commands...")
return [None]
search_roles = (
search_roles.lower().split(",")
if "," in search_roles
else [search_roles.lower()]
)
return search_roles
@staticmethod
def get_gpt_roles():
# GPT_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
gpt_roles = os.getenv("GPT_ROLES")
except Exception:
gpt_roles = None
if gpt_roles is None:
print(
"GPT_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into GPT_ROLES in the .env file."
'For example a line should look like: `GPT_ROLES="Gpt"`'
)
print("Defaulting to allowing all users to use GPT commands...")
return [None]
gpt_roles = (
gpt_roles.lower().strip().split(",")
if "," in gpt_roles
else [gpt_roles.lower()]
)
return gpt_roles
@staticmethod
def get_index_roles():
# INDEX_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
index_roles = os.getenv("INDEX_ROLES")
except Exception:
traceback.print_exc()
index_roles = None
if index_roles is None:
print(
"INDEX_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into INDEX_ROLES in the .env file."
'For example a line should look like: `INDEX_ROLES="Gpt"`'
)
print("Defaulting to allowing all users to use Index commands...")
return [None]
index_roles = (
index_roles.lower().strip().split(",")
if "," in index_roles
else [index_roles.lower()]
)
return index_roles
@staticmethod
def get_channel_chat_roles():
# CHANNEL_CHAT_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
cc_roles = os.getenv("CHANNEL_CHAT_ROLES")
except Exception:
cc_roles = None
if cc_roles is None:
print(
"CHANNEL_CHAT_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into CHANNEL_CHAT_ROLES in the .env file."
'For example a line should look like: `CHANNEL_CHAT_ROLES="Gpt"`'
)
print(
"Defaulting to allowing all users to make conversations in full channels..."
)
return [None]
cc_roles = (
cc_roles.lower().strip().split(",")
if "," in cc_roles
else [cc_roles.lower()]
)
return cc_roles
@staticmethod
def get_channel_instruction_roles():
# CHANNEL_INSTRUCTION_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
cc_roles = os.getenv("CHANNEL_INSTRUCTION_ROLES")
except Exception:
cc_roles = None
if cc_roles is None:
print(
"CHANNEL_INSTRUCTION_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into CHANNEL_INSTRUCTION_ROLES in the .env file."
'For example a line should look like: `CHANNEL_INSTRUCTION_ROLES="Gpt"`'
)
print(
"Defaulting to allowing all users to set instructions for channels..."
)
return [None]
cc_roles = (
cc_roles.lower().strip().split(",")
if "," in cc_roles
else [cc_roles.lower()]
)
return cc_roles
@staticmethod
def get_welcome_message():
# WELCOME_MESSAGE is a default string used to welcome new members to the server if GPT is not available.
# The string can be blank but this is not advised. If a string cannot be found in the .env file, the below string is used.
# The string is DMd to the new server member as part of an embed.
try:
welcome_message = os.getenv("WELCOME_MESSAGE")
except Exception:
welcome_message = "Hi there! Welcome to our Discord server!"
return welcome_message
@staticmethod
def get_moderations_alert_channel():
# MODERATIONS_ALERT_CHANNEL is a channel id where moderation alerts are sent to
# The string can be blank but this is not advised. If a string cannot be found in the .env file, the below string is used.
try:
moderations_alert_channel = os.getenv("MODERATIONS_ALERT_CHANNEL")
except Exception:
moderations_alert_channel = None
return moderations_alert_channel
@staticmethod
def get_user_input_api_keys():
try:
user_input_api_keys = os.getenv("USER_INPUT_API_KEYS")
if user_input_api_keys.lower().strip() == "true":
return True
return False
except Exception:
return False
@staticmethod
def get_premoderate():
try:
pre_moderate = os.getenv("PRE_MODERATE")
if pre_moderate.lower().strip() == "true":
return True
return False
except Exception:
return False
@staticmethod
def get_force_english():
try:
force_english = os.getenv("FORCE_ENGLISH")
if force_english.lower().strip() == "true":
return True
return False
except Exception:
return False
@staticmethod
def get_custom_bot_name():
try:
custom_bot_name = os.getenv("CUSTOM_BOT_NAME") + ": "
return custom_bot_name
except Exception:
return "GPTie: "
@staticmethod
def get_health_service_enabled():
try:
user_input_api_keys = os.getenv("HEALTH_SERVICE_ENABLED")
if user_input_api_keys.lower().strip() == "true":
return True
return False
except Exception:
return False
@staticmethod
def get_bot_is_taggable():
try:
user_input_api_keys = os.getenv("BOT_TAGGABLE")
if user_input_api_keys.lower().strip() == "true":
return True
return False
except Exception:
return False
@staticmethod
def get_user_key_db_path() -> Union[Path, None]:
try:
user_key_db_path = os.getenv("USER_KEY_DB_PATH")
if user_key_db_path is None:
return None
return Path(user_key_db_path)
except Exception:
return None
@staticmethod
def get_api_db():
user_input_api_keys = EnvService.get_user_input_api_keys()
user_key_db = None
if user_input_api_keys:
print(
"This server was configured to enforce user input API keys. Doing the required database setup now"
)
# Get user_key_db from environment variable
user_key_db_path = EnvService.get_user_key_db_path()
# Check if user_key_db_path is valid
if not user_key_db_path:
print(
"No user key database path was provided. Defaulting to user_key_db.sqlite"
)
user_key_db_path = EnvService.find_shared_file("user_key_db.sqlite")
else:
# append "user_key_db.sqlite" to USER_KEY_DB_PATH if it doesn't already end with .sqlite
if not user_key_db_path.match("*.sqlite"):
# append "user_key_db.sqlite" to USER_KEY_DB_PATH
user_key_db_path = user_key_db_path / "user_key_db.sqlite"
user_key_db = SqliteDict(user_key_db_path)
print("Retrieved/created the user key database")
return user_key_db
return user_key_db
@staticmethod
def get_bypass_roles():
        # CHAT_BYPASS_ROLES is a comma separated list of string roles
# It can also just be one role
# Read these allowed roles and return as a list of strings
try:
bypass_roles = os.getenv("CHAT_BYPASS_ROLES")
except Exception:
bypass_roles = None
if bypass_roles is None:
print(
"CHAT_BYPASS_ROLES is not defined properly in the environment file!"
"Please copy your server's role and put it into CHAT_BYPASS_ROLES in the .env file."
'For example a line should look like: `CHAT_BYPASS_ROLES="bypass"`'
)
print("Defaulting to allowing NO ONE to bypass chat moderation")
return [None]
bypass_roles = (
bypass_roles.lower().strip().split(",")
if "," in bypass_roles
else [bypass_roles.lower()]
)
return bypass_roles
@staticmethod
def get_deepl_token():
try:
deepl_token = os.getenv("DEEPL_TOKEN")
return deepl_token
except Exception:
return None
@staticmethod
def get_github_token():
try:
github_token = os.getenv("GITHUB_TOKEN")
return github_token
except Exception:
return None
@staticmethod
def get_openai_token():
try:
openai_token = os.getenv("OPENAI_TOKEN")
return openai_token
except Exception:
raise ValueError(
"OPENAI_TOKEN is not defined properly in the environment file! The bot cannot start without this token."
)
@staticmethod
def get_wolfram_api_key():
try:
openai_token = os.getenv("WOLFRAM_API_KEY")
return openai_token
except Exception:
print(
"WOLFRAM_API_KEY is not defined properly in the environment file! The bot cannot use /internet chat's wolfram functionalities without this"
)
return None
@staticmethod
def get_openai_organization():
try:
openai_org = os.getenv("OPENAI_ORGANIZATION")
return openai_org
except Exception:
return None
@staticmethod
def get_google_search_api_key():
try:
google_search_api_key = os.getenv("GOOGLE_SEARCH_API_KEY")
return google_search_api_key
except Exception:
return None
@staticmethod
def get_replicate_api_key():
try:
replicate_key = os.getenv("REPLICATE_API_KEY")
return replicate_key
except Exception:
return None
@staticmethod
def get_google_search_engine_id():
try:
google_search_engine_id = os.getenv("GOOGLE_SEARCH_ENGINE_ID")
return google_search_engine_id
except Exception:
return None
@staticmethod
def get_pinecone_region():
try:
pinecone_region = os.getenv("PINECONE_REGION")
return pinecone_region
except Exception:
return "us-west1-gcp"
@staticmethod
def get_max_search_price():
try:
search_price = float(os.getenv("MAX_SEARCH_PRICE"))
return search_price
except Exception:
return 1.00
@staticmethod
def get_max_deep_compose_price():
try:
deep_compose_price = float(os.getenv("MAX_DEEP_COMPOSE_PRICE"))
return deep_compose_price
except Exception:
return 3.00
@staticmethod
def get_google_cloud_project_id():
try:
google_cloud_project_id = os.getenv("GOOGLE_CLOUD_PROJECT_ID")
return google_cloud_project_id
except Exception:
return None
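# A minimal sketch of the comma-separated parsing used by the guild and role
# getters above, run against hypothetical values injected into the process
# environment. Real deployments put these values in the .env file instead.
if __name__ == "__main__":
    os.environ["ALLOWED_GUILDS"] = "971268468148166697,123456789012345678"
    os.environ["ADMIN_ROLES"] = "Admin,Moderator"
    print(EnvService.get_allowed_guilds())  # -> [971268468148166697, 123456789012345678]
    print(EnvService.get_admin_roles())  # -> ['admin', 'moderator']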
| SwarmsDiscord-main | swarmsdiscord/services/environment_service.py |
import asyncio
import random
import tempfile
import traceback
from io import BytesIO
import aiohttp
import discord
from PIL import Image
from models.user_model import RedoUser
class ImageService:
def __init__(self):
pass
@staticmethod
async def encapsulated_send(
image_service_cog,
user_id,
prompt,
ctx,
response_message=None,
vary=None,
draw_from_optimizer=None,
custom_api_key=None,
):
"""service function that takes input and returns an image generation
Args:
image_service_cog (Cog): The cog which contains draw related commands
user_id (int): A discord user id
prompt (string): Prompt for the model
ctx (ApplicationContext): A discord ApplicationContext, from an interaction
response_message (Message, optional): A discord message. Defaults to None.
vary (bool, optional): If the image is a variation of another one. Defaults to None.
draw_from_optimizer (bool, optional): If the prompt is passed from the optimizer command. Defaults to None.
custom_api_key (str, optional): User defined OpenAI API key. Defaults to None.
"""
await asyncio.sleep(0)
# send the prompt to the model
from_context = isinstance(ctx, discord.ApplicationContext)
try:
file, image_urls = await image_service_cog.model.send_image_request(
ctx,
prompt,
vary=vary if not draw_from_optimizer else None,
custom_api_key=custom_api_key,
)
# Error catching for API errors
except aiohttp.ClientResponseError as e:
message = (
f"The API returned an invalid response: **{e.status}: {e.message}**"
)
if not from_context:
await ctx.channel.send(message)
else:
await ctx.respond(message, ephemeral=True)
return
except ValueError as e:
message = f"Error: {e}. Please try again with a different prompt."
if not from_context:
await ctx.channel.send(message)
else:
await ctx.respond(message, ephemeral=True)
return
# Start building an embed to send to the user with the results of the image generation
embed = discord.Embed(
title="Image Generation Results"
if not vary
else "Image Generation Results (Varying)"
if not draw_from_optimizer
else "Image Generation Results (Drawing from Optimizer)",
description=f"{prompt}",
color=0xC730C7,
)
# Add the image file to the embed
embed.set_image(url=f"attachment://{file.filename}")
if not response_message: # Original generation case
            # Start an interaction with the user and send the embed and file;
            # the SaveView is attached once the message has been sent.
result_message = (
await ctx.channel.send(
embed=embed,
file=file,
)
if not from_context
else await ctx.respond(embed=embed, file=file)
)
await result_message.edit(
view=SaveView(
ctx,
image_urls,
image_service_cog,
image_service_cog.converser_cog,
result_message,
custom_api_key=custom_api_key,
)
)
image_service_cog.converser_cog.users_to_interactions[user_id] = []
image_service_cog.converser_cog.users_to_interactions[user_id].append(
result_message.id
)
# Get the actual result message object
if from_context:
result_message = await ctx.fetch_message(result_message.id)
image_service_cog.redo_users[user_id] = RedoUser(
prompt=prompt,
message=ctx,
ctx=ctx,
response=result_message,
instruction=None,
paginator=None,
)
else:
if not vary: # Editing case
message = await response_message.edit(
embed=embed,
file=file,
)
await message.edit(
view=SaveView(
ctx,
image_urls,
image_service_cog,
image_service_cog.converser_cog,
message,
custom_api_key=custom_api_key,
)
)
else: # Varying case
if not draw_from_optimizer:
result_message = await response_message.edit_original_response(
content="Image variation completed!",
embed=embed,
file=file,
)
await result_message.edit(
view=SaveView(
ctx,
image_urls,
image_service_cog,
image_service_cog.converser_cog,
result_message,
True,
custom_api_key=custom_api_key,
)
)
else:
result_message = await response_message.edit_original_response(
content="I've drawn the optimized prompt!",
embed=embed,
file=file,
)
await result_message.edit(
view=SaveView(
ctx,
image_urls,
image_service_cog,
image_service_cog.converser_cog,
result_message,
custom_api_key=custom_api_key,
)
)
image_service_cog.redo_users[user_id] = RedoUser(
prompt=prompt,
message=ctx,
ctx=ctx,
response=result_message,
instruction=None,
paginator=None,
)
image_service_cog.converser_cog.users_to_interactions[user_id].append(
response_message.id
)
image_service_cog.converser_cog.users_to_interactions[user_id].append(
result_message.id
)
class SaveView(discord.ui.View):
def __init__(
self,
ctx,
image_urls,
cog,
converser_cog,
message,
no_retry=False,
only_save=None,
custom_api_key=None,
):
super().__init__(
timeout=3600 if not only_save else None
) # 1 hour timeout for Retry, Save
self.ctx = ctx
self.image_urls = image_urls
self.cog = cog
self.no_retry = no_retry
self.converser_cog = converser_cog
self.message = message
self.custom_api_key = custom_api_key
for x in range(1, len(image_urls) + 1):
self.add_item(SaveButton(x, image_urls[x - 1]))
if not only_save:
if not no_retry:
self.add_item(
RedoButton(
self.cog,
converser_cog=self.converser_cog,
custom_api_key=self.custom_api_key,
)
)
for x in range(1, len(image_urls) + 1):
self.add_item(
VaryButton(
x,
image_urls[x - 1],
self.cog,
converser_cog=self.converser_cog,
custom_api_key=self.custom_api_key,
)
)
# On the timeout event, override it and we want to clear the items.
async def on_timeout(self):
# Save all the SaveButton items, then clear all the items, then add back the SaveButton items, then
# update the message
self.clear_items()
# Create a new view with the same params as this one, but pass only_save=True
new_view = SaveView(
self.ctx,
self.image_urls,
self.cog,
self.converser_cog,
self.message,
self.no_retry,
only_save=True,
)
# Set the view of the message to the new view
await self.ctx.edit(view=new_view)
class VaryButton(discord.ui.Button):
def __init__(self, number, image_url, cog, converser_cog, custom_api_key):
super().__init__(
style=discord.ButtonStyle.blurple,
label="Vary " + str(number),
custom_id="vary_button" + str(random.randint(10000000, 99999999)),
)
self.number = number
self.image_url = image_url
self.cog = cog
self.converser_cog = converser_cog
self.custom_api_key = custom_api_key
async def callback(self, interaction: discord.Interaction):
user_id = interaction.user.id
interaction_id = interaction.message.id
if interaction_id not in self.converser_cog.users_to_interactions[user_id]:
if len(self.converser_cog.users_to_interactions[user_id]) >= 2:
interaction_id2 = interaction.id
if (
interaction_id2
not in self.converser_cog.users_to_interactions[user_id]
):
await interaction.response.send_message(
content="You can not vary images in someone else's chain!",
ephemeral=True,
)
else:
await interaction.response.send_message(
content="You can only vary for images that you generated yourself!",
ephemeral=True,
)
return
if user_id in self.cog.redo_users:
response_message = await interaction.response.send_message(
content="Varying image number " + str(self.number) + "..."
)
self.converser_cog.users_to_interactions[user_id].append(
response_message.message.id
)
self.converser_cog.users_to_interactions[user_id].append(
response_message.id
)
prompt = self.cog.redo_users[user_id].prompt
asyncio.ensure_future(
ImageService.encapsulated_send(
self.cog,
user_id,
prompt,
interaction.message,
response_message=response_message,
vary=self.image_url,
custom_api_key=self.custom_api_key,
)
)
class SaveButton(discord.ui.Button["SaveView"]):
def __init__(self, number: int, image_url: str):
super().__init__(
style=discord.ButtonStyle.gray,
label="Save " + str(number),
custom_id="save_button" + str(random.randint(1000000, 9999999)),
)
self.number = number
self.image_url = image_url
async def callback(self, interaction: discord.Interaction):
# If the image url doesn't start with "http", then we need to read the file from the URI, and then send the
# file to the user as an attachment.
try:
if not self.image_url.startswith("http"):
with open(self.image_url, "rb") as f:
image = Image.open(BytesIO(f.read()))
temp_file = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
image.save(temp_file.name)
await interaction.response.send_message(
content="Here is your image for download (open original and save)",
file=discord.File(temp_file.name),
ephemeral=True,
)
else:
await interaction.response.send_message(
f"You can directly download this image from {self.image_url}",
ephemeral=True,
)
except Exception as e:
await interaction.response.send_message(f"Error: {e}", ephemeral=True)
traceback.print_exc()
class RedoButton(discord.ui.Button["SaveView"]):
def __init__(self, cog, converser_cog, custom_api_key):
super().__init__(
style=discord.ButtonStyle.danger,
label="Retry",
custom_id="redo_button_draw_main",
)
self.cog = cog
self.converser_cog = converser_cog
self.custom_api_key = custom_api_key
async def callback(self, interaction: discord.Interaction):
user_id = interaction.user.id
interaction_id = interaction.message.id
if interaction_id not in self.converser_cog.users_to_interactions[user_id]:
await interaction.response.send_message(
content="You can only retry for prompts that you generated yourself!",
ephemeral=True,
)
return
        # We have passed the initial check of whether the interaction belongs to the user
if user_id in self.cog.redo_users:
# Get the message and the prompt and call encapsulated_send
ctx = self.cog.redo_users[user_id].ctx
prompt = self.cog.redo_users[user_id].prompt
response_message = self.cog.redo_users[user_id].response
message = await interaction.response.send_message(
"Regenerating the image for your original prompt, check the original message.",
ephemeral=True,
)
self.converser_cog.users_to_interactions[user_id].append(message.id)
asyncio.ensure_future(
ImageService.encapsulated_send(
self.cog,
user_id,
prompt,
ctx,
response_message,
custom_api_key=self.custom_api_key,
)
)
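# A minimal sketch of how the chained conditional expression used for the embed
# title in encapsulated_send resolves for each flag combination. The flag
# values below are made up; no Discord objects are involved.
if __name__ == "__main__":
    for vary, draw_from_optimizer in [(None, None), ("http://img", None), ("http://img", True)]:
        title = (
            "Image Generation Results"
            if not vary
            else "Image Generation Results (Varying)"
            if not draw_from_optimizer
            else "Image Generation Results (Drawing from Optimizer)"
        )
        print(vary, draw_from_optimizer, "->", title)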
| SwarmsDiscord-main | swarmsdiscord/services/image_service.py |
import asyncio
class Message:
def __init__(self, content, channel):
self.content = content
self.channel = channel
# This function will be called by the bot to process the message queue
@staticmethod
async def process_message_queue(message_queue, PROCESS_WAIT_TIME, EMPTY_WAIT_TIME):
while True:
await asyncio.sleep(PROCESS_WAIT_TIME)
# If the queue is empty, sleep for a short time before checking again
if message_queue.empty():
await asyncio.sleep(EMPTY_WAIT_TIME)
continue
# Get the next message from the queue
message = await message_queue.get()
# Send the message
try:
await message.channel.send(message.content)
except Exception:
pass
# Sleep for a short time before processing the next message
# This will prevent the bot from spamming messages too quickly
await asyncio.sleep(PROCESS_WAIT_TIME)
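# A minimal, self-contained sketch of driving process_message_queue with a stub
# channel object. FakeChannel and the wait times below are illustrative
# assumptions; in the bot, `channel` is a real discord.py channel.
if __name__ == "__main__":

    class FakeChannel:
        async def send(self, content):
            print(f"sent: {content}")

    async def demo():
        queue = asyncio.Queue()
        await queue.put(Message("hello from the queue", FakeChannel()))
        consumer = asyncio.create_task(Message.process_message_queue(queue, 0.1, 0.1))
        await asyncio.sleep(0.5)  # give the consumer time to drain the queue once
        consumer.cancel()

    asyncio.run(demo())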
| SwarmsDiscord-main | swarmsdiscord/services/message_queue_service.py |
from pathlib import Path
import aiofiles
from typing import Literal
import tiktoken
class UsageService:
def __init__(self, data_dir: Path):
self.usage_file_path = data_dir / "usage.txt"
# If the usage.txt file doesn't currently exist in the directory, create it and write 0.00 to it.
if not self.usage_file_path.exists():
with self.usage_file_path.open("w") as f:
f.write("0.00")
f.close()
self.tokenizer = tiktoken.get_encoding("cl100k_base")
COST_MAPPING = {
"gpt4": 0.05,
"gpt4-32": 0.1,
"turbo": 0.0019,
"turbo-16": 0.0038,
"davinci": 0.02,
"curie": 0.002,
"embedding": 0.0001,
}
MODEL_COST_MAP = {
"gpt-4": "gpt4",
"gpt-4-32k": "gpt4-32",
"gpt-4-0613": "gpt4",
"gpt-4-32k-0613": "gpt4-32",
"gpt-3.5-turbo": "turbo",
"gpt-3.5-turbo-16k": "turbo-16",
"gpt-3.5-turbo-0613": "turbo",
"gpt-3.5-turbo-16k-0613": "turbo",
"text-davinci-003": "davinci",
"text-curie-001": "curie",
}
    ModeType = Literal["gpt4", "gpt4-32", "turbo", "turbo-16", "davinci", "curie", "embedding"]
@staticmethod
async def get_model_cost(mode: ModeType) -> float:
return UsageService.COST_MAPPING.get(mode, 0)
@staticmethod
async def get_cost_name(model) -> str:
return UsageService.MODEL_COST_MAP.get(model, "davinci")
async def get_price(self, tokens_used, mode: ModeType = None):
tokens_used = int(tokens_used)
price = (tokens_used / 1000) * await self.get_model_cost(
mode
) # This is a very rough estimate
price = round(price, 6)
return price
async def update_usage(
self,
tokens_used,
mode: ModeType = None,
):
tokens_used = int(tokens_used)
price = (tokens_used / 1000) * await self.get_model_cost(mode)
price = round(price, 6)
usage = round(await self.get_usage(), 6)
new_total = round(usage + price, 6)
print(
f"{'Completion' if mode != 'embedding' else 'Embed'} cost -> Old: {str(usage)} | New: {str(new_total)}, used {str(price)} credits"
)
async with aiofiles.open(self.usage_file_path, "w") as f:
await f.write(str(new_total))
await f.close()
async def set_usage(self, usage):
async with aiofiles.open(self.usage_file_path, "w") as f:
await f.write(str(usage))
await f.close()
async def get_usage(self):
async with aiofiles.open(self.usage_file_path, "r") as f:
usage = float((await f.read()).strip())
await f.close()
return usage
def count_tokens(self, text):
res = self.tokenizer.encode(text)
return len(res)
async def update_usage_image(self, image_size):
        # 1024×1024 $0.020 / image
        # 512×512 $0.018 / image
        # 256×256 $0.016 / image
if image_size == "1024x1024":
price = 0.02
elif image_size == "512x512":
price = 0.018
elif image_size == "256x256":
price = 0.016
else:
raise ValueError("Invalid image size")
usage = await self.get_usage()
async with aiofiles.open(self.usage_file_path, "w") as f:
await f.write(str(usage + float(price)))
await f.close()
@staticmethod
def count_tokens_static(text):
tokenizer = tiktoken.get_encoding("cl100k_base")
res = tokenizer.encode(text)
return len(res)
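# A minimal sketch of the per-1000-token pricing arithmetic implemented above.
# The token count and model are hypothetical, and a temporary directory is used
# so that a real usage.txt file is never touched.
if __name__ == "__main__":
    import asyncio
    import tempfile

    async def demo():
        service = UsageService(Path(tempfile.mkdtemp()))
        cost_name = await UsageService.get_cost_name("gpt-3.5-turbo")  # -> "turbo"
        price = await service.get_price(1500, cost_name)  # (1500 / 1000) * 0.0019
        print(cost_name, price)  # -> turbo 0.00285

    asyncio.run(demo())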
| SwarmsDiscord-main | swarmsdiscord/services/usage_service.py |
import asyncio.exceptions
import datetime
import re
import traceback
from collections import defaultdict
import aiofiles
import aiohttp
import discord
import requests
from discord.ext import pages
import unidecode
from models.embed_statics_model import EmbedStatics
from models.image_understanding_model import ImageUnderstandingModel
from services.deletion_service import Deletion
from models.openai_model import Model, Override, Models
from models.user_model import EmbeddedConversationItem, RedoUser
from services.environment_service import EnvService
from services.moderations_service import Moderation
BOT_NAME = EnvService.get_custom_bot_name()
PRE_MODERATE = EnvService.get_premoderate()
image_understanding_model = ImageUnderstandingModel()
class TextService:
def __init__(self):
pass
@staticmethod
async def encapsulated_send(
converser_cog,
id,
prompt,
ctx,
response_message=None,
overrides=None,
instruction=None,
from_ask_command=False,
from_edit_command=False,
model=None,
user=None,
custom_api_key=None,
edited_request=False,
redo_request=False,
from_ask_action=False,
from_other_action=None,
from_message_context=None,
):
"""General service function for sending and receiving gpt generations
Args:
converser_cog (Cog): The conversation cog with our gpt commands
id (user or thread id): A user or thread id for keeping track of conversations
prompt (str): The prompt to use for generation
ctx (ApplicationContext): The interaction which called this
response_message (discord.Message, optional): For when we're doing redos. Defaults to None.
            overrides (Override, optional): Temperature, top_p, frequency penalty and presence penalty overrides for the generation. Defaults to None.
instruction (str, optional): Instruction for use with the edit endpoint. Defaults to None.
from_ask_command (bool, optional): Called from the ask command. Defaults to False.
from_edit_command (bool, optional): Called from the edit command. Defaults to False.
model (str, optional): Which model to generate output with. Defaults to None.
user (discord.User, optional): An user object that can be used to set the stop. Defaults to None.
custom_api_key (str, optional): per-user api key. Defaults to None.
edited_request (bool, optional): If we're doing an edited message. Defaults to False.
redo_request (bool, optional): If we're redoing a previous prompt. Defaults to False.
            from_ask_action (bool, optional): If the function is being called from the ask message action. Defaults to False.
"""
new_prompt, _new_prompt_clean = (
prompt # + "\n" + BOT_NAME
if not from_ask_command and not from_edit_command and not redo_request
else prompt
), prompt
stop = f"{ctx.author.display_name if user is None else user.display_name}:"
from_context = isinstance(ctx, discord.ApplicationContext)
if not instruction:
tokens = converser_cog.usage_service.count_tokens(new_prompt)
else:
tokens = converser_cog.usage_service.count_tokens(
new_prompt
) + converser_cog.usage_service.count_tokens(instruction)
try:
user_displayname = (
ctx.author.display_name if not user else user.display_name
)
# Pinecone is enabled, we will create embeddings for this conversation.
if (
converser_cog.pinecone_service
and ctx.channel.id in converser_cog.conversation_threads
):
for item in converser_cog.conversation_threads[ctx.channel.id].history:
if item.text.strip() == BOT_NAME + "<|endofstatement|>":
converser_cog.conversation_threads[
ctx.channel.id
].history.remove(item)
# The conversation_id is the id of the thread
conversation_id = ctx.channel.id
# Create an embedding and timestamp for the prompt
# new_prompt = prompt.encode("ascii", "ignore").decode()
new_prompt = unidecode.unidecode(new_prompt)
prompt_less_author = f"{new_prompt} <|endofstatement|>\n"
new_prompt = f"\n{user_displayname}: {new_prompt} <|endofstatement|>\n"
# new_prompt = new_prompt.encode("ascii", "ignore").decode()
new_prompt = unidecode.unidecode(new_prompt)
timestamp = int(
str(datetime.datetime.now().timestamp()).replace(".", "")
)
new_prompt_item = EmbeddedConversationItem(new_prompt, timestamp)
if not redo_request:
converser_cog.conversation_threads[conversation_id].history.append(
new_prompt_item
)
if edited_request:
new_prompt = "".join(
[
item.text
for item in converser_cog.conversation_threads[
ctx.channel.id
].history
]
)
converser_cog.redo_users[ctx.author.id].prompt = new_prompt
else:
# Create and upsert the embedding for the conversation id, prompt, timestamp
await converser_cog.pinecone_service.upsert_conversation_embedding(
converser_cog.model,
conversation_id,
new_prompt,
timestamp,
custom_api_key=custom_api_key,
)
# Print all phrases
embedding_prompt_less_author = await converser_cog.model.send_embedding_request(
prompt_less_author, custom_api_key=custom_api_key
) # Use the version of the prompt without the author's name for better clarity on retrieval.
# Now, build the new prompt by getting the X most similar with pinecone
similar_prompts = converser_cog.pinecone_service.get_n_similar(
conversation_id,
embedding_prompt_less_author,
n=converser_cog.model.num_conversation_lookback,
)
# We use the pretext to build our new history
_prompt_with_history = [
converser_cog.conversation_threads[ctx.channel.id].history[0]
]
# If there's an opener we add it to the history
if converser_cog.conversation_threads[ctx.channel.id].has_opener:
_prompt_with_history += [
converser_cog.conversation_threads[ctx.channel.id].history[
1
]
]
# Append the similar prompts to the prompt with history
_prompt_with_history += [
EmbeddedConversationItem(prompt, timestamp)
for prompt, timestamp in similar_prompts
]
# iterate UP TO the last X prompts in the history
for i in range(
1,
min(
len(
converser_cog.conversation_threads[
ctx.channel.id
].history
),
converser_cog.model.num_static_conversation_items,
),
):
_prompt_with_history.append(
converser_cog.conversation_threads[ctx.channel.id].history[
-i
]
)
# remove duplicates from prompt_with_history and set the conversation history
_prompt_with_history = list(dict.fromkeys(_prompt_with_history))
# Sort the prompt_with_history by increasing timestamp if pinecone is enabled
if converser_cog.pinecone_service:
_prompt_with_history.sort(key=lambda x: x.timestamp)
# Remove the last two entries after sort, this is from the end of the list as prompt(redo), answer, prompt(original), leaving only prompt(original) and further history
if redo_request:
_prompt_with_history = _prompt_with_history[:-2]
converser_cog.conversation_threads[
ctx.channel.id
].history = _prompt_with_history
# Ensure that the last prompt in this list is the prompt we just sent (new_prompt_item)
if _prompt_with_history[-1].text != new_prompt_item.text:
try:
_prompt_with_history.remove(new_prompt_item)
except ValueError:
pass
_prompt_with_history.append(new_prompt_item)
prompt_with_history = "".join(
[item.text for item in _prompt_with_history]
)
new_prompt = prompt_with_history + "\n" + BOT_NAME
tokens = converser_cog.usage_service.count_tokens(new_prompt)
# No pinecone, we do conversation summarization for long term memory instead
elif (
id in converser_cog.conversation_threads
and tokens > converser_cog.model.summarize_threshold
and not from_ask_command
and not from_edit_command
and not converser_cog.pinecone_service
# This should only happen if we are not doing summarizations.
):
# We don't need to worry about the differences between interactions and messages in this block,
# because if we are in this block, we can only be using a message object for ctx
if converser_cog.model.summarize_conversations:
await ctx.reply(
"I'm currently summarizing our current conversation so we can keep chatting, "
"give me one moment!"
)
await converser_cog.summarize_conversation(ctx, new_prompt)
# Check again if the prompt is about to go past the token limit
new_prompt = (
"".join(
[
item.text
for item in converser_cog.conversation_threads[
id
].history
]
)
+ "\n"
+ BOT_NAME
)
tokens = converser_cog.usage_service.count_tokens(new_prompt)
if (
tokens > converser_cog.model.summarize_threshold
): # 150 is a buffer for the second stage
await ctx.reply(
"I tried to summarize our current conversation so we could keep chatting, "
"but it still went over the token "
"limit. Please try again later."
)
await converser_cog.end_conversation(ctx)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, False, False
)
return
else:
await ctx.reply("The conversation context limit has been reached.")
await converser_cog.end_conversation(ctx)
return
# Send the request to the model
is_chatgpt_conversation = (
ctx.channel.id in converser_cog.conversation_threads
and not from_ask_command
and not from_edit_command
and (
(
model is not None
and (
model in Models.CHATGPT_MODELS
or (model == "chatgpt" or "gpt-4" in model)
)
)
or (
model is None
and converser_cog.model.model in Models.CHATGPT_MODELS
)
)
)
delegator = model or converser_cog.model.model
is_chatgpt_request = (
delegator in Models.CHATGPT_MODELS or delegator in Models.GPT4_MODELS
)
# Set some variables if a user or channel has a system instruction set
if ctx.author.id in converser_cog.instructions:
system_instruction = converser_cog.instructions[ctx.author.id].prompt
usage_message = "***Added user instruction to prompt***"
tokens += converser_cog.usage_service.count_tokens(system_instruction)
elif ctx.channel.id in converser_cog.instructions:
system_instruction = converser_cog.instructions[ctx.channel.id].prompt
usage_message = "***Added channel instruction to prompt***"
tokens += converser_cog.usage_service.count_tokens(system_instruction)
else:
system_instruction = None
usage_message = None
if is_chatgpt_conversation:
_prompt_with_history = converser_cog.conversation_threads[
ctx.channel.id
].history
response = await converser_cog.model.send_chatgpt_chat_request(
_prompt_with_history,
model=model,
bot_name=BOT_NAME,
user_displayname=user_displayname,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
frequency_penalty_override=overrides.frequency_penalty,
presence_penalty_override=overrides.presence_penalty,
stop=stop if not from_ask_command else None,
custom_api_key=custom_api_key,
)
elif from_edit_command:
response = await converser_cog.model.send_edit_request(
text=new_prompt,
instruction=instruction,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
custom_api_key=custom_api_key,
)
else:
response = await converser_cog.model.send_request(
new_prompt,
tokens=tokens,
temp_override=overrides.temperature,
top_p_override=overrides.top_p,
frequency_penalty_override=overrides.frequency_penalty,
presence_penalty_override=overrides.presence_penalty,
model=model,
stop=stop if not from_ask_command else None,
custom_api_key=custom_api_key,
is_chatgpt_request=is_chatgpt_request,
system_instruction=system_instruction,
)
# Clean the request response
response_text = (
converser_cog.cleanse_response(str(response["choices"][0]["text"]))
if not is_chatgpt_request
and not is_chatgpt_conversation
or from_edit_command
else converser_cog.cleanse_response(
str(response["choices"][0]["message"]["content"])
)
)
if from_message_context:
response_text = f"{response_text}"
response_text = (
f"{usage_message}\n\n{response_text}"
if system_instruction
else response_text
)
elif from_other_action:
response_text = f"***{from_other_action}*** {response_text}"
response_text = (
f"{usage_message}\n\n{response_text}"
if system_instruction
else response_text
)
elif from_ask_command or from_ask_action:
response_model = response["model"]
if "gpt-3.5" in response_model or "gpt-4" in response_model:
response_text = (
f"\n\n{response_text}"
if not response_text.startswith("\n\n")
else response_text
)
response_text = f"***{prompt}***{response_text}"
response_text = (
f"{usage_message}\n\n{response_text}"
if system_instruction
else response_text
)
elif from_edit_command:
response_text = response_text.strip()
response_text = f"***Prompt:***\n {prompt}\n\n***Instruction:***\n {instruction}\n\n***Response:***\n {response_text}"
# If gpt tries writing a user mention try to replace it with their name
response_text = await converser_cog.mention_to_username(ctx, response_text)
# If the user is conversing, add the GPT response to their conversation history.
if (
ctx.channel.id in converser_cog.conversation_threads
and not from_ask_command
and not converser_cog.pinecone_service
):
if not redo_request:
converser_cog.conversation_threads[ctx.channel.id].history.append(
EmbeddedConversationItem(
"\n"
+ BOT_NAME
+ str(response_text)
+ "<|endofstatement|>\n",
0,
)
)
# Embeddings case!
elif (
ctx.channel.id in converser_cog.conversation_threads
and not from_ask_command
and not from_edit_command
and converser_cog.pinecone_service
):
conversation_id = ctx.channel.id
# A cleaner version for the convo history
response_text_clean = str(response_text)
# Create an embedding and timestamp for the prompt
response_text = (
"\n" + BOT_NAME + str(response_text) + "<|endofstatement|>\n"
)
# response_text = response_text.encode("ascii", "ignore").decode()
response_text = unidecode.unidecode(response_text)
# Print the current timestamp
timestamp = int(
str(datetime.datetime.now().timestamp()).replace(".", "")
)
converser_cog.conversation_threads[conversation_id].history.append(
EmbeddedConversationItem(response_text, timestamp)
)
# Create and upsert the embedding for the conversation id, prompt, timestamp
embedding = (
await converser_cog.pinecone_service.upsert_conversation_embedding(
converser_cog.model,
conversation_id,
response_text,
timestamp,
custom_api_key=custom_api_key,
)
)
# Cleanse again
response_text = converser_cog.cleanse_response(response_text)
converser_cog.full_conversation_history[ctx.channel.id].append(
response_text
)
# escape any other mentions like @here or @everyone
response_text = discord.utils.escape_mentions(response_text)
# If we don't have a response message, we are not doing a redo, send as a new message(s)
if not response_message:
if len(response_text) > converser_cog.TEXT_CUTOFF:
if not from_context:
paginator = None
response_message = await converser_cog.paginate_and_send(
response_text, ctx
)
else:
embed_pages = await converser_cog.paginate_embed(response_text)
view = ConversationView(
ctx,
converser_cog,
ctx.channel.id,
model,
from_ask_command,
from_edit_command,
custom_api_key=custom_api_key,
)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
custom_view=view,
author_check=True,
)
try:
response_message = await paginator.respond(ctx.interaction)
except:
response_message = await paginator.send(ctx.channel)
else:
paginator = None
if not from_context:
response_message = await ctx.reply(
response_text,
view=ConversationView(
ctx,
converser_cog,
ctx.channel.id,
model,
custom_api_key=custom_api_key,
),
)
elif from_edit_command:
response_message = await ctx.respond(
embed=EmbedStatics.get_edit_command_output_embed(
response_text
),
view=ConversationView(
ctx,
converser_cog,
ctx.channel.id,
model,
from_edit_command=from_edit_command,
custom_api_key=custom_api_key,
),
)
else:
response_message = await ctx.respond(
response_text,
view=ConversationView(
ctx,
converser_cog,
ctx.channel.id,
model,
from_ask_command=from_ask_command,
custom_api_key=custom_api_key,
),
)
converser_cog.redo_users[ctx.author.id] = RedoUser(
prompt=new_prompt if not converser_cog.pinecone_service else prompt,
instruction=instruction,
ctx=ctx,
message=ctx,
response=response_message,
paginator=paginator,
)
converser_cog.redo_users[ctx.author.id].add_interaction(
response_message.id
)
# We are doing a redo, edit the message.
else:
paginator = converser_cog.redo_users.get(ctx.author.id).paginator
if isinstance(paginator, pages.Paginator):
embed_pages = await converser_cog.paginate_embed(response_text)
view = ConversationView(
ctx,
converser_cog,
ctx.channel.id,
model,
from_ask_command,
from_edit_command,
custom_api_key=custom_api_key,
)
await paginator.update(pages=embed_pages, custom_view=view)
elif len(response_text) > converser_cog.TEXT_CUTOFF:
if not from_context:
await response_message.channel.send(
"Over 2000 characters", delete_after=5
)
else:
if not from_edit_command:
await response_message.edit(content=response_text)
else:
await response_message.edit(
embed=EmbedStatics.get_edit_command_output_embed(
response_text
)
)
await converser_cog.send_debug_message(
converser_cog.generate_debug_message(prompt, response),
converser_cog.debug_channel,
)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
)
# Error catching for AIOHTTP Errors
except aiohttp.ClientResponseError as e:
embed = EmbedStatics.get_invalid_api_response_embed(e)
if from_context:
await ctx.send_followup(embed=embed)
else:
await ctx.reply(embed=embed)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
)
except asyncio.exceptions.TimeoutError as e:
embed = EmbedStatics.get_api_timeout_embed()
if from_context:
await ctx.send_followup(embed=embed)
else:
await ctx.reply(embed=embed)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
)
# Error catching for OpenAI model value errors
except ValueError as e:
embed = EmbedStatics.get_invalid_value_embed(e)
if from_ask_action:
await ctx.respond(embed=embed, ephemeral=True)
elif from_context:
await ctx.send_followup(embed=embed, ephemeral=True)
else:
await ctx.reply(e)
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
)
# General catch case for everything
except Exception as e:
embed = EmbedStatics.get_general_error_embed(e)
try:
await ctx.channel.send(embed=embed)
except:
pass
converser_cog.remove_awaiting(
ctx.author.id, ctx.channel.id, from_ask_command, from_edit_command
)
traceback.print_exc()
try:
await converser_cog.end_conversation(ctx)
except Exception:
pass
return
@staticmethod
async def process_conversation_message(
converser_cog, message, USER_INPUT_API_KEYS, USER_KEY_DB, file=None
):
content = message.content.strip()
conversing = converser_cog.check_conversing(message.channel.id, content)
# If the user is conversing and they want to end it, end it immediately before we continue any further.
if conversing and message.content.lower() in converser_cog.END_PROMPTS:
await converser_cog.end_conversation(message)
return
if conversing:
# Pre-moderation check
if PRE_MODERATE:
if await Moderation.simple_moderate_and_respond(
message.content, message
):
await message.delete()
return
user_api_key = None
if USER_INPUT_API_KEYS:
user_api_key = await TextService.get_user_api_key(
message.author.id, message, USER_KEY_DB
)
if not user_api_key:
return
prompt = await converser_cog.mention_to_username(message, content)
if await converser_cog.check_conversation_limit(message):
return
# If the user is in a conversation thread
if message.channel.id in converser_cog.conversation_threads:
# Since this is async, we don't want to allow the user to send another prompt while a conversation
# prompt is processing, that'll mess up the conversation history!
if message.author.id in converser_cog.awaiting_responses:
resp_message = await message.reply(
embed=discord.Embed(
title=f"You are already waiting for a response, please wait and speak afterwards.",
color=0x808080,
)
)
try:
await resp_message.channel.trigger_typing()
except:
pass
# get the current date, add 10 seconds to it, and then turn it into a timestamp.
# we need to use our deletion service because this isn't an interaction, it's a regular message.
deletion_time = datetime.datetime.now() + datetime.timedelta(
seconds=5
)
deletion_time = deletion_time.timestamp()
deletion_message = Deletion(resp_message, deletion_time)
deletion_original_message = Deletion(message, deletion_time)
await converser_cog.deletion_queue.put(deletion_message)
await converser_cog.deletion_queue.put(deletion_original_message)
return
if message.channel.id in converser_cog.awaiting_thread_responses:
resp_message = await message.reply(
embed=discord.Embed(
title=f"This thread is already waiting for a response, please wait and speak afterwards.",
color=0x808080,
)
)
try:
await resp_message.channel.trigger_typing()
except:
pass
# get the current date, add 10 seconds to it, and then turn it into a timestamp.
# we need to use our deletion service because this isn't an interaction, it's a regular message.
deletion_time = datetime.datetime.now() + datetime.timedelta(
seconds=5
)
deletion_time = deletion_time.timestamp()
deletion_message = Deletion(resp_message, deletion_time)
deletion_original_message = Deletion(message, deletion_time)
await converser_cog.deletion_queue.put(deletion_message)
await converser_cog.deletion_queue.put(deletion_original_message)
return
if file and image_understanding_model.get_is_usable():
thinking_embed = discord.Embed(
title=f"π€π¬ Interpreting attachment...",
color=0x808080,
)
thinking_embed.set_footer(text="This may take a few seconds.")
try:
thinking_message = await message.reply(embed=thinking_embed)
except:
traceback.print_exc()
pass
try:
await message.channel.trigger_typing()
except Exception:
pass
async with aiofiles.tempfile.NamedTemporaryFile(
delete=False
) as temp_file:
await file.save(temp_file.name)
try:
image_caption, image_qa, image_ocr = await asyncio.gather(
asyncio.to_thread(
image_understanding_model.get_image_caption,
temp_file.name,
),
asyncio.to_thread(
image_understanding_model.ask_image_question,
prompt,
temp_file.name,
),
image_understanding_model.do_image_ocr(temp_file.name),
)
prompt = (
f"Image Info-Caption: {image_caption}\nImage Info-QA: {image_qa}\nImage Info-OCR: {image_ocr}\n"
+ prompt
)
try:
await thinking_message.delete()
except:
pass
except Exception:
traceback.print_exc()
await message.reply(
"I wasn't able to understand the file you gave me."
)
await thinking_message.delete()
return
converser_cog.awaiting_responses.append(message.author.id)
converser_cog.awaiting_thread_responses.append(message.channel.id)
if not converser_cog.pinecone_service:
converser_cog.conversation_threads[
message.channel.id
].history.append(
EmbeddedConversationItem(
f"\n{message.author.display_name}: {prompt} <|endofstatement|>\n",
0,
)
)
# increment the conversation counter for the user
converser_cog.conversation_threads[message.channel.id].count += 1
# Send the request to the model
# If conversing, the prompt to send is the history, otherwise, it's just the prompt
if (
converser_cog.pinecone_service
or message.channel.id not in converser_cog.conversation_threads
):
primary_prompt = prompt
else:
primary_prompt = "".join(
[
item.text
for item in converser_cog.conversation_threads[
message.channel.id
].history
]
)
# set conversation overrides
conversation_overrides = converser_cog.conversation_threads[
message.channel.id
].get_overrides()
overrides = Override(
conversation_overrides["temperature"],
conversation_overrides["top_p"],
conversation_overrides["frequency_penalty"],
conversation_overrides["presence_penalty"],
)
# Send an embed that tells the user that the bot is thinking
thinking_embed = discord.Embed(
title=f"π€π¬ Thinking...",
color=0x808080,
)
thinking_embed.set_footer(text="This may take a few seconds.")
try:
thinking_message = await message.reply(embed=thinking_embed)
except:
pass
try:
await message.channel.trigger_typing()
except Exception:
pass
converser_cog.full_conversation_history[message.channel.id].append(prompt)
if not converser_cog.pinecone_service:
primary_prompt += BOT_NAME
await TextService.encapsulated_send(
converser_cog,
message.channel.id,
primary_prompt,
message,
overrides=overrides,
model=converser_cog.conversation_threads[message.channel.id].model,
custom_api_key=user_api_key,
)
# Delete the thinking embed
await thinking_message.delete()
return True
@staticmethod
async def get_user_api_key(user_id, ctx, USER_KEY_DB):
user_api_key = None if user_id not in USER_KEY_DB else USER_KEY_DB[user_id]
if user_api_key is None or user_api_key == "":
modal = SetupModal(user_key_db=USER_KEY_DB)
if isinstance(ctx, discord.ApplicationContext):
await ctx.send_modal(modal)
await ctx.send_followup(
"You must set up your API key before using this command."
)
else:
await ctx.reply(
"You must set up your API key before typing in a GPT powered channel, type `/setup` to enter your API key."
)
return user_api_key
@staticmethod
async def process_conversation_edit(converser_cog, after, original_message):
if after.author.id in converser_cog.redo_users:
if after.id == original_message.get(after.author.id, None):
response_message = converser_cog.redo_users[after.author.id].response
ctx = converser_cog.redo_users[after.author.id].ctx
await response_message.edit(content="Redoing prompt π...")
edited_content = await converser_cog.mention_to_username(
after, after.content
)
if after.channel.id in converser_cog.conversation_threads:
# Remove the last two elements from the history array and add the new <username>: prompt
converser_cog.conversation_threads[
after.channel.id
].history = converser_cog.conversation_threads[
after.channel.id
].history[
:-2
]
pinecone_dont_reinsert = None
if not converser_cog.pinecone_service:
converser_cog.conversation_threads[
after.channel.id
].history.append(
EmbeddedConversationItem(
f"\n{after.author.display_name}: {after.content}<|endofstatement|>\n",
0,
)
)
converser_cog.conversation_threads[after.channel.id].count += 1
conversation_overrides = converser_cog.conversation_threads[
after.channel.id
].get_overrides()
overrides = Override(
conversation_overrides["temperature"],
conversation_overrides["top_p"],
conversation_overrides["frequency_penalty"],
conversation_overrides["presence_penalty"],
)
await TextService.encapsulated_send(
converser_cog,
id=after.channel.id,
prompt=edited_content,
ctx=ctx,
response_message=response_message,
overrides=overrides,
model=converser_cog.conversation_threads[after.channel.id].model,
edited_request=True,
)
if not converser_cog.pinecone_service:
converser_cog.redo_users[after.author.id].prompt = edited_content
#
# Conversation interaction buttons
#
class ConversationView(discord.ui.View):
def __init__(
self,
ctx,
converser_cog,
id,
model,
from_ask_command=False,
from_edit_command=False,
custom_api_key=None,
):
super().__init__(timeout=3600) # 1 hour interval to redo.
self.converser_cog = converser_cog
self.ctx = ctx
self.model = model
self.from_ask_command = from_ask_command
self.from_edit_command = from_edit_command
self.custom_api_key = custom_api_key
self.add_item(
RedoButton(
self.converser_cog,
model=model,
from_ask_command=from_ask_command,
from_edit_command=from_edit_command,
custom_api_key=self.custom_api_key,
)
)
if id in self.converser_cog.conversation_threads:
self.add_item(EndConvoButton(self.converser_cog))
async def on_timeout(self):
# Remove the button from the view/message
self.clear_items()
# Send a message to the user saying the view has timed out
if self.message:
# check if the timeout happens in a thread and if it's locked
if isinstance(self.message.channel, discord.Thread):
if self.message.channel.locked:
return
await self.message.edit(
view=None,
)
else:
await self.ctx.edit(
view=None,
)
class EndConvoButton(discord.ui.Button["ConversationView"]):
def __init__(self, converser_cog):
super().__init__(
style=discord.ButtonStyle.danger,
label="End Conversation",
custom_id="conversation_end",
)
self.converser_cog = converser_cog
async def callback(self, interaction: discord.Interaction):
# Get the user
user_id = interaction.user.id
if (
user_id in self.converser_cog.conversation_thread_owners
and interaction.channel.id
in self.converser_cog.conversation_thread_owners[user_id]
):
try:
await self.converser_cog.end_conversation(
interaction, opener_user_id=interaction.user.id
)
except Exception as e:
print(e)
traceback.print_exc()
await interaction.response.send_message(
e, ephemeral=True, delete_after=30
)
else:
await interaction.response.send_message(
"This is not your conversation to end!", ephemeral=True, delete_after=10
)
class RedoButton(discord.ui.Button["ConversationView"]):
def __init__(
self, converser_cog, model, from_ask_command, from_edit_command, custom_api_key
):
super().__init__(
style=discord.ButtonStyle.danger,
label="Retry",
custom_id="conversation_redo",
)
self.converser_cog = converser_cog
self.model = model
self.from_ask_command = from_ask_command
self.from_edit_command = from_edit_command
self.custom_api_key = custom_api_key
async def callback(self, interaction: discord.Interaction):
# Get the user
user_id = interaction.user.id
if user_id in self.converser_cog.redo_users and self.converser_cog.redo_users[
user_id
].in_interaction(interaction.message.id):
# Get the message and the prompt and call encapsulated_send
prompt = self.converser_cog.redo_users[user_id].prompt
instruction = self.converser_cog.redo_users[user_id].instruction
ctx = self.converser_cog.redo_users[user_id].ctx
response_message = self.converser_cog.redo_users[user_id].response
await interaction.response.send_message(
"Retrying your original request...", ephemeral=True, delete_after=15
)
await TextService.encapsulated_send(
self.converser_cog,
overrides=Override(None, None, None, None),
id=user_id,
prompt=prompt,
instruction=instruction,
ctx=ctx,
model=self.model,
response_message=response_message,
custom_api_key=self.custom_api_key,
redo_request=True,
from_ask_command=self.from_ask_command,
from_edit_command=self.from_edit_command,
)
else:
await interaction.response.send_message(
"You can only redo the most recent prompt that you sent yourself.",
ephemeral=True,
delete_after=10,
)
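# Note: redo_users is assumed to cache, per user id, the last prompt, instruction,
# invoking ctx, and the bot's response message, and in_interaction() presumably checks
# that the clicked message belongs to that cached exchange, so stale Retry buttons on
# older messages fall through to the ephemeral notice above.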
#
# The setup modal when using user input API keys
#
class SetupModal(discord.ui.Modal):
def __init__(self, user_key_db) -> None:
super().__init__(title="API Key Setup")
# Keep a handle to the user key database so the callback can persist the submitted key
self.USER_KEY_DB = user_key_db
self.add_item(
discord.ui.InputText(
label="OpenAI API Key",
placeholder="sk--......",
)
)
async def callback(self, interaction: discord.Interaction):
user = interaction.user
api_key = self.children[0].value
# Validate that the key matches the expected OpenAI format: "sk-" followed by alphanumeric characters
if not re.match(r"sk-[a-zA-Z0-9]{32}", api_key):
await interaction.response.send_message(
"Your API key looks invalid, please check that it is correct before proceeding. Please run the /setup command to set your key.",
ephemeral=True,
delete_after=100,
)
else:
# Before saving the key for the user to the database,
# make a test request with it to ensure that it is valid.
try:
await Model.send_test_request(api_key)
await interaction.response.send_message(
"Your API key was successfully validated.",
ephemeral=True,
delete_after=10,
)
except aiohttp.ClientResponseError as e:
await interaction.response.send_message(
embed=EmbedStatics.get_invalid_api_response_embed(e),
ephemeral=True,
delete_after=30,
)
return
except Exception as e:
await interaction.response.send_message(
f"Your API key looks invalid, the API returned: {e}. Please check that your API key is correct before proceeding",
ephemeral=True,
delete_after=30,
)
return
# Save the key to the database
try:
self.USER_KEY_DB[user.id] = api_key
self.USER_KEY_DB.commit()
await interaction.followup.send(
"Your API key was successfully saved.",
ephemeral=True,
delete_after=10,
)
except Exception:
traceback.print_exc()
await interaction.followup.send(
"There was an error saving your API key.",
ephemeral=True,
delete_after=30,
)
return
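# Illustrative usage (a sketch with hypothetical names, not a confirmed call site):
# the modal is presumably presented from a /setup slash command, passing in the same
# key-value store handle that is later read back when requests are made on the user's
# behalf.
#
#   @discord.slash_command(name="setup")
#   async def setup_command(self, ctx: discord.ApplicationContext):
#       await ctx.send_modal(SetupModal(user_key_db=USER_KEY_DB))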
| SwarmsDiscord-main | swarmsdiscord/services/text_service.py |
import asyncio
import pickle
import traceback
from datetime import datetime
import aiofiles
import discord
from services.environment_service import EnvService
class Pickler:
def __init__(
self,
full_conversation_history,
conversation_threads,
conversation_thread_owners,
instructions,
):
self.full_conversation_history = full_conversation_history
self.conversation_threads = conversation_threads
self.conversation_thread_owners = conversation_thread_owners
self.instructions = instructions
# Called by the bot as a background task to drain the pickle queue and persist conversation state to disk
@staticmethod
async def process_pickle_queue(pickle_queue, PROCESS_WAIT_TIME, EMPTY_WAIT_TIME):
while True:
try:
# If the queue is empty, sleep for a short time before checking again
if pickle_queue.empty():
await asyncio.sleep(EMPTY_WAIT_TIME)
continue
# Get the next object to pickle from the queue
to_pickle = await pickle_queue.get()
# Pickle all the objects inside to_pickle using aiofiles
async with aiofiles.open(
EnvService.save_path()
/ "pickles"
/ "full_conversation_history.pickle",
"wb",
) as f:
await f.write(pickle.dumps(to_pickle.full_conversation_history))
async with aiofiles.open(
EnvService.save_path() / "pickles" / "conversation_threads.pickle",
"wb",
) as f:
await f.write(pickle.dumps(to_pickle.conversation_threads))
async with aiofiles.open(
EnvService.save_path()
/ "pickles"
/ "conversation_thread_owners.pickle",
"wb",
) as f:
await f.write(pickle.dumps(to_pickle.conversation_thread_owners))
async with aiofiles.open(
EnvService.save_path() / "pickles" / "instructions.pickle", "wb"
) as f:
await f.write(pickle.dumps(to_pickle.instructions))
await asyncio.sleep(PROCESS_WAIT_TIME)
except Exception:
traceback.print_exc()
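# Illustrative usage (a sketch with hypothetical names): the bot presumably starts this
# loop as a background task at startup and pushes a fresh Pickler snapshot onto the
# queue whenever conversation state changes; the "pickles" directory under
# EnvService.save_path() is assumed to already exist.
#
#   pickle_queue = asyncio.Queue()
#   asyncio.ensure_future(Pickler.process_pickle_queue(pickle_queue, 30, 5))
#   ...
#   await pickle_queue.put(Pickler(history, threads, owners, instructions))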
| SwarmsDiscord-main | swarmsdiscord/services/pickle_service.py |
| SwarmsDiscord-main | tests/__init__.py |