# embed_text / app.py
# Author: bluuebunny — commit cccd2c8 ("Update app.py"), ~1.09 kB
# Import required libraries
import gradio as gr  # For interface
import numpy as np  # For rendering the embedding array as a string
import torch  # For gpu
from sentence_transformers import SentenceTransformer  # For embedding the text
# Pick the best available compute device so the app runs with or without a GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained sentence-embedding model and place it on that device.
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1").to(device)
# Function that does the embedding
def predict(input_text):
    """Embed *input_text* and return the embedding vector as a string.

    Args:
        input_text: Text to embed (a str; model.encode also accepts a
            list of str — presumably only a single string arrives from
            the Gradio text input).

    Returns:
        str: The full embedding array rendered as a comma-separated
        string (no "..." truncation).
    """
    # Calculate embeddings by calling model.encode(), specifying the device
    embeddings = model.encode(input_text, device=device)
    # threshold=np.inf stops numpy from eliding middle elements with "...",
    # so the complete vector is serialized. (Requires the module-level
    # `import numpy as np` — the original file used `np` without importing it,
    # which raised NameError on every request.)
    embeddings_str = np.array2string(embeddings, separator=',', threshold=np.inf)
    return embeddings_str
# Wire the predict function into a Gradio UI: one text box in, JSON out.
gradio_app = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="json",
    title="Text to Vector Generator",
    description=(
        "Input a text and get its vector representation using an embedding "
        "model (mixedbread-ai/mxbai-embed-large-v1)."
    ),
)

# Launch the web server only when executed as a script, not on import.
if __name__ == "__main__":
    gradio_app.launch()