# embed_text / app.py
# Hugging Face Space by bluuebunny
# Commit 04f8232: "returning json for compatibility"
# Import required libraries
import gradio as gr # For interface
from sentence_transformers import SentenceTransformer # For embedding the text
import torch # For gpu
# Pick the compute device once at startup: GPU if CUDA is available, CPU
# otherwise, so the same code runs unchanged on any machine.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained MiniLM sentence-embedding model and move it to that
# device in one step (`.to()` returns the module itself).
# NOTE(review): trust_remote_code=True should not be needed for this stock
# sentence-transformers model and widens the attack surface — confirm
# before keeping it.
model = SentenceTransformer(
    'sentence-transformers/all-MiniLM-L6-v2',
    trust_remote_code=True,
).to(device)
# Function that does the embedding
def predict(input_text):
    """Embed ``input_text`` with the module-level SentenceTransformer model.

    Args:
        input_text: The text (a single string) to embed.

    Returns:
        list[float]: The embedding vector as a plain Python list.
        ``model.encode`` returns a ``numpy.ndarray``, which the standard
        ``json`` serializer used by the Gradio ``"json"`` output component
        cannot handle — ``.tolist()`` makes the result JSON-serializable
        without changing the numeric content.
    """
    # Compute the embedding on the same device the model lives on.
    embeddings = model.encode(input_text, device=device)
    return embeddings.tolist()
# Wire up the web UI: one free-text input box, and the embedding vector
# rendered through Gradio's JSON output component.
gradio_app = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="json",
    title="Text to Vector Generator",
    description="Input a text and get its vector representation using an embedding model.",
)

# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    gradio_app.launch()