# --- scrape residue from the original Hugging Face Spaces page listing ---
# Spaces:
# Sleeping
# Sleeping
# File size: 1,420 Bytes
# 3e261a2 27e7139 3e261a2 62923ee 687db51 62923ee 3e261a2 62923ee 7b7086e 27e7139 3e261a2 a1f0d23 798cc05 a1f0d23 3e261a2 a1f0d23 (git blame hashes)
# 1-38 (gutter line numbers)
# -------------------------------------------------------------------------
# Import required libraries
import gradio as gr # For interface
from sentence_transformers import SentenceTransformer # For embedding the text
import torch # For gpu
import numpy as np
# Make the app device agnostic: use the GPU when CUDA is available, else fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Load a pretrained Sentence Transformer model and move it to the chosen device.
# NOTE(review): downloads model weights on first run; the UI placeholder below
# suggests this model emits 1024-dim embeddings — confirm against the model card.
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
model = model.to(device)
# Function that does the embedding
def predict(input_text):
    """Embed *input_text* and return the embedding array rendered as a string.

    Args:
        input_text: The text to embed (whatever ``model.encode`` accepts).

    Returns:
        str: The embedding formatted with fixed-point notation, 8 decimal
        places, comma separators, and no truncation.
    """
    # Calculate embeddings by calling model.encode(), specifying the device.
    embeddings = model.encode(input_text, device=device)
    # Pass the formatting options directly to np.array2string instead of
    # calling np.set_printoptions: the latter mutates numpy's process-wide
    # print configuration on every request, leaking formatting state into
    # any other numpy printing done by this process. The rendered output
    # for this call is identical.
    return np.array2string(
        embeddings,
        separator=',',
        threshold=np.inf,
        precision=8,
        suppress_small=True,
        floatmode='fixed',
    )
# Gradio app interface: one text input in, the embedding string out.
gradio_app = gr.Interface(
    fn=predict,  # explicit keyword for clarity; maps the textbox to the embedder
    inputs=gr.Textbox(placeholder="Insert Text", label='Text'),
    outputs=gr.Textbox(
        max_lines=1,
        placeholder='Vector of dimensions 1024',
        label='Vector',
        show_label=True,
        show_copy_button=True,
    ),
    title="Text to Vector Generator",
    description="Embedding model: mixedbread-ai/mxbai-embed-large-v1.",
)

# Launch the web server only when executed as a script (not on import).
# Fixed: the original line carried a stray trailing " |" scrape artifact
# that made the file a syntax error.
if __name__ == "__main__":
    gradio_app.launch()