# Import required libraries
import gradio as gr  # For the web interface
import numpy as np  # For formatting the embedding array as a string
from sentence_transformers import SentenceTransformer  # For embedding the text
import torch  # For GPU detection

# Make the app device agnostic
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load a pretrained Sentence Transformer model and move it to the appropriate device
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
model = model.to(device)

# Function that performs the embedding
def predict(input_text):
    # Calculate embeddings by calling model.encode(), specifying the device
    embeddings = model.encode(input_text, device=device)
    # Convert the full array to a string so every value is shown in the output component
    embeddings_str = np.array2string(embeddings, separator=',', threshold=np.inf)
    return embeddings_str

# Gradio app interface
gradio_app = gr.Interface(
    predict,
    inputs="text", 
    outputs="json",
    title="Text to Vector Generator",
    description="Input a text and get its vector representation using an embedding model (mixedbread-ai/mxbai-embed-large-v1)."
)

if __name__ == "__main__":
    gradio_app.launch()
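
# Usage sketch (assumption: the app is launched locally on Gradio's default
# address http://127.0.0.1:7860). The running interface can also be queried
# programmatically with the gradio_client package; "/predict" is the endpoint
# name gr.Interface derives from the function name above.
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict("Hello world", api_name="/predict")
#   print(result)  # comma-separated string of the embedding values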