Spaces: Running on Zero
Commit: Update app.py (Browse files)

app.py — CHANGED
@@ -103,7 +103,7 @@ def tokenize(tokenizer, input_texts, eod_id, max_length):
     batch_dict = tokenizer.pad(batch_dict, padding=True, return_tensors="pt")
     return batch_dict

-def compute_similarity(embedder: QwenEmbedder, text1: str, text2: str) -> float:
+def compute_similarity(embedder: QwenEmbedder, text1: str, text2: str, model_choice: str = None, embedding_dim: int = None) -> float:
     embeddings = embedder.get_embeddings([text1, text2])
     similarity = torch.cosine_similarity(embeddings[0:1], embeddings[1:2]).item()
     return round(similarity, 3)
@@ -693,7 +693,7 @@ def create_demo():
        similarity_score = gr.Number(label="Similarity Score")

        similarity_btn.click(
-           fn=lambda t1, t2, m, d: process_with_embedder('compute_similarity', t1, t2
+           fn=lambda t1, t2, m, d: process_with_embedder('compute_similarity', t1, t2),
            inputs=[text1, text2, model_choice, embedding_dim],
            outputs=similarity_score
        )
|