Update app.py
app.py CHANGED

@@ -20,6 +20,24 @@ import torch
 import tqdm
 import accelerate
 
+from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
+translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
+translation_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
+
+def english_to_hindi(sentence):
+    translation_tokenizer.src_lang = "en_XX"
+    encoded_hi = translation_tokenizer(sentence, return_tensors="pt")
+    generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id["hi_IN"])
+    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+
+
+def hindi_to_english(sentence):
+    translation_tokenizer.src_lang = "hi_IN"
+    encoded_hi = translation_tokenizer(sentence, return_tensors="pt")
+    generated_tokens = translation_model.generate(**encoded_hi, forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"])
+    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+
+
 llm_model = "mistralai/Mistral-7B-Instruct-v0.2"
 
 
@@ -162,7 +180,8 @@ def conversation(qa_chain, message, history):
 
     # Generate response using QA chain
     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
-    response_answer = response["answer"]
+    #response_answer = response["answer"]
+    response_answer = english_to_hindi(response["answer"])[0]
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
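
A minimal usage sketch of the helpers introduced above, assuming the facebook/mbart-large-50-many-to-many-mmt weights are available locally. Note that batch_decode returns a list of decoded strings, which is why the conversation handler indexes the result with [0]. The input strings here are hypothetical, chosen only for illustration:

# Sketch: round-trip a sentence through the new translation helpers.
hindi = english_to_hindi("How are you?")
print(hindi[0])   # first (and only) decoded Hindi translation in the list

english = hindi_to_english(hindi[0])
print(english[0]) # translated back to English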
|