akashraut committed
Commit 1e52ecb · verified · 1 Parent(s): 6ffd709

Delete src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +0 -132
src/streamlit_app.py DELETED
@@ -1,132 +0,0 @@
- # streamlit_app.py
- # A minimal Streamlit app rebuilt with the LangChain framework.
-
- import streamlit as st
- import torch
- from transformers import pipeline
-
- # Updated LangChain imports for modern versions
- from langchain_community.llms import HuggingFacePipeline
- from langchain.prompts import PromptTemplate
- from langchain.chains import LLMChain
- from langchain.memory import ConversationBufferMemory
-
- # -----------------------------------------------------------------------------
- # CORE MODEL LOGIC (Rebuilt with LangChain)
- # -----------------------------------------------------------------------------
- class LangChainBot:
-     def __init__(self):
-         """
-         Loads the models and wraps them in LangChain components.
-         """
-         try:
-             # 1. Load the base Hugging Face pipelines
-             generator_pipeline = pipeline(
-                 "text2text-generation",
-                 model="ai4bharat/IndicBARTSS",
-                 device=0 if torch.cuda.is_available() else -1,
-                 torch_dtype=(torch.float16 if torch.cuda.is_available() else torch.float32),
-                 max_new_tokens=150,
-                 repetition_penalty=1.2
-             )
-
-             # Added `trust_remote_code=True` to allow the special translator model to load.
-             self.translator = pipeline(
-                 "translation",
-                 model="ai4bharat/indictrans2-indic-indic-1B",
-                 device=0 if torch.cuda.is_available() else -1,
-                 trust_remote_code=True
-             )
-
-             # 2. Wrap the generator in a LangChain LLM object
-             llm = HuggingFacePipeline(pipeline=generator_pipeline)
-
-             # 3. Create a Prompt Template
-             template = """
-             You are a helpful conversational AI. Respond to the user's message.
-
-             {history}
-             मनुष्य: {input}
-             सहायक:
-             """
-             prompt_template = PromptTemplate(input_variables=["history", "input"], template=template)
-
-             # 4. Set up conversational memory
-             self.memory = ConversationBufferMemory(memory_key="history")
-
-             # 5. Create the final LLMChain
-             self.chain = LLMChain(
-                 llm=llm,
-                 prompt=prompt_template,
-                 verbose=True,
-                 memory=self.memory
-             )
-
-         except Exception as e:
-             st.error(f"Fatal: Could not load models. Error: {e}")
-             self.chain = None
-             self.translator = None
-
-     def _translate(self, text, source_lang, target_lang):
-         """Translation logic remains the same."""
-         if not self.translator or source_lang == target_lang:
-             return text
-         try:
-             codes = {'english': 'eng_Latn', 'hindi': 'hin_Deva', 'tamil': 'tam_Taml', 'telugu': 'tel_Telu'}
-             result = self.translator(text, src_lang=codes[source_lang], tgt_lang=codes[target_lang])
-             return result[0]['translation_text']
-         except Exception as e:
-             st.warning(f"Translation failed. Error: {e}")
-             return text
-
-     def get_response(self, user_message, input_lang, output_lang):
-         """The main function to get a response."""
-         if not self.chain:
-             return "Error: The LangChain chain is not initialized."
-
-         hindi_message = self._translate(user_message, input_lang, 'hindi')
-         hindi_response = self.chain.run(hindi_message)
-         final_response = self._translate(hindi_response, 'hindi', output_lang)
-
-         return final_response
-
- # -----------------------------------------------------------------------------
- # MINIMAL STREAMLIT UI (This part remains mostly the same)
- # -----------------------------------------------------------------------------
-
- st.set_page_config(layout="centered")
- st.title("LangChain Model Interface")
-
- @st.cache_resource
- def load_bot():
-     return LangChainBot()
-
- bot = load_bot()
-
- if bot and bot.chain:  # Only show the UI if the bot loaded successfully
-     st.markdown("---")
-     language_options = ["english", "hindi", "tamil", "telugu"]
-     col1, col2 = st.columns(2)
-     with col1:
-         input_lang = st.selectbox("Input Language", options=language_options, index=0)
-     with col2:
-         output_lang = st.selectbox("Output Language", options=language_options, index=1)
-
-     user_input = st.text_area("Your Message:", height=100)
-
-     if st.button("Get Response"):
-         if user_input:
-             with st.spinner("LangChain is processing your request..."):
-                 response = bot.get_response(user_input, input_lang, output_lang)
-             st.markdown("### Model Response:")
-             st.info(response)
-         else:
-             st.warning("Please enter a message.")
-
-     # Add a button to clear LangChain's memory
-     if st.button("Clear Conversation Memory"):
-         if hasattr(bot, 'memory'):
-             bot.memory.clear()
-             st.success("Conversation memory has been cleared.")
- else:
-     st.error("Application could not start. Please check the logs.")