amiguel committed on
Commit
27b07a6
·
verified ·
1 Parent(s): 51ad096

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -5
app.py CHANGED
@@ -134,6 +134,12 @@ if prompt := st.chat_input("Ask your inspection question..."):
134
  st.error("🔑 Authentication required! Please enter your Hugging Face token in the sidebar.")
135
  st.stop()
136
 
 
 
 
 
 
 
137
  # Add user message with proper avatar handling
138
  try:
139
  with st.chat_message("user", avatar="👤"):
@@ -144,15 +150,22 @@ if prompt := st.chat_input("Ask your inspection question..."):
144
  st.markdown(prompt)
145
 
146
  st.session_state.messages.append({"role": "user", "content": prompt})
 
 
 
147
 
148
- # Generate and stream response
149
  if model and tokenizer:
150
- with st.chat_message("assistant", avatar="🤖"):
151
- try:
 
 
 
 
 
 
152
  streamer = generate_response(prompt, file_context)
153
  response = st.write_stream(streamer)
154
  st.session_state.messages.append({"role": "assistant", "content": response})
155
- except Exception as e:
156
- st.error(f"⚡ Generation error: {str(e)}")
157
  else:
158
  st.error("🤖 Model not loaded - check your token and connection!")
 
134
  st.error("🔑 Authentication required! Please enter your Hugging Face token in the sidebar.")
135
  st.stop()
136
 
137
+ # Load model if not loaded
138
+ if "model" not in st.session_state:
139
+ st.session_state.model, st.session_state.tokenizer = load_model(hf_token)
140
+ model = st.session_state.model
141
+ tokenizer = st.session_state.tokenizer
142
+
143
  # Add user message with proper avatar handling
144
  try:
145
  with st.chat_message("user", avatar="👤"):
 
150
  st.markdown(prompt)
151
 
152
  st.session_state.messages.append({"role": "user", "content": prompt})
153
+
154
+ # Process file
155
+ file_context = process_file(uploaded_file) if uploaded_file else ""
156
 
157
+ # Generate and stream response with avatar handling
158
  if model and tokenizer:
159
+ try:
160
+ with st.chat_message("assistant", avatar="🤖"):
161
+ streamer = generate_response(prompt, file_context)
162
+ response = st.write_stream(streamer)
163
+ st.session_state.messages.append({"role": "assistant", "content": response})
164
+ except:
165
+ # Fallback if avatar fails
166
+ with st.chat_message("assistant"):
167
  streamer = generate_response(prompt, file_context)
168
  response = st.write_stream(streamer)
169
  st.session_state.messages.append({"role": "assistant", "content": response})
 
 
170
  else:
171
  st.error("🤖 Model not loaded - check your token and connection!")