tenet committed · verified
Commit 25e0351 · Parent(s): 3fc80d7

Update app.py

Files changed (1):
  1. app.py +20 -3
app.py CHANGED
@@ -9,7 +9,7 @@ model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")
 # Streamlit UI Setup
 st.title("AI Multimodal Chat & File Processing App")
 
-# Chat history session state setup
+# Session state for chat history
 if "history" not in st.session_state:
     st.session_state.history = []
 
@@ -27,6 +27,7 @@ def chat_with_model(user_input):
     return bot_output
 
 # Chat Input Box
+st.subheader("Chat with AI")
 user_input = st.text_input("You: ", "")
 if user_input:
     response = chat_with_model(user_input)
@@ -39,8 +40,24 @@ if st.session_state.history:
         user_msg = tokenizer.decode(st.session_state.history[i], skip_special_tokens=True)
         st.write(f"You: {user_msg}")
 
-# Video/Audio Stream
-st.subheader("Video/Audio Stream")
+# --- File Upload and Processing ---
+st.subheader("📁 Upload a File for AI to Read")
+
+uploaded_file = st.file_uploader("Choose a text file", type=["txt", "csv", "md", "log"])
+if uploaded_file:
+    content = uploaded_file.read().decode("utf-8")
+    st.text_area("File Content", content, height=200)
+
+    # Allow interaction with file content
+    file_question = st.text_input("Ask something about the file:")
+    if file_question:
+        combined_input = file_question + "\n" + content[:1000]  # Prevent token overload
+        response = chat_with_model(combined_input)
+        st.write(f"Bot: {response}")
+
+# --- Video/Audio Stream ---
+st.subheader("🎥 Video/Audio Stream")
+
 class VideoProcessor(VideoProcessorBase):
     def recv(self, frame):
         return frame
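
Note for readers of the hunks above: the diff reuses a chat_with_model helper, a tokenizer, and a model that are defined elsewhere in app.py and are not part of this commit. The sketch below is only an assumption of how that helper is commonly written for DialoGPT-medium, consistent with the token-ID history stored in st.session_state.history and decoded in the history loop; the actual implementation in the repository may differ.

# Hypothetical reconstruction — chat_with_model is referenced but not defined in this diff.
import torch
import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")

def chat_with_model(user_input):
    # Encode the user turn, terminated with DialoGPT's end-of-sequence token.
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    # Keep raw token IDs in session state; the history loop in the app decodes them later.
    st.session_state.history.append(new_ids[0])
    # Use the whole conversation so far as the generation context.
    context = torch.cat(st.session_state.history).unsqueeze(0)
    output_ids = model.generate(
        context,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the tokens generated after the context, i.e. the bot's reply.
    bot_output = tokenizer.decode(output_ids[0][context.shape[-1]:], skip_special_tokens=True)
    return bot_output

Likewise, the VideoProcessor class at the end of the last hunk presumes a streamlit-webrtc streamer call elsewhere in app.py (for example, webrtc_streamer(key="stream", video_processor_factory=VideoProcessor)); that call is outside the hunks shown here.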