Athspi committed on
Commit
6421d05
·
verified ·
1 Parent(s): 33d5962

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -7
app.py CHANGED
@@ -1,7 +1,13 @@
1
- import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
  import os
 
 
 
 
 
 
5
 
6
  # --- Configuration (Read from Environment Variables) ---
7
 
@@ -30,6 +36,8 @@ try:
30
  tokenizer.pad_token = tokenizer.eos_token
31
  tokenizer.padding_side = "right"
32
 
 
 
33
  except OSError as e:
34
  print(f"Error loading model or tokenizer: {e}")
35
  print("Ensure MODEL_PATH and TOKENIZER_PATH environment variables are set correctly.")
@@ -42,7 +50,18 @@ def chat_with_llm(prompt, history):
42
  """Generates a response from the LLM, handling history correctly."""
43
  formatted_prompt = ""
44
  if history:
 
 
 
 
 
 
 
45
  for item in history:
 
 
 
 
46
  if item["role"] == "user":
47
  formatted_prompt += f"{tokenizer.bos_token}{item['content']}{tokenizer.eos_token}"
48
  elif item["role"] == "assistant":
@@ -72,19 +91,21 @@ def chat_with_llm(prompt, history):
72
  # --- Gradio Interface ---
73
  # Use the 'messages' format for chatbot
74
  def predict(message, history):
75
- history = history or []
76
- response = chat_with_llm(message, history)
77
- history.append({"role": "user", "content": message})
78
- history.append({"role": "assistant", "content": response})
79
- return "", history
 
80
 
81
  with gr.Blocks() as demo:
82
  chatbot = gr.Chatbot(label="Athspi Chat", height=500, show_label=True,
83
  value=[{"role": "assistant", "content": "Hi! I'm Athspi. How can I help you today?"}],
84
- type="messages") # Set type to "messages"
85
  msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
86
  clear = gr.Button("Clear")
87
 
 
88
  msg.submit(predict, [msg, chatbot], [msg, chatbot])
89
  clear.click(lambda: [], [], chatbot, queue=False)
90
 
 
1
# --- Imports ---
# stdlib
import os

# third-party (gradio was previously imported twice; deduplicated)
import gradio as gr
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Log library versions at startup to aid debugging on the hosted Space.
print(f"Gradio version: {gr.__version__}")  # Print Gradio version
print(f"Transformers version: {transformers.__version__}")  # Print Transformers version
10
+
11
 
12
  # --- Configuration (Read from Environment Variables) ---
13
 
 
36
  tokenizer.pad_token = tokenizer.eos_token
37
  tokenizer.padding_side = "right"
38
 
39
+ print("Model and tokenizer loaded successfully!") # Success message
40
+
41
  except OSError as e:
42
  print(f"Error loading model or tokenizer: {e}")
43
  print("Ensure MODEL_PATH and TOKENIZER_PATH environment variables are set correctly.")
 
50
  """Generates a response from the LLM, handling history correctly."""
51
  formatted_prompt = ""
52
  if history:
53
+ print("DEBUG: History variable type:", type(history))
54
+ if history:
55
+ print("DEBUG: Example history item:", history[0]) # Print first history item
56
+ else:
57
+ print("DEBUG: History is empty but should not be in chat turn > 1")
58
+
59
+
60
  for item in history:
61
+ if not isinstance(item, dict) or "role" not in item or "content" not in item: # Check item structure
62
+ print("DEBUG: Invalid history item format:", item) # Debug invalid item
63
+ continue # Skip invalid items instead of crashing
64
+
65
  if item["role"] == "user":
66
  formatted_prompt += f"{tokenizer.bos_token}{item['content']}{tokenizer.eos_token}"
67
  elif item["role"] == "assistant":
 
91
  # --- Gradio Interface ---
92
  # Use the 'messages' format for chatbot
93
def predict(message, history):
    """Run one chat turn against the LLM.

    Generates a reply for ``message`` given the prior conversation, then
    records both the user turn and the assistant turn in messages format.

    Returns:
        tuple: ("", updated_history) — the empty string clears the input
        textbox; the list feeds the ``type="messages"`` Chatbot component.
    """
    convo = history or []  # renamed locally to avoid shadowing confusion
    reply = chat_with_llm(message, convo)
    convo += [
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ]
    return "", convo
99
+
100
 
101
with gr.Blocks() as demo:
    # Messages-format chatbot, seeded with an assistant greeting.
    chatbot = gr.Chatbot(
        label="Athspi Chat",
        height=500,
        show_label=True,
        value=[{"role": "assistant", "content": "Hi! I'm Athspi. How can I help you today?"}],
        type="messages",  # dicts with "role"/"content" keys, not tuples
    )
    msg = gr.Textbox(label="Your Message", placeholder="Type your message here...")
    clear = gr.Button("Clear")

    # Submitting the textbox runs one chat turn and re-renders the history;
    # the Clear button resets the chat to an empty message list.
    msg.submit(predict, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: [], [], chatbot, queue=False)
111