Abs6187 committed
Commit dbce7ce · verified · 1 Parent(s): dd87cb1

Update app.py

Files changed (1)
  1. app.py +44 -24
app.py CHANGED
@@ -1,6 +1,4 @@
 import os
-import time
-import json
 import gradio as gr
 from openai import OpenAI
 
@@ -10,11 +8,10 @@ API_HOSTS = {
 }
 
 MODELS_INFO = {
-    "gpt-3.5-turbo": {"input_price": "0.0035", "output_price": "0.0105", "features": "Default model, equivalent to gpt-3.5-turbo-0125"},
-    "o1-preview": {"input_price": "0.105", "output_price": "0.42", "features": "Powerful preview reasoning model"},
-    "gpt-4o": {"input_price": "0.0175", "output_price": "0.07", "features": "Cheaper and faster GPT-4O; supports image fee"},
-    "gpt-4-turbo": {"input_price": "0.07", "output_price": "0.21", "features": "Multimodal with image recognition, function tools support"},
-    "gpt-4o-ca": {"input_price": "0.01", "output_price": "0.04", "features": "Cheap CA variant, but limited stability/prior daily limit"}
+    "gpt-3.5-turbo": {"input_price": "0.0035", "output_price": "0.0105", "features": "Default, fast, affordable"},
+    "gpt-4o": {"input_price": "0.0175", "output_price": "0.07", "features": "Cheaper & faster GPT-4O"},
+    "gpt-4-turbo": {"input_price": "0.07", "output_price": "0.21", "features": "Multimodal, tool use"},
+    "gpt-4o-ca": {"input_price": "0.01", "output_price": "0.04", "features": "CA variant, daily free limit"},
 }
 
 def create_client(host):
@@ -37,11 +34,15 @@ def get_model_card(model_name):
 def respond(user, history, host_choice, model_name, temperature, top_p, max_tokens, sys_prompt):
     history = history or []
     if not user.strip():
-        return history + [("", "⚠️ Please enter a message.")]
+        yield history + [("", "⚠️ Please enter a message.")]
+        return
+
     try:
         client = create_client(API_HOSTS[host_choice])
     except Exception as e:
-        return history + [("", f"❌ {e}")]
+        yield history + [("", f"❌ {e}")]
+        return
+
     messages = [{"role": "system", "content": sys_prompt or "You are a helpful assistant."}]
     for u, a in history:
         messages.append({"role": "user", "content": u})
@@ -49,45 +50,64 @@ def respond(user, history, host_choice, model_name, temperature, top_p, max_tokens, sys_prompt):
     messages.append({"role": "user", "content": user})
 
     try:
-        resp = client.chat.completions.create(
+        stream = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature,
            top_p=top_p,
-            max_tokens=max_tokens
+            max_tokens=max_tokens,
+            stream=True
        )
-        out = resp.choices[0].message.content.strip() or "No response received."
+
+        partial = ""
+        history.append((user, partial))
+        yield history
+
+        for chunk in stream:
+            delta = chunk.choices[0].delta.content or ""
+            partial += delta
+            history[-1] = (user, partial)
+            yield history
+
     except Exception as e:
         err = str(e)
         if "429" in err:
             out = (
-                "🚫 Daily quota reached for the selected model (429).\n"
-                "Please try again after 00:00 (China time) or switch models/hosts."
+                "🚫 Daily quota reached for this model.\n"
+                "Please try again after 00:00 China time or switch model/host."
             )
         else:
             out = f"❌ API Error: {e}"
-    history.append((user, out))
-    return history
+        history.append((user, out))
+        yield history
 
-with gr.Blocks(title="ChatAnywhere-powered Chatbot", theme=gr.themes.Soft()) as demo:
-    gr.Markdown("## ChatAnywhere Chatbot\nPowered by the ChatAnywhere API — Use wisely!")
+with gr.Blocks(title="ChatAnywhere Realtime Chatbot", theme=gr.themes.Soft()) as demo:
+    gr.Markdown("## 💬 ChatAnywhere Realtime Chatbot\nPowered by GPT-5 via ChatAnywhere API")
     with gr.Row():
         with gr.Column(scale=3):
-            chat = gr.Chatbot(label="Chat", height=500, show_copy_button=True, render_markdown=True)
-            msg = gr.Textbox(placeholder="Type your message...", lines=2)
-            clear = gr.Button("Clear")
+            chat = gr.Chatbot(label="Conversation", height=500, show_copy_button=True, render_markdown=True)
+            with gr.Row():
+                msg = gr.Textbox(placeholder="Type your message...", lines=2, scale=4)
+                send = gr.Button("Send", scale=1)
+                clear = gr.Button("Clear", scale=1)
         with gr.Column(scale=1):
            host = gr.Radio(list(API_HOSTS.keys()), value="Domestic", label="API Host")
            model = gr.Dropdown(list(MODELS_INFO.keys()), value="gpt-3.5-turbo", label="Model")
            model_card = gr.Markdown(get_model_card("gpt-3.5-turbo"))
            temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="Temperature")
            top_p = gr.Slider(0.05, 1.0, value=1.0, step=0.05, label="Top-p")
-            max_tokens = gr.Slider(64, 8192, value=512, step=64, label="Max Tokens")
+            max_tokens = gr.Slider(64, 4096, value=512, step=64, label="Max Tokens")
            sys_prompt = gr.Textbox(label="System Prompt (optional)", lines=2)
-    msg.submit(respond, [msg, chat, host, model, temperature, top_p, max_tokens, sys_prompt], chat)
+
    model.change(lambda m: get_model_card(m), model, model_card)
-    clear.click(lambda: [], None, chat)
+
+    send.click(respond, [msg, chat, host, model, temperature, top_p, max_tokens, sys_prompt], chat)
+    msg.submit(respond, [msg, chat, host, model, temperature, top_p, max_tokens, sys_prompt], chat)
+
+    send.click(lambda _: "", msg, msg)
    msg.submit(lambda _: "", msg, msg)
 
+    clear.click(lambda: [], None, chat)
+
 if __name__ == "__main__":
     demo.launch()
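
For reference, the substantive change in this commit is the switch from one blocking chat.completions.create() call to a streaming generator that Gradio re-renders on every yield. Below is a minimal, self-contained sketch of that pattern only; the base_url value and the OPENAI_API_KEY environment variable are placeholders (the real app builds its client from API_HOSTS via create_client(), which is outside the visible hunks), and the model name is just the repo's default.

# Sketch only: the streaming pattern used by the new respond(); placeholder host/key.
import os

import gradio as gr
from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),        # placeholder env var name
    base_url="https://api.example.com/v1",      # placeholder; real hosts come from API_HOSTS
)

def respond(user, history):
    history = history or []
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",                  # repo default model
        messages=[{"role": "user", "content": user}],
        stream=True,
    )
    partial = ""
    history.append((user, partial))
    yield history                               # show the user turn and an empty reply bubble right away
    for chunk in stream:
        partial += chunk.choices[0].delta.content or ""   # delta content can be None on some chunks
        history[-1] = (user, partial)
        yield history                           # each yield repaints the Chatbot with the longer reply

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(respond, [msg, chat], chat)      # generator output streams into the Chatbot
    msg.submit(lambda _: "", msg, msg)          # second listener clears the input box, matching the diff's wiring

if __name__ == "__main__":
    demo.launch()

Gradio treats a generator callback as a streaming handler and updates the bound output component on every yield, which is what turns the previously blocking respond() into an incremental one.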