arshiaafshani committed
Commit d5a5044 · verified · 1 Parent(s): 1f6468c

Update app.py

Files changed (1): app.py (+46 -18)
app.py CHANGED
@@ -3,8 +3,7 @@ from huggingface_hub import hf_hub_download
 from llama_cpp import Llama
 from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
 from llama_cpp_agent.providers import LlamaCppPythonProvider
-from llama_cpp_agent.chat_history import BasicChatHistory
-from llama_cpp_agent.chat_history.messages import Roles
+from llama_cpp_agent.chat_history.messages import UserMessage, AssistantMessage
 
 # ⬇️ Download the model
 hf_hub_download(
@@ -31,17 +30,35 @@ agent = LlamaCppAgent(
 )
 
 # 💬 Response function
-def respond(message, chat_history=[], system_message="You are Arsh...", max_tokens=2048, temperature=0.6, top_p=0.95, top_k=40, repeat_penalty=1.1):
+def respond(message, chat_history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
+    # Settings
+    settings = provider.get_provider_default_settings()
+    settings.temperature = temperature
+    settings.top_k = top_k
+    settings.top_p = top_p
+    settings.max_tokens = max_tokens
+    settings.repeat_penalty = repeat_penalty
+    settings.stream = True
+
+    # Refresh the chat history
     if chat_history is None:
         chat_history = []
 
-    messages = BasicChatHistory()
+    # Build the chat history
+    messages = []
     for msg in chat_history:
-        messages.add_message(msg)
+        role = msg["role"]
+        content = msg["content"]
+        if role == "user":
+            messages.append(UserMessage(content))
+        elif role == "assistant":
+            messages.append(AssistantMessage(content))
 
+    # Get the response stream
     stream = agent.get_chat_response(
         message,
         chat_history=messages,
+        llm_sampling_settings=settings,
         returns_streaming_generator=True,
         print_output=False
     )
@@ -54,22 +71,33 @@ def respond(message, chat_history=[], system_message="You are Arsh...", max_toke
 # 🎛️ Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Arsh-LLM Q4_K_M Chat Demo")
-    gr.ChatInterface(
+
+    with gr.Row():
+        with gr.Column():
+            system_msg = gr.Textbox("You are Arsh, a helpful assistant.", label="System Message", interactive=True)
+            max_tokens = gr.Slider(1, 4096, value=2048, step=1, label="Max Tokens")
+            temperature = gr.Slider(0.1, 4.0, value=0.6, step=0.1, label="Temperature")
+            top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
+            top_k = gr.Slider(0, 100, value=40, step=1, label="Top-k")
+            repeat_penalty = gr.Slider(0.0, 2.0, value=1.1, step=0.1, label="Repetition Penalty")
+
+    chatbot = gr.Chatbot(
+        type="messages",  # ✅ correct format
+        bubble_full_width=False,
+        show_copy_button=True
+    )
+
+    chat_interface = gr.ChatInterface(
         respond,
         additional_inputs=[
-            gr.Textbox("You are Arsh, a helpful assistant.", label="System Message", interactive=True),
-            gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max Tokens"),
-            gr.Slider(minimum=0.1, maximum=4.0, value=0.6, step=0.1, label="Temperature"),
-            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
-            gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k"),
-            gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition Penalty"),
+            system_msg,
+            max_tokens,
+            temperature,
+            top_p,
+            top_k,
+            repeat_penalty
         ],
-        chatbot=gr.Chatbot(
-            type="messages",  # ✅ we set the message type here
-            label="Chat with Arsh-LLM",
-            bubble_full_width=False,
-            show_copy_button=True
-        )
+        chatbot=chatbot
     )
 
 # 🚀 Run the app
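Note: the lines between the second and third hunks, where `respond` consumes `stream`, are not shown in this diff. The sketch below is illustrative only, not the committed code: it shows the usual pattern for a Gradio chat callback fed by a generator like the one `agent.get_chat_response(..., returns_streaming_generator=True)` returns, assuming each yielded item is a plain text chunk; `consume_stream` and the stand-in generator are hypothetical names introduced here.

```python
# Illustrative sketch (not part of app.py): accumulate streamed chunks and
# yield the growing partial reply, which is how gr.ChatInterface renders a
# response incrementally when the callback is a generator.
from typing import Iterator


def consume_stream(stream: Iterator[str]) -> Iterator[str]:
    """Yield the partial reply after each streamed chunk."""
    output = ""
    for chunk in stream:   # each chunk is a piece of generated text
        output += chunk
        yield output       # gr.ChatInterface re-renders the reply on every yield


if __name__ == "__main__":
    # Stand-in generator used instead of a real model call.
    fake_stream = iter(["Hello", ", ", "world", "!"])
    for partial in consume_stream(fake_stream):
        print(partial)
```

With this pattern `respond` itself is a generator, which is what lets the `type="messages"` chatbot stream the reply instead of waiting for the full text.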