Update app.py
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 import torch
+import time
 from llama_cpp import Llama
 import os
 from huggingface_hub import hf_hub_download
@@ -19,7 +20,8 @@ def load_model():
     model = Llama(
         model_path=model_path,
         n_ctx=4096,  # Context size
-        n_gpu_layers=-1  # Use every available layer on the GPU if possible
+        n_gpu_layers=-1,  # Use every available layer on the GPU if possible
+        verbose=False  # Disable verbose logging
     )
     return model
 
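The opening of `load_model` (old lines 1–18) falls outside this hunk. For context, a minimal sketch of what it plausibly does, assuming the weights are fetched from the Dorian2B/Vera-v1.5-Instruct-GGUF repo linked later in the diff; the GGUF filename below is a hypothetical placeholder:

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

def load_model():
    # Hypothetical filename: check the repo for the actual GGUF file name
    model_path = hf_hub_download(
        repo_id="Dorian2B/Vera-v1.5-Instruct-GGUF",
        filename="vera-v1.5-instruct-q4_k_m.gguf",
    )
    model = Llama(
        model_path=model_path,
        n_ctx=4096,       # Context size
        n_gpu_layers=-1,  # Use every available layer on the GPU if possible
        verbose=False,    # Disable verbose logging
    )
    return model
```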
@@ -38,31 +40,60 @@ def format_prompt(message, history):
 
     return prompt
 
-# Inference function
+# Inference function with streaming
 def generate_response(message, history):
     if not hasattr(generate_response, "model"):
         generate_response.model = load_model()
 
-    …
+    # Append the user message to the history
+    history = history + [(message, "")]
 
-    …
-    …
+    prompt = format_prompt(message, history[:-1])
+
+    response_text = ""
+
+    # Stream the completion to build the reply progressively
+    for token in generate_response.model.create_completion(
         prompt,
         max_tokens=2048,
         temperature=0.7,
         top_p=0.95,
         stop=["</s>", "<|user|>", "<|system|>"],
-        …
-    )
-    …
-    …
+        stream=True,
+    ):
+        response_text += token["choices"][0]["text"]
+        # Update the message while it is being generated
+        history[-1] = (message, response_text)
+        time.sleep(0.01)  # Slight pause for smooth rendering
+        yield history
 
 # Reset the conversation
 def reset_conversation():
     return [], ""
 
+# Custom CSS to polish the look
+custom_css = """
+footer {visibility: hidden}
+.gradio-container {
+    background-color: #f8f9fa;
+}
+.chatbot-container {
+    border-radius: 15px;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+}
+.chatbot .user-message {
+    background: linear-gradient(135deg, #6e8efb, #a777e3);
+    color: white;
+    border-radius: 15px 15px 0 15px;
+}
+.chatbot .bot-message {
+    background: #f0f2f5;
+    border-radius: 15px 15px 15px 0;
+}
+"""
+
 # Gradio interface
-with gr.Blocks(css="footer {visibility: hidden}") as demo:
+with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
     # 🌟 Assistant Vera-v1.5-Instruct
 
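The body of `format_prompt` is elided by the diff. Judging from the stop strings (`</s>`, `<|user|>`, `<|system|>`), it presumably assembles a tag-delimited chat prompt; a hypothetical sketch (the exact tags and system text may well differ from the real file):

```python
def format_prompt(message, history):
    # Hypothetical template inferred from the stop tokens above
    prompt = "<|system|>\nTu es Vera, un assistant francophone serviable.</s>\n"
    for user_msg, bot_msg in history:
        prompt += f"<|user|>\n{user_msg}</s>\n<|assistant|>\n{bot_msg}</s>\n"
    prompt += f"<|user|>\n{message}</s>\n<|assistant|>\n"
    return prompt
```

Note that `<|assistant|>` is deliberately absent from the stop list: generation continues after that tag and halts only when the model emits a user or system tag.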
@@ -73,10 +104,13 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
     with gr.Row():
         with gr.Column(scale=4):
            chatbot = gr.Chatbot(
-                height=…
+                height=550,
                 show_copy_button=True,
                 avatar_images=("👤", "🤖"),
                 bubble_full_width=False,
+                elem_id="chatbot",
+                container=True,
+                elem_classes="chatbot-container",
             )
 
     with gr.Row():
@@ -84,20 +118,25 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
             message = gr.Textbox(
                 placeholder="Entrez votre message ici...",
                 lines=2,
-                container=…
+                container=True,
                 scale=4,
+                autofocus=True,
             )
         with gr.Column(scale=1):
             with gr.Row():
                 submit_btn = gr.Button("Envoyer", variant="primary", scale=2)
                 reset_btn = gr.Button("Réinitialiser", variant="secondary", scale=1)
 
-    gr.…
-    …
-    …
-    …
-    …
-    …
+    with gr.Accordion("À propos du modèle", open=False):
+        gr.Markdown("""
+        Ce modèle est basé sur **Vera-v1.5-Instruct-GGUF** de [Dorian2B](https://huggingface.co/Dorian2B/Vera-v1.5-Instruct-GGUF).
+        Le modèle est optimisé pour les conversations en français.
+
+        **Paramètres du modèle:**
+        - Température: 0.7
+        - Top-p: 0.95
+        - Contexte: 4096 tokens
+        """)
 
     # Event wiring
     submit_btn.click(
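The body of the `submit_btn.click(` call (new lines 142–166) is elided between these two hunks. A plausible wiring for that section, given that `generate_response` is now a generator yielding updated histories; everything beyond the names visible in the diff is an assumption, and this sits inside the `with gr.Blocks(...)` block above:

```python
    # Stream the generator's updated histories into the chatbot,
    # then clear the textbox once the reply is finished
    submit_btn.click(
        fn=generate_response,
        inputs=[message, chatbot],
        outputs=chatbot,
    ).then(lambda: "", None, message)

    # reset_conversation returns ([], ""), clearing both components
    reset_btn.click(fn=reset_conversation, outputs=[chatbot, message])
```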
@@ -128,4 +167,4 @@
 # Launch the interface
 if __name__ == "__main__":
     demo.queue()
-    demo.launch()
+    demo.launch(share=True, show_error=True)
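Worth noting: `demo.queue()` is what lets the generator-based `generate_response` stream partial chat histories to the browser; in most Gradio versions, yield-based outputs are only served when the queue is enabled, so this line matters for the streaming change above.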