Update app.py
app.py CHANGED
@@ -3,52 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Tuple


-model_name = "Hawoly18/Adia_Llama3.1"
+#model_name = "Hawoly18/Adia_Llama3.1"

-# Check if a GPU is available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
-
-def respond(
-    message: str,
-    history: List[Tuple[str, str]],
-    system_message: str,
-    max_tokens: int,
-    temperature: float,
-    top_p: float,
-) -> str:
-
-    prompt = system_message
-    for user_msg, assistant_msg in history:
-        prompt += f"\nUser: {user_msg}\nAssistant: {assistant_msg}"
-    prompt += f"\nUser: {message}\nAssistant:"
-
-
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(
-        **inputs,
-        max_length=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        do_sample=True,
-    )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("Assistant:")[-1].strip()
-    return response
-
-
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),  # Fixed syntax error
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-    ],
-    title="Chatbot Interface"
-)
-
-if __name__ == "__main__":
-    demo.launch()
+gr.load("Hawoly18/Adia_Llama3.1").launch()