Update app.py
app.py CHANGED
@@ -2,18 +2,15 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import os
 import time
-import re
+import re
 
-# Authentication token
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-# HuggingFace/SambaNova API client
 client = InferenceClient(
     provider="sambanova",
     api_key=HF_TOKEN,
 )
 
-# Available models
 MODELS = {
     "LLaMA 70B": "meta-llama/Llama-3.3-70B-Instruct",
     "Qwen 32B": "Qwen/QwQ-32B",
@@ -52,7 +49,6 @@ def chatbot_response(user_input, model_name):
 
     return response
 
-# Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Row():
         with gr.Column(scale=1):
@@ -87,6 +83,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     history_btn.click(toggle_history, inputs=[], outputs=history_output)
 
-# Run the app
 if __name__ == "__main__":
     demo.launch()
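The body of chatbot_response is not part of this diff; the hunks above only show its return statement. As a rough sketch of how the InferenceClient configured at the top of app.py is typically called with one of the MODELS entries, assuming a single-turn chat completion (the messages structure, max_tokens value, and chat_once helper below are illustrative, not the Space's actual code):

import os
from huggingface_hub import InferenceClient

# Same client setup as in the diff: SambaNova provider, token read from the environment.
client = InferenceClient(
    provider="sambanova",
    api_key=os.getenv("HF_TOKEN"),
)

# Display-name -> repo-id mapping, as in the diff.
MODELS = {
    "LLaMA 70B": "meta-llama/Llama-3.3-70B-Instruct",
    "Qwen 32B": "Qwen/QwQ-32B",
}

def chat_once(user_input: str, model_name: str) -> str:
    # Send one user message and return the model's reply (illustrative helper).
    completion = client.chat.completions.create(
        model=MODELS[model_name],  # resolve the display name to the repo id
        messages=[{"role": "user", "content": user_input}],
        max_tokens=512,  # assumed limit, not taken from the Space
    )
    return completion.choices[0].message.content

if __name__ == "__main__":
    print(chat_once("Hello!", "LLaMA 70B"))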
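On the Gradio side, the hunks only expose fragments of the layout (a gr.Blocks with a Row/Column split, a history_btn wired to toggle_history, and the launch guard). Below is a minimal sketch of how such a Blocks app can wire a model dropdown and a send button to a response function; the component names and layout are guesses rather than the Space's real UI:

import gradio as gr

def chatbot_response(user_input, model_name):
    # Stand-in for the Space's real handler, which calls the InferenceClient shown above.
    return f"[{model_name}] echo: {user_input}"

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            model_choice = gr.Dropdown(
                choices=["LLaMA 70B", "Qwen 32B"],
                value="LLaMA 70B",
                label="Model",
            )
        with gr.Column(scale=3):
            user_box = gr.Textbox(label="Message")
            send_btn = gr.Button("Send")
            output_box = gr.Textbox(label="Response")

    # Same wiring pattern as history_btn.click(...) in the diff.
    send_btn.click(chatbot_response, inputs=[user_box, model_choice], outputs=output_box)

if __name__ == "__main__":
    demo.launch()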