File size: 1,250 Bytes
e31868e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import gradio as gr
from huggingface_hub import InferenceClient
import os

# Hugging Face API token, read from the environment (None if the variable
# is unset — the client will then make unauthenticated requests and fail).
HF_TOKEN = os.getenv("HF_TOKEN")

# Inference client routed through the SambaNova provider on the HF Hub.
client = InferenceClient(
    provider="sambanova",
    api_key=HF_TOKEN,
)

def chatbot_response(user_input):
    """Send a single-turn user message to Llama-3.3-70B and return the reply.

    Parameters
    ----------
    user_input : str
        Raw text typed by the user; sent as one ``user`` message with no
        prior conversation context.

    Returns
    -------
    str
        The model's reply text, or a human-readable error description
        (in Portuguese, matching the UI language) if the request fails.
    """
    messages = [{"role": "user", "content": user_input}]

    try:
        completion = client.chat.completions.create(
            model="meta-llama/Llama-3.3-70B-Instruct",
            messages=messages,
            max_tokens=500,
        )
        # Attribute access is the documented huggingface_hub API; the
        # original dict-style message['content'] relies on a compat shim.
        return completion.choices[0].message.content
    except Exception as e:
        # Broad catch is deliberate: any network/auth/provider failure is
        # surfaced to the UI as text instead of crashing the app.
        return f"Erro ao gerar resposta: {str(e)}"

# Building the Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Llama-70B Chatbot - SambaNova")

    # UI widgets: chat-history pane, user text box, and a send button.
    history_pane = gr.Chatbot()
    user_box = gr.Textbox(placeholder="Digite sua mensagem aqui...")
    send_btn = gr.Button("Enviar")

    def _on_send(user_text, history):
        """Handle a send click: query the model, record the exchange,
        and clear the input box."""
        reply = chatbot_response(user_text)
        history.append((user_text, reply))
        return "", history

    # Button wires the handler: inputs (text box, history) -> outputs
    # (cleared text box, updated history).
    send_btn.click(_on_send, [user_box, history_pane], [user_box, history_pane])

# Launch the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()