from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr

model_name = "TheBloke/phi-2-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # or "cuda:0" for a single GPU
    trust_remote_code=True
)

# Text-generation pipeline
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Chat function + persist memory to disk
def chat(user_input, history):
    prompt = user_input
    # Note: temperature only takes effect when sampling is enabled (do_sample=True);
    # by default "generated_text" also includes the prompt (set return_full_text=False to omit it).
    result = pipe(prompt, max_new_tokens=256, temperature=0.7, do_sample=True)[0]["generated_text"]

    # Append the exchange to the memory file
    with open("memoria.txt", "a", encoding="utf-8") as f:
        f.write(f"User: {user_input}\nAI: {result}\n")

    return result

# Gradio interface
with gr.Blocks() as demo:
    chat_history = gr.State([])
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Type your question:")

    def respond(user_input, chat_history):
        answer = chat(user_input, chat_history)
        chat_history.append((user_input, answer))
        return chat_history, chat_history

    msg.submit(respond, [msg, chat_history], [chatbot, chat_history])

demo.launch()
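
# --- Optional sketch (assumption, not part of the original script) ---
# The script above only appends to memoria.txt and never reads it back. The helper
# below is a minimal sketch for preloading earlier turns into the Gradio history on
# startup, assuming each exchange was saved on single lines as "User: ..." / "AI: ...".
import os

def load_memory(path="memoria.txt"):
    """Rebuild (user, ai) pairs from the saved transcript, if it exists."""
    history = []
    if not os.path.exists(path):
        return history
    user_msg = None
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if line.startswith("User: "):
                user_msg = line[len("User: "):]
            elif line.startswith("AI: ") and user_msg is not None:
                history.append((user_msg, line[len("AI: "):]))
                user_msg = None
    return history

# Usage idea: initialize the UI with gr.State(load_memory()) instead of gr.State([])
# and pass the same pairs to gr.Chatbot(value=load_memory()) so previous conversations
# reappear when the app restarts.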