|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
import warnings |
|
|
|
|
|
# Silence the noisy UserWarning emitted by gradio's chatbot component at
# import/instantiation time; it is cosmetic and would clutter the console.
warnings.filterwarnings("ignore", category=UserWarning, module="gradio.components.chatbot")
|
|
|
|
|
# HerBERT is a Polish BERT-style (masked-LM) checkpoint.
# NOTE(review): loading it with AutoModelForCausalLM looks like a mismatch —
# BERT encoders are not trained for left-to-right generation, so generate()
# output quality will likely be poor. Confirm whether a causal Polish model
# (e.g. a GPT-style checkpoint) was intended here.
model_name = "allegro/herbert-base-cased"

tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
def generate_response(prompt):
    """Generate a sampled continuation of *prompt* with the loaded model.

    Args:
        prompt: Full text prompt (the conversation transcript) to continue.

    Returns:
        The decoded model output as a string. Note that the decoded text
        includes the prompt prefix; callers are expected to strip it.
    """
    # Calling the tokenizer directly (vs .encode) also returns the
    # attention_mask, which generate() needs — without it transformers
    # warns and sampling can be unreliable.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        # max_new_tokens bounds only the generated part. The original
        # max_length=150 counted the prompt too, so once the chat history
        # grew past ~150 tokens there was no room left for a reply.
        max_new_tokens=150,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        # Explicit pad token id silences the "pad_token_id not set"
        # warning generate() otherwise emits on every call.
        pad_token_id=tokenizer.pad_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
def chat(message, history=None):
    """Answer *message* in the context of the running conversation.

    Args:
        message: The user's latest message.
        history: Optional list of (user, ai) message pairs from previous
            turns. Defaults to None so the function also works when the
            caller supplies only the message (e.g. a plain gr.Interface
            wired to a single Textbox, which would otherwise raise
            TypeError for the missing argument).
            NOTE(review): newer Gradio versions may deliver history as a
            list of role dicts rather than pairs — confirm against the
            installed gradio version.

    Returns:
        The model's reply with the echoed prompt stripped off.
    """
    # Build the transcript prompt. "Użytkownik" means "User" in Polish;
    # the original literal was mojibake ("U偶ytkownik") and is repaired here.
    turns = [f"Użytkownik: {human}\nAI: {ai}\n" for human, ai in history or []]
    formatted_history = "".join(turns)

    prompt = f"{formatted_history}Użytkownik: {message}\nAI:"
    response = generate_response(prompt)

    # The decoded output echoes the prompt, so strip only the *leading*
    # occurrence. The original .replace(prompt, "") would also delete any
    # later occurrence of the prompt text inside the answer itself.
    if response.startswith(prompt):
        response = response[len(prompt):]
    return response.strip()
|
|
|
|
|
# gr.ChatInterface calls fn with exactly (message, history) — the two
# arguments chat() expects — and maintains the running history between
# turns. The original gr.Interface wired a single Textbox to the
# two-parameter chat(), which raised a TypeError on every submit and
# never accumulated any conversation history.
demo = gr.ChatInterface(
    fn=chat,
    title="Polski ChatAI",
    # "mną" was mojibake ("mn膮") in the original literal — repaired.
    description="Rozmawiaj ze mną po polsku!",
)

demo.launch()