|
import gradio as gr
import torch

from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
|
|
# HerBERT: a Polish BERT-family checkpoint published by Allegro.
model_name = "allegro/herbert-base-cased"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# NOTE(review): herbert-base-cased is a masked-LM (encoder/BERT-style)
# checkpoint; loading it with AutoModelForCausalLM will likely attach a
# randomly initialized LM head, so generated text quality will be poor.
# Consider a genuine Polish decoder/causal model — TODO confirm.
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
def generate_response(prompt):
    """Sample a continuation of *prompt* and return only the generated text.

    Parameters
    ----------
    prompt : str
        Full dialogue prompt (history + current user turn).

    Returns
    -------
    str
        The newly generated tokens decoded to text, with special tokens
        removed.  The prompt itself is NOT echoed back, so callers do not
        need to strip it out of the response.
    """
    # Use the tokenizer's __call__ (not .encode) so we also get the
    # attention mask; generate() warns and can misbehave without one.
    encoded = tokenizer(prompt, return_tensors="pt")
    input_ids = encoded["input_ids"]

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            attention_mask=encoded["attention_mask"],
            # max_new_tokens bounds the *generated* length.  The original
            # max_length=150 counted the prompt too, so the reply silently
            # shrank — and eventually vanished — as chat history grew.
            max_new_tokens=150,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            # Many tokenizers define no pad token; fall back to EOS so
            # generate() does not raise/warn about missing padding.
            pad_token_id=tokenizer.pad_token_id
            if tokenizer.pad_token_id is not None
            else tokenizer.eos_token_id,
        )

    # Slice off the prompt tokens and decode only the continuation.
    # Stripping the prompt at the *string* level is unreliable because
    # tokenization does not round-trip text byte-for-byte.
    new_tokens = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
|
|
|
def chat(message, history):
    """Gradio ChatInterface callback: build a dialogue prompt, return the reply.

    Parameters
    ----------
    message : str
        The user's current message.
    history : list[tuple[str, str]] | None
        Previous (user, assistant) turns in gradio's tuple format.
        # assumes tuple-format history, not the newer "messages" dicts —
        # TODO confirm against the installed gradio version.

    Returns
    -------
    str
        The model's reply, stripped of any echoed prompt and whitespace.
    """
    # Replay the whole conversation so the model sees full context.
    formatted_history = ""
    for human, ai in history or []:
        formatted_history += f"Użytkownik: {human}\nAI: {ai}\n"

    prompt = f"{formatted_history}Użytkownik: {message}\nAI:"
    response = generate_response(prompt)

    # Defensive cleanup: strip the prompt only if it was echoed as a
    # *prefix*.  The original `response.replace(prompt, "")` deleted every
    # occurrence (mangling legitimate repetitions) and failed outright
    # whenever tokenization did not round-trip the prompt byte-for-byte.
    if response.startswith(prompt):
        response = response[len(prompt):]
    return response.strip()
|
|
|
|
|
# Wire the chat() callback into a ready-made Gradio chat UI.
demo = gr.ChatInterface(

    fn=chat,

    title="Polski ChatAI",

    description="Rozmawiaj ze mną po polsku!",

    # Clickable starter prompts (Polish): greeting / Warsaw / "what is AI".
    examples=["Cześć, jak się masz?", "Opowiedz mi o Warszawie", "Co to jest sztuczna inteligencja?"],

    theme="soft"

)


# Start the local web server; blocks until the process is stopped.
demo.launch()