hjgfkd / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load a smaller model for CPU usage
MODEL_NAME = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32, device_map="cpu")

def chat_with_ai(prompt):
    # Tokenize the prompt and keep the tensors on CPU
    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
    # Generate up to 200 tokens total (prompt included); distilgpt2 has no pad
    # token, so reuse the EOS token id to silence the padding warning
    output = model.generate(**inputs, max_length=200, pad_token_id=tokenizer.eos_token_id)
    # Decode the generated token ids back into text
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Create Gradio interface
demo = gr.Interface(
    fn=chat_with_ai,
    inputs=gr.Textbox(placeholder="Ask me anything..."),
    outputs=gr.Textbox(),
)
demo.launch()