"""Minimal Gradio chat UI around a Hugging Face text-generation pipeline."""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# NOTE(review): phi-2 is ~2.7B params; on a free CPU Space it loads slowly and
# may exceed the memory quota — confirm the Space tier before deploying.
model_name = "microsoft/phi-2"

# Load tokenizer and model once at module import; reused for every request.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def chat_with_llm(prompt: str) -> str:
    """Generate a completion for *prompt* and return only the new text.

    Args:
        prompt: The user's input text.

    Returns:
        The model's generated continuation (the prompt itself is excluded).
    """
    # max_new_tokens (not max_length) caps only the generated tokens, so long
    # prompts cannot silently truncate the reply or raise length errors.
    # return_full_text=False stops the pipeline from echoing the prompt back.
    response = pipe(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        return_full_text=False,
    )
    return response[0]["generated_text"]


demo = gr.Interface(
    fn=chat_with_llm,
    inputs=gr.Textbox(lines=2, placeholder="Type your question..."),
    outputs="text",
    title="🗨️ My Hugging Face Chatbot",
    description="Powered by Phi-2 model. Free cloud chatbot.",
)

# Guard the launch so importing this module (e.g. for testing) does not
# start a web server as a side effect.
if __name__ == "__main__":
    demo.launch()