import os
import urllib.request

# Download the dataset only once (skip if the file already exists)
dataset_url = "https://huggingface.co/datasets/bitext/Bitext-customer-support-llm-chatbot-training-dataset/resolve/main/Bitext_Sample_Customer_Support_Training_Dataset_27K__Labeled.csv"
dataset_file = "bitext_dataset.csv"

if not os.path.exists(dataset_file):
    print("Downloading Bitext dataset...")
    urllib.request.urlretrieve(dataset_url, dataset_file)
    print("Download complete.")
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load the fine-tuned TinyLlama model from the Hugging Face Hub
model_name = "your-username/tinyllama-qlora-support-bot"  # replace with your actual HF repo name
# Use FP16 on GPU; fall back to FP32 on CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

# Text-generation pipeline for response generation
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if device == "cuda" else -1,
)
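# Optional smoke test (illustrative; uncomment to run one generation at
# startup and confirm the model loads). It uses the same Alpaca-style
# prompt template as the chatbot function below:
# print(generator("### Instruction:\nHello\n\n### Response:\n", max_new_tokens=32)[0]["generated_text"])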
def chatbot(message, history):
    # Alpaca-style prompt template matching the fine-tuning format
    prompt = f"### Instruction:\n{message}\n\n### Response:\n"
    output = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    # generated_text includes the prompt, so keep only what follows the marker
    response = output[0]["generated_text"].split("### Response:\n")[-1].strip()
    return response
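# Example single-turn call (illustrative; the Gradio interface below invokes
# chatbot() the same way, passing the running chat history):
# print(chatbot("How do I cancel my order?", []))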
interface = gr.ChatInterface(fn=chatbot, title="🦙 LLaMA Support Chatbot", theme="soft")

if __name__ == "__main__":
    interface.launch()
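# On Spaces, launch() with no arguments is enough; when running locally, a
# public link can be requested with interface.launch(share=True).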