import gradio as gr
import torch
from transformers import pipeline

# Model pipeline, loaded lazily on first request and cached globally
pipe = None

def load_model():
    """Load the Atlas-Chat model once and cache it in the global `pipe`."""
    global pipe
    if pipe is None:
        print("🏔️ Loading Atlas-Chat-2B model...")
        pipe = pipeline(
            "text-generation",
            model="MBZUAI-Paris/Atlas-Chat-2B",
            model_kwargs={"torch_dtype": torch.bfloat16},
            device="cuda" if torch.cuda.is_available() else "cpu",
        )
        print("✅ Model loaded successfully!")
    return pipe

def chat_with_atlas(message, history):
    """Generate a response from the Atlas-Chat model.

    `history` is supplied by gr.ChatInterface but is not used here;
    each message is answered as a single-turn conversation.
    """
    if not message.strip():
        # "Hello! Welcome." (prompt the user to type something)
        return "مرحبا! أهلا وسهلا. Please enter a message!"

    try:
        model = load_model()

        # The text-generation pipeline accepts chat-style message lists
        messages = [{"role": "user", "content": message}]

        outputs = model(
            messages,
            max_new_tokens=256,
            temperature=0.1,
            do_sample=True,
            pad_token_id=model.tokenizer.eos_token_id,
        )

        # The pipeline returns the full conversation; the last message
        # is the assistant's reply.
        response = outputs[0]["generated_text"][-1]["content"].strip()
        return response

    except Exception as e:
        # "Sorry, I ran into an error: {e}. Try again!"
        return f"عذراً، واجهت خطأ: {str(e)}. جرب مرة أخرى!"

# Build the chat UI. Note: retry_btn / undo_btn / clear_btn are
# Gradio 4.x parameters; they were removed in Gradio 5.
demo = gr.ChatInterface(
    fn=chat_with_atlas,
    title="🏔️ Atlas-Chat: Moroccan Arabic AI Assistant",
    description="""
**مرحبا بك في أطلس شات!** Welcome to Atlas-Chat! 🇲🇦

I'm an AI assistant specialized in **Moroccan Arabic (Darija)** and English.
Ask me questions about Morocco, culture, or just have a chat!

**جرب هذه الأسئلة / Try these questions:**
""",
    examples=[
        "شكون لي صنعك؟",  # Who created you?
        "اشنو هو الطاجين؟",  # What is tagine?
        "شنو كيتسمى المنتخب المغربي؟",  # What is the Moroccan national team called?
        "What is Morocco famous for?",
        "Tell me about Casablanca",
        "كيفاش نقدر نتعلم الدارجة؟",  # How can I learn Darija?
    ],
    cache_examples=False,
    retry_btn="🔄 جرب مرة أخرى",  # Try again
    undo_btn="↶ تراجع",  # Undo
    clear_btn="🗑️ امسح الكل",  # Clear all
    theme=gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="green",
    ),
)

if __name__ == "__main__":
    demo.launch()
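
# Usage sketch (assumptions: this file is saved as `app.py`, and the
# `gradio`, `torch`, and `transformers` packages are installed;
# a CUDA GPU is optional, since the pipeline falls back to CPU):
#
#   pip install gradio torch transformers
#   python app.py
#
# Gradio serves the interface locally (by default at http://127.0.0.1:7860).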