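"""Gradio Space: test Falcon-H1-7B-Instruct on Damascus real-estate questions in Arabic (Syrian dialect and MSA)."""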
import os
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from huggingface_hub import snapshot_download
# ================================
# CONFIGURATION
# ================================
MODEL_NAME_PRIMARY = "tiiuae/Falcon-H1-7B-Instruct"
MODEL_NAME_FALLBACK = "tiiuae/falcon-7b-instruct"
MODEL_LOCAL_DIR = "./falcon_model"
MAX_NEW_TOKENS = 120      # cap on tokens generated per answer
TEMPERATURE = 0.3         # low temperature keeps answers focused
REPETITION_PENALTY = 1.8  # discourage the model from repeating itself
print("🚀 Preparing environment...")
# 1️⃣ Upgrade transformers & accelerate
os.system("pip install --upgrade transformers accelerate > /dev/null")
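# Note: on Hugging Face Spaces, dependencies are normally pinned in requirements.txt;
# the runtime upgrade above is only a fallback in case the image ships older versions.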
# 2️⃣ Ensure a clean download of the model
# force_download=True ignores any cached copy, so every cold start re-fetches the weights.
try:
    print(f"⬇️ Downloading model: {MODEL_NAME_PRIMARY}")
    snapshot_download(MODEL_NAME_PRIMARY, local_dir=MODEL_LOCAL_DIR, force_download=True)
    model_name = MODEL_LOCAL_DIR
except Exception as e:
    print(f"⚠️ Primary model download failed: {e}")
    print("➡️ Falling back to Falcon 7B Instruct")
    snapshot_download(MODEL_NAME_FALLBACK, local_dir=MODEL_LOCAL_DIR, force_download=True)
    model_name = MODEL_LOCAL_DIR
# 3️⃣ Load tokenizer and model
try:
    print("🔄 Loading tokenizer and model...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        trust_remote_code=True,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    # The model is already placed by device_map="auto" (accelerate), so the pipeline
    # must not also receive a `device` argument; passing one raises a ValueError.
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )
    print("✅ Model loaded successfully")
    model_loaded = True
except Exception as e:
    print(f"❌ Model loading failed: {e}")
    generator = None
    model_loaded = False
# ================================
# Test Questions (Pre-Filled)
# ================================
test_questions = [
    "بدي شقة بالمالكي فيها شرفة وغسالة صحون.",            # I want an apartment in Malki with a balcony and a dishwasher.
    "هل في شقة دوبلكس بالمزة الفيلات فيها موقفين سيارة؟",  # Is there a duplex in Mazzeh Villat with two parking spots?
    "بدي بيت عربي قديم بباب توما مع حديقة داخلية.",        # I want an old Arabic house in Bab Touma with an inner courtyard garden.
    "أرخص شقة بالشعلان شو سعرها؟",                         # What is the price of the cheapest apartment in Shaalan?
    "هل يوجد شقق بإطلالة جبلية في أبو رمانة؟",             # Are there apartments with a mountain view in Abu Rummaneh?
    "بدي شقة مفروشة بالكامل بالمزة ٨٦، الطابق الأول.",     # I want a fully furnished apartment in Mazzeh 86, first floor.
    "عندك منزل مستقل بالمهاجرين مع موقد حطب؟",             # Do you have a detached house in Muhajirin with a wood stove?
]
# ================================
# Falcon Chat Function
# ================================
def chat_falcon(user_input):
    if not model_loaded:
        return "❌ النموذج غير محمل. تحقق من الإعدادات."  # "Model not loaded. Check the settings."
    # Prompt (Arabic): "You are a smart real-estate assistant. Answer in one or
    # two clear sentences. / Question: {user_input} / Answer:"
    prompt = f"أنت مساعد عقارات ذكي. أجب بجملة أو جملتين واضحتين.\nالسؤال: {user_input}\nالجواب:"
    output = generator(
        prompt,
        max_new_tokens=MAX_NEW_TOKENS,
        do_sample=True,
        temperature=TEMPERATURE,
        repetition_penalty=REPETITION_PENALTY,
        top_p=0.9,
    )[0]["generated_text"]
    # Strip the echoed prompt so only the model's answer is returned
    return output.replace(prompt, "").strip()
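# Minimal sanity check outside the UI (hypothetical; assumes the model loaded):
#   print(chat_falcon("بدي شقة بالمزة فيها بلكون"))  # "I want an apartment in Mazzeh with a balcony."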
# ================================
# Build Gradio Interface
# ================================
with gr.Blocks() as demo:
    gr.Markdown("## 🏠 Falcon H1 7B Instruct - Damascus Real Estate Test")
    # Subtitle (Arabic): "Test the model's ability to understand questions in Arabic (Syrian dialect or MSA)"
    gr.Markdown("اختبر قدرة النموذج على فهم الأسئلة بالعربية (لهجة سورية أو فصحى)")
    with gr.Row():
        with gr.Column(scale=2):
            # Labels (Arabic): "Write your question here" / placeholder: "Example: I want an apartment in Mazzeh with a balcony"
            user_input = gr.Textbox(label="اكتب سؤالك هنا", lines=3, placeholder="مثال: بدي شقة بالمزة فيها بلكون")
            submit_btn = gr.Button("🔎 أرسل")  # "Send"
        with gr.Column(scale=1):
            suggestions = gr.Dropdown(choices=test_questions, label="🧾 أسئلة جاهزة", value=test_questions[0])  # "Ready-made questions"
    output_box = gr.Textbox(label="إجابة النموذج", lines=8)  # "Model answer"
    submit_btn.click(fn=chat_falcon, inputs=user_input, outputs=output_box)
    suggestions.change(fn=chat_falcon, inputs=suggestions, outputs=output_box)
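# launch() with no arguments is sufficient on Hugging Face Spaces; run locally, it serves at http://127.0.0.1:7860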
demo.launch()