# Al-Atlas-LLM / app.py
import json
import os
import uuid
from pathlib import Path

import gradio as gr
import spaces
import torch
from huggingface_hub import CommitScheduler
from transformers import AutoModelForCausalLM, AutoTokenizer

# Select the GPU if available, otherwise fall back to CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"[INFO] Using device: {device}")

# Hugging Face access token (must be set as a secret in the Space)
token = os.environ["TOKEN"]
# Load the pretrained model and tokenizer
MODEL_NAME = "atlasia/Al-Atlas-0.5B"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=token)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=token).to(device)

# The tokenizer has no pad token by default; reuse the EOS token so padding
# during generation works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
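
# Note: on a CUDA device you could roughly halve memory by loading the model
# with torch_dtype=torch.float16 (a standard from_pretrained kwarg); this app
# keeps the full-precision default.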
# Predefined examples: [prompt, max_length, temperature, top_p, top_k, num_beams, repetition_penalty]
examples = [
    # "Artificial intelligence is a branch of computer science that focuses"
    ["الذكاء الاصطناعي هو فرع من علوم الكمبيوتر اللي كيركز", 256, 0.7, 0.9, 150, 8, 1.5],
    # "The future of artificial intelligence in Morocco"
    ["المستقبل ديال الذكاء الصناعي فالمغرب", 256, 0.7, 0.9, 150, 8, 1.5],
    # "Moroccan cuisine"
    ["المطبخ المغربي", 256, 0.7, 0.9, 150, 8, 1.5],
    # "Moroccan food is considered among the best in the world"
    ["الماكلة المغربية كتعتبر من أحسن الماكلات فالعالم", 256, 0.7, 0.9, 150, 8, 1.5],
]
# JSON Lines file where prompts, outputs, and parameters are collected before
# being committed to the Hub; a per-session UUID avoids filename collisions
feedback_file = Path("user_submit") / f"data_{uuid.uuid4()}.json"

# Create the directory if it doesn't exist
feedback_file.parent.mkdir(exist_ok=True, parents=True)
# Background job that commits the contents of folder_path to the dataset repo
# every 5 minutes
scheduler = CommitScheduler(
    repo_id="atlasia/atlaset_inference_ds",
    repo_type="dataset",
    folder_path=feedback_file.parent,
    path_in_repo="data",
    every=5,
    token=token,
)
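
# CommitScheduler exposes a lock (used in save_feedback below) so file writes
# don't race with an in-progress upload of the folder.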

@spaces.GPU  # request a GPU for the duration of the call on ZeroGPU Spaces
def generate_text(prompt, max_length=256, temperature=0.7, top_p=0.9, top_k=150, num_beams=8, repetition_penalty=1.5):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Beam-search multinomial sampling (num_beams > 1 with do_sample=True);
    # note that max_length counts the prompt tokens as well as the new ones
    output = model.generate(
        **inputs,
        max_length=max_length,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        repetition_penalty=repetition_penalty,
        num_beams=num_beams,
        top_k=top_k,
        early_stopping=True,
        pad_token_id=tokenizer.pad_token_id,  # explicit pad token
        eos_token_id=tokenizer.eos_token_id,  # explicit eos token
    )
    result = tokenizer.decode(output[0], skip_special_tokens=True)
    save_feedback(prompt, result, f"{max_length},{temperature},{top_p},{top_k},{num_beams},{repetition_penalty}")
    return result
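
# Example of calling the function directly (outside the UI), e.g. as a smoke test:
#   print(generate_text("المستقبل ديال الذكاء الصناعي فالمغرب", max_length=128))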

def save_feedback(prompt, result, params) -> None:
    """
    Append input/output pairs and generation parameters to a JSON Lines file,
    using the scheduler's thread lock to avoid concurrent writes from
    different users.
    """
    with scheduler.lock:
        with feedback_file.open("a") as f:
            f.write(json.dumps({"input": prompt, "output": result, "params": params}))
            f.write("\n")

if __name__ == "__main__":
    # Build the Gradio interface
    with gr.Blocks() as app:
        with gr.Row():
            with gr.Column():
                prompt_input = gr.Textbox(label="Prompt: دخل النص بالدارجة")  # "enter the text in Darija"
                max_length = gr.Slider(8, 4096, value=256, label="Max Length")
                # Sampling requires temperature > 0, so the slider starts at 0.1
                temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
                top_p = gr.Slider(0.0, 1.0, value=0.9, label="Top-p")
                top_k = gr.Slider(1, 10000, value=150, label="Top-k")
                num_beams = gr.Slider(1, 20, value=8, label="Number of Beams")
                # 1.0 means no penalty; values <= 0 are rejected by transformers
                repetition_penalty = gr.Slider(1.0, 100.0, value=1.5, label="Repetition Penalty")
                submit_btn = gr.Button("Generate")
            with gr.Column():
                output_text = gr.Textbox(label="Generated Text in Moroccan Darija")
        # Examples section with caching
        gr.Examples(
            examples=examples,
            inputs=[prompt_input, max_length, temperature, top_p, top_k, num_beams, repetition_penalty],
            outputs=output_text,
            fn=generate_text,
            cache_examples=True,
        )
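        # Note: with cache_examples=True, Gradio runs generate_text on each
        # example once at startup and serves the cached outputs afterwards.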
        # Wire the button to the generation function
        submit_btn.click(
            generate_text,
            inputs=[prompt_input, max_length, temperature, top_p, top_k, num_beams, repetition_penalty],
            outputs=output_text,
        )
gr.Markdown("""
# Moroccan Darija LLM
Enter a prompt and get AI-generated text using our pretrained LLM on Moroccan Darija.
""")
app.launch()