Spaces: Running on Zero
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import torch
from threading import Thread
import gradio as gr
import spaces
import re
from peft import PeftModel
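# Note: TextIteratorStreamer and Thread are only needed for the streamer-based
# alternative sketched (in comments) at the end of generate_response; the main
# path below streams via chunked generation.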
# Load the base model
try:
    base_model = AutoModelForCausalLM.from_pretrained(
        "openai/gpt-oss-20b",
        torch_dtype="auto",
        device_map="auto",
        attn_implementation="kernels-community/vllm-flash-attention3"
    )
    tokenizer = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
    # Load the LoRA adapter
    try:
        model = PeftModel.from_pretrained(base_model, "Tonic/gpt-oss-20b-multilingual-reasoner")
        print("✅ LoRA model loaded successfully!")
    except Exception as lora_error:
        print(f"⚠️ LoRA adapter failed to load: {lora_error}")
        print("🔄 Falling back to base model...")
        model = base_model
except Exception as e:
    print(f"❌ Error loading model: {e}")
    raise e
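# Gradio's ChatInterface (type="messages") passes history as a list of
# {"role": ..., "content": ...} dicts; content can arrive as a list of parts,
# so flatten each entry down to plain text before re-templating.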
def format_conversation_history(chat_history):
    messages = []
    for item in chat_history:
        role = item["role"]
        content = item["content"]
        if isinstance(content, list):
            content = content[0]["text"] if content and "text" in content[0] else str(content)
        messages.append({"role": role, "content": content})
    return messages
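# @spaces.GPU requests a ZeroGPU worker for each call; `duration` caps how long
# the GPU is held per invocation (in seconds).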
@spaces.GPU(duration=60)
def generate_response(input_data, chat_history, max_new_tokens, system_prompt, temperature, top_p, top_k, repetition_penalty):
    new_message = {"role": "user", "content": input_data}
    system_message = [{"role": "system", "content": system_prompt}] if system_prompt else []
    processed_history = format_conversation_history(chat_history)
    messages = system_message + processed_history + [new_message]
    # Use the model's chat template to format the conversation properly.
    # This is crucial for gpt-oss-20b, which expects the Harmony format.
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # Alternative streaming approach with manual chunking
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate in smaller chunks for better streaming
    chunk_size = 50  # Generate 50 tokens at a time
    full_response = ""
    with torch.no_grad():
        for i in range(0, max_new_tokens, chunk_size):
            current_max_tokens = min(chunk_size, max_new_tokens - i)
            outputs = model.generate(
                **inputs,
                max_new_tokens=current_max_tokens,
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                repetition_penalty=repetition_penalty,
                pad_token_id=tokenizer.eos_token_id,
                use_cache=True
            )
            # Decode the new tokens
            new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
            new_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
            if new_text:
                full_response += new_text
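                # Raw gpt-oss (Harmony) output decodes as an "analysis" reasoning
                # channel followed by an "assistantfinal" marker and the answer;
                # the split below relies on that layout.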
                # Process for thinking/final split
                thinking = ""
                final = ""
                started_final = False
                if "assistantfinal" in full_response.lower():
                    split_parts = re.split(r'assistantfinal', full_response, maxsplit=1, flags=re.IGNORECASE)
                    thinking = split_parts[0]
                    final = split_parts[1] if len(split_parts) > 1 else ""
                    started_final = True
                else:
                    thinking = full_response
                clean_thinking = re.sub(r'^analysis\s*', '', thinking).strip()
                clean_final = final.strip()
                formatted = f"<details open><summary>Click to view Thinking Process</summary>\n\n{clean_thinking}\n\n</details>\n\n{clean_final}"
                yield formatted
            # Update inputs for the next iteration: feed the full sequence back in
            # (with an explicit attention mask, since the batch is unpadded)
            inputs = {"input_ids": outputs, "attention_mask": torch.ones_like(outputs)}
            # Check for end of generation
            if outputs[0][-1].item() == tokenizer.eos_token_id:
                break
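    # Alternative (untested sketch): true token-level streaming with the imported
    # TextIteratorStreamer and Thread instead of repeated chunked generate() calls,
    # reusing the same `inputs`, `model`, and sampling parameters as above:
    #
    #     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    #     generation_kwargs = dict(
    #         **inputs,
    #         max_new_tokens=max_new_tokens,
    #         do_sample=True,
    #         temperature=temperature,
    #         top_p=top_p,
    #         top_k=top_k,
    #         repetition_penalty=repetition_penalty,
    #         streamer=streamer,
    #     )
    #     Thread(target=model.generate, kwargs=generation_kwargs).start()
    #     partial = ""
    #     for new_text in streamer:
    #         partial += new_text
    #         yield partial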
demo = gr.ChatInterface(
    fn=generate_response,
    additional_inputs=[
        gr.Slider(label="Max new tokens", minimum=64, maximum=4096, step=1, value=2048),
        gr.Textbox(
            label="System Prompt",
            value="You are a helpful assistant. Reasoning: medium",
            lines=4,
            placeholder="Change system prompt"
        ),
        gr.Slider(label="Temperature", minimum=0.1, maximum=2.0, step=0.1, value=0.7),
        gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
        gr.Slider(label="Top-k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.0)
    ],
    examples=[
        [{"text": "Explain Newton's laws clearly and concisely"}],
        [{"text": "Write a Python function to calculate the Fibonacci sequence"}],
        [{"text": "What are the benefits of open-weight AI models?"}],
    ],
    cache_examples=False,
    type="messages",
    description="""
# 🙋🏻‍♂️ Welcome to 🌟 Tonic's gpt-oss-20b Multilingual Reasoner Demo!
The first response may take a few seconds to start. You can adjust the reasoning level in the system prompt, e.g. "Reasoning: high".
""",
    fill_height=True,
    textbox=gr.Textbox(
        label="Query Input",
        placeholder="Type your prompt"
    ),
    stop_btn="Stop Generation",
    multimodal=False,
    theme=gr.themes.Soft()
)
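# Note: the additional_inputs above are passed to generate_response positionally
# after (message, history), in the order listed: max_new_tokens, system_prompt,
# temperature, top_p, top_k, repetition_penalty.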
if __name__ == "__main__":
    demo.launch(share=True)