import gradio as gr
import ollama
# The model name must exactly match what was pulled from Hugging Face
MODEL_NAME = 'hf.co/unsloth/gemma-3-4b-it-qat-GGUF:Q4_K_M'
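# Note: this assumes the GGUF model has already been pulled locally, e.g.:
#   ollama pull hf.co/unsloth/gemma-3-4b-it-qat-GGUF:Q4_K_M
# and that the Ollama server is running before the app starts.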
# --- 1. Default System Prompt ---
DEFAULT_SYSTEM_PROMPT = "Answer everything in a simple, smart, relevant, and accurate way. Do not be chatty."
# This function is the core of the chatbot. It takes the user's prompt and chat history,
# and then interacts with the Ollama API to get a response.
def predict(message, history, system_prompt, stream_output):
    """
    Main prediction function for the chatbot.

    Args:
        message (str): The user's input message.
        history (list): A list of (user, assistant) pairs from previous turns.
        system_prompt (str): The system prompt to guide the model's behavior.
        stream_output (bool): Flag to enable or disable streaming output.

    Yields:
        list: The updated chat history, ending with the model's (partial) reply.
    """
    # --- 2. Support for Chat History ---
    # Reformat the history from Gradio's (user, assistant) tuple format to the
    # list of role/content dicts expected by the Ollama API
    messages = []
    if system_prompt:
        messages.append({'role': 'system', 'content': system_prompt})
    for user_msg, assistant_msg in history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': assistant_msg})
    messages.append({'role': 'user', 'content': message})
    # --- 4. Enable/Disable Streaming ---
    if stream_output:
        # Stream the response from the Ollama API
        response_stream = ollama.chat(
            model=MODEL_NAME,
            messages=messages,
            stream=True
        )
        # Yield the growing history so the reply renders incrementally in the UI
        partial_response = ""
        for chunk in response_stream:
            if chunk['message']['content']:
                partial_response += chunk['message']['content']
                yield history + [(message, partial_response)]
    else:
        # Get the full response from the Ollama API without streaming
        response = ollama.chat(
            model=MODEL_NAME,
            messages=messages,
            stream=False
        )
        yield history + [(message, response['message']['content'])]
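
# A minimal usage sketch, assuming Ollama is running and the model is pulled:
# draining the generator yields the final chat history.
#   *_, final_history = predict("Hello!", [], DEFAULT_SYSTEM_PROMPT, False)
#   print(final_history[-1][1])  # the model's reply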
# --- 3. Gradio Interface with Options for System Prompt and Streaming ---
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
    gr.Markdown(f"# LLM GGUF Chat with `{MODEL_NAME}`")
    gr.Markdown("Chat with the model, customize its behavior with a system prompt, and toggle streaming output.")

    # The main chat interface component
    chatbot = gr.Chatbot(label="Conversation", height=500)

    with gr.Row():
        msg = gr.Textbox(
            label="Your Message",
            placeholder="Type your message here and press Enter...",
            lines=1,
            scale=4,
        )
    with gr.Accordion("Advanced Options", open=False):
        with gr.Row():
            stream_checkbox = gr.Checkbox(
                label="Stream Output",
                value=True,
                info="Enable to see the response generate in real-time."
            )
            use_custom_prompt_checkbox = gr.Checkbox(
                label="Use Custom System Prompt",
                value=False,
                info="Check this box to provide your own system prompt below."
            )
        system_prompt_textbox = gr.Textbox(
            label="System Prompt",
            value=DEFAULT_SYSTEM_PROMPT,
            lines=3,
            placeholder="Enter a system prompt to guide the model's behavior...",
            interactive=False  # Initially disabled
        )
    # Enable or disable editing of the custom system prompt textbox
    def toggle_system_prompt(use_custom):
        if use_custom:
            # Let the user edit the prompt, starting from the default text
            return gr.update(value=DEFAULT_SYSTEM_PROMPT, interactive=True, visible=True)
        else:
            # Reset to the default prompt and make the textbox read-only again
            return gr.update(value=DEFAULT_SYSTEM_PROMPT, interactive=False, visible=True)

    # Wire up the checkbox to the toggle function
    use_custom_prompt_checkbox.change(
        fn=toggle_system_prompt,
        inputs=use_custom_prompt_checkbox,
        outputs=system_prompt_textbox
    )
    # Connect the message submission to the predict function
    msg.submit(
        predict,
        [msg, chatbot, system_prompt_textbox, stream_checkbox],
        chatbot
    )
    msg.submit(lambda: "", None, msg)  # Clear the textbox after submission
# Launch the Gradio interface
demo.launch(server_name="0.0.0.0", server_port=7860)
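# Port 7860 is the default Gradio port expected by Hugging Face Spaces; when
# running locally, the UI is reachable at http://localhost:7860.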