# Gradio Space: GPT-OSS chat demo (placeholder backend, no model wired in yet).
import gradio as gr

# Custom CSS for gradient background and styling.
# Defines an animated pastel gradient backdrop for the whole app (with a
# darker variant under the `.dark` theme class) and a translucent
# "main-container" card style applied via elem_classes below.
# Passed to gr.Blocks(css=custom_css) at construction time.
custom_css = """
.gradio-container {
background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 25%, #fbc2eb 50%, #a6c1ee 75%, #c2e9fb 100%);
background-size: 400% 400%;
animation: gradient-animation 15s ease infinite;
min-height: 100vh;
}
@keyframes gradient-animation {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
.dark .gradio-container {
background: linear-gradient(135deg, #2a2a3e 0%, #3a3a5e 25%, #4a4a6e 50%, #5a5a7e 75%, #6a6a8e 100%);
background-size: 400% 400%;
animation: gradient-animation 15s ease infinite;
}
/* Style for content areas */
.main-container {
background-color: rgba(255, 255, 255, 0.92);
backdrop-filter: blur(10px);
border-radius: 20px;
padding: 20px;
box-shadow: 0 4px 20px 0 rgba(31, 38, 135, 0.15);
border: 1px solid rgba(255, 255, 255, 0.3);
margin: 10px;
}
.dark .main-container {
background-color: rgba(40, 40, 40, 0.95);
border: 1px solid rgba(255, 255, 255, 0.1);
}
"""
def chat_fn(message, history, model_choice):
    """Placeholder chat handler: echo the message tagged with the chosen model.

    `history` is accepted for interface compatibility but not used.
    """
    return "Response from {}: {}".format(model_choice, message)
# NOTE(review): several label strings below (e.g. "๐ Inference Provider")
# look like mojibake of emoji characters; kept byte-for-byte since the
# original glyphs cannot be recovered from this copy — confirm against the
# deployed Space.
with gr.Blocks(fill_height=True, theme="soft", css=custom_css) as demo:
    # State variable tracking the currently selected model id.
    current_model = gr.State("openai/gpt-oss-120b")

    with gr.Row():
        # ---- Sidebar ----
        with gr.Column(scale=1):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("# ๐ Inference Provider")
                gr.Markdown(
                    "This Space showcases OpenAI GPT-OSS models, served by the Cerebras API. "
                    "Sign in with your Hugging Face account to use this API."
                )

                # Model selection
                model_dropdown = gr.Dropdown(
                    choices=[
                        "openai/gpt-oss-120b",
                        "openai/gpt-oss-20b",
                    ],
                    value="openai/gpt-oss-120b",
                    label="๐ Select Model",
                    info="Choose between different model sizes",
                )

                # Login button
                login_button = gr.LoginButton("Sign in with Hugging Face", size="lg")

                # Additional options (not yet wired to a backend).
                with gr.Accordion("โ๏ธ Advanced Options", open=False):
                    gr.Markdown("*These options will be available after model implementation*")
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=2,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=4096,
                        value=512,
                        step=1,
                        label="Max Tokens",
                    )

        # ---- Main chat area ----
        with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("## ๐ฌ Chat Interface")

                # Display current model (plain string: no interpolation needed here).
                model_display = gr.Markdown("### Model: openai/gpt-oss-120b")

                # Single chat interface that works with both models.
                chatbot = gr.Chatbot(
                    height=400,
                    show_label=False,
                    elem_classes="main-container",
                )
                with gr.Row():
                    msg = gr.Textbox(
                        placeholder="Type your message here...",
                        show_label=False,
                        scale=9,
                    )
                    submit_btn = gr.Button("Send", variant="primary", scale=1)
                clear_btn = gr.Button("๐๏ธ Clear Chat", variant="secondary")

    # Update the heading and the state when the dropdown changes.
    def update_model_display(model_choice):
        """Return the markdown heading for the chosen model and the new state value."""
        return f"### Model: {model_choice}", model_choice

    model_dropdown.change(
        fn=update_model_display,
        inputs=[model_dropdown],
        outputs=[model_display, current_model],
    )

    # Chat functionality
    def respond(message, chat_history, model):
        """Append the user message and a placeholder bot reply to the history.

        Returns ("", updated_history) so the textbox is cleared after sending.
        Empty submissions are ignored and leave the history untouched.
        """
        if not message:
            return "", chat_history
        # Add user message to history (tuple-style [user, bot] pairs).
        chat_history = chat_history + [[message, None]]
        # Placeholder response — replace with an actual model call.
        # Fixed to say "Cerebras API" to match the sidebar copy above
        # (it previously said "Fireworks AI API", an inconsistency).
        bot_response = (
            f"[{model.split('/')[-1]}]: This is a placeholder response. "
            f"In production, this would connect to the {model} via Cerebras API."
        )
        # Fill in the bot half of the last [user, bot] pair.
        chat_history[-1][1] = bot_response
        return "", chat_history

    # Submit message on button click or Enter key.
    submit_btn.click(
        fn=respond,
        inputs=[msg, chatbot, current_model],
        outputs=[msg, chatbot],
    )
    msg.submit(
        fn=respond,
        inputs=[msg, chatbot, current_model],
        outputs=[msg, chatbot],
    )

    # Clear chat: reset the chatbot and empty the textbox.
    clear_btn.click(
        fn=lambda: (None, ""),
        inputs=[],
        outputs=[chatbot, msg],
    )

    # Show an info toast on login click.
    login_button.click(
        fn=lambda: gr.Info("Please authenticate with Hugging Face to access the models"),
        inputs=[],
        outputs=[],
    )

demo.launch()