import gradio as gr

# Custom CSS: animated pastel-gradient page background (with a darker
# dark-mode variant) plus a frosted-glass ".main-container" card style
# used to wrap the sidebar and chat areas below.
custom_css = """
.gradio-container {
    background: linear-gradient(135deg, #ffecd2 0%, #fcb69f 25%, #fbc2eb 50%, #a6c1ee 75%, #c2e9fb 100%);
    background-size: 400% 400%;
    animation: gradient-animation 15s ease infinite;
    min-height: 100vh;
}
@keyframes gradient-animation {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}
.dark .gradio-container {
    background: linear-gradient(135deg, #2a2a3e 0%, #3a3a5e 25%, #4a4a6e 50%, #5a5a7e 75%, #6a6a8e 100%);
    background-size: 400% 400%;
    animation: gradient-animation 15s ease infinite;
}
/* Style for content areas */
.main-container {
    background-color: rgba(255, 255, 255, 0.92);
    backdrop-filter: blur(10px);
    border-radius: 20px;
    padding: 20px;
    box-shadow: 0 4px 20px 0 rgba(31, 38, 135, 0.15);
    border: 1px solid rgba(255, 255, 255, 0.3);
    margin: 10px;
}
.dark .main-container {
    background-color: rgba(40, 40, 40, 0.95);
    border: 1px solid rgba(255, 255, 255, 0.1);
}
"""

# State variable tracking the currently selected model id.
# NOTE(review): this gr.State is created outside the `with gr.Blocks(...)`
# context below; it is only wired in via the reload_btn.click outputs.
# Conventionally it should be created inside the Blocks context — confirm
# it registers correctly on the Gradio version in use.
current_model = gr.State("openai/gpt-oss-120b")

def switch_model(model_choice):
    """Return visibility updates for the two model containers plus the new state.

    Shows the container that matches ``model_choice`` and hides the other.
    Wired as ``reload_btn.click`` with outputs
    ``[model_120b_container, model_20b_container, current_model]``.

    Bug fix: the previous version returned
    ``gr.update(visible=False), gr.update(visible=True)`` unconditionally,
    so clicking "Apply Model Change" always switched to the 20b container
    even when 120b was selected.
    """
    show_120b = model_choice == "openai/gpt-oss-120b"
    return gr.update(visible=show_120b), gr.update(visible=not show_120b), model_choice

# App layout: a sidebar (model picker, login, advanced options) next to the
# chat area, which hosts one hidden/visible container per hosted model.
with gr.Blocks(fill_height=True, theme="soft", css=custom_css) as demo:
    with gr.Row():
        # Sidebar
        with gr.Column(scale=1):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("# 🚀 Inference Provider")
                # NOTE(review): this text previously said "the Cerebras API",
                # but both gr.load(...) calls below use provider="fireworks-ai".
                # Text updated to match the code; if Cerebras was intended,
                # change the `provider=` arguments instead.
                gr.Markdown(
                    "This Space showcases OpenAI GPT-OSS models, served by the Fireworks AI API. "
                    "Sign in with your Hugging Face account to use this API."
                )

                # Model selection
                model_dropdown = gr.Dropdown(
                    choices=[
                        "openai/gpt-oss-120b",
                        "openai/gpt-oss-20b",
                    ],
                    value="openai/gpt-oss-120b",
                    label="📊 Select Model",
                    info="Choose between different model sizes",
                )

                # Login button; also passed to gr.load() as the token source.
                login_button = gr.LoginButton("Sign in with Hugging Face", size="lg")

                # Reload button to apply the model change explicitly.
                reload_btn = gr.Button("🔄 Apply Model Change", variant="primary", size="lg")

                # Placeholder sliders; not yet wired into any event.
                with gr.Accordion("⚙️ Advanced Options", open=False):
                    gr.Markdown("*These options will be available after model implementation*")
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=2,
                        value=0.7,
                        step=0.1,
                        label="Temperature",
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=4096,
                        value=512,
                        step=1,
                        label="Max Tokens",
                    )

        # Main chat area
        with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("## 💬 Chat Interface")

                # One container per model; visibility is toggled instead of
                # reloading, since gr.load() builds its UI at startup.
                with gr.Column(visible=True) as model_120b_container:
                    gr.Markdown("### Model: openai/gpt-oss-120b")
                    gr.load("models/openai/gpt-oss-120b", accept_token=login_button, provider="fireworks-ai")

                with gr.Column(visible=False) as model_20b_container:
                    gr.Markdown("### Model: openai/gpt-oss-20b")
                    gr.load("models/openai/gpt-oss-20b", accept_token=login_button, provider="fireworks-ai")

    # Apply button: swap container visibility and record the selection,
    # then pop a toast notification.
    reload_btn.click(
        fn=switch_model,
        inputs=[model_dropdown],
        outputs=[model_120b_container, model_20b_container, current_model],
    ).then(
        fn=lambda: gr.Info("Model switched successfully!"),
        inputs=[],
        outputs=[],
    )

    def update_visibility(model_choice):
        """Show the container matching *model_choice*, hide the other."""
        show_120b = model_choice == "openai/gpt-oss-120b"
        return gr.update(visible=show_120b), gr.update(visible=not show_120b)

    # Also swap containers immediately when the dropdown changes (the
    # Apply button then additionally updates `current_model`).
    model_dropdown.change(
        fn=update_visibility,
        inputs=[model_dropdown],
        outputs=[model_120b_container, model_20b_container],
    )

demo.launch()