import gradio as gr
from huggingface_hub import InferenceClient
# Custom background CSS with a semi-transparent chat panel
css = """
body {
    background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/wuyd5UYTh9jPrMJGmV9yC.jpeg');
    background-size: cover;
    background-position: center;
    background-repeat: no-repeat;
}
#chat-panel {
    background-color: rgba(255, 255, 255, 0.85);
    padding: 2rem;
    border-radius: 12px;
    max-width: 700px;
    height: 70vh;
    margin: auto;
    box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
    overflow-y: auto;
}
#chat-title {
    color: #d63384 !important;
    font-family: 'Playfair Display', serif !important;
    font-size: 1.8rem !important;
    font-weight: bold !important;
    text-align: center !important;
    margin-bottom: 1rem !important;
}
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
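# Stream a chat completion for the new message plus the prior conversation turns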
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Rebuild the full conversation: system prompt, prior turns, then the new user message
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream the reply so the UI updates as each token arrives
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
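# Build the UI: a styled panel holding the title, a settings accordion, and the chat interface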
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="chat-panel"):
        gr.Markdown("## 🇫🇷 French Tutor", elem_id="chat-title")
        with gr.Accordion("⚙️ Settings", open=False):
            system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message")
            max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
            temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
        gr.ChatInterface(
            respond,
            additional_inputs=[system_message, max_tokens, temperature, top_p],
        )
if __name__ == "__main__":
    demo.launch()