import gradio as gr
from huggingface_hub import InferenceClient
# Custom CSS: full-page background image plus a semi-transparent chat panel
css = """
body {
    background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/vcVnxPZhCXRVL2fn3rG6B.jpeg');
    background-size: cover;
    background-position: center;
    background-repeat: no-repeat;
}

.gradio-container {
    display: flex;
    flex-direction: column;
    justify-content: center;
    min-height: 100vh;
    padding-top: 2rem;
    padding-bottom: 2rem;
}
#chat-panel {
    background-color: rgba(255, 255, 255, 0.85);
    padding: 2rem;
    border-radius: 12px;
    width: 100%;
    max-width: 700px;
    height: 70vh;
    box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
    overflow-y: auto;
}
/* Title styling; targets the h1 rendered by the Markdown block with elem_id="custom-title" */
#custom-title h1 {
    color: var(--custom-title-color, #1a1a2e) !important; /* fallback, since the variable is not defined elsewhere */
    font-family: 'Noto Sans', sans-serif !important;
    font-size: 5rem !important;
    font-weight: bold !important;
    text-align: center !important;
    margin-bottom: 1.5rem !important;
    width: 100%; /* full width so text-align can center the title */
}
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
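
# A minimal sketch, in case the endpoint requires authentication: the client
# also accepts a token, e.g. read from a Space secret. The HF_TOKEN name is
# an assumption for illustration, not part of the original app.
#
#   import os
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))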

def respond(
    message,
    history,
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    messages = [{"role": "system", "content": system_message}]

    # Normalize history: it arrives either as (user, assistant) tuples
    # (the legacy format) or as a list of {"role", "content"} dicts.
    if history and isinstance(history[0], tuple):
        # Legacy tuple format
        for user_msg, assistant_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    else:
        # Messages format: already a list of role/content dicts
        messages.extend(history)

    messages.append({"role": "user", "content": message})
response = ""
for msg in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = msg.choices[0].delta.content
response += token
yield response
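
# For reference, a non-streaming variant as a hedged sketch: respond_once is
# a hypothetical helper, not wired into the UI below. With stream=False,
# chat_completion returns a single result whose text lives at
# choices[0].message.content. This sketch assumes the messages-format history.
def respond_once(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})
    result = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=False,
        temperature=temperature,
        top_p=top_p,
    )
    return result.choices[0].message.content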

with gr.Blocks(css=css) as demo:
    # Title rendered as a Markdown h1 so the #custom-title CSS above applies
    gr.Markdown("# French Instructor", elem_id="custom-title")

    with gr.Column(elem_id="chat-panel"):
        with gr.Accordion("Advanced Settings", open=False):
            system_message = gr.Textbox(
                value=(
                    "You are a helpful French language tutor. You help users learn "
                    "French vocabulary, grammar, and cultural context. When "
                    "appropriate, include both the written French and its "
                    "pronunciation. For beginners, focus on simple phrases and "
                    "gradually increase difficulty based on user proficiency."
                ),
                label="System Message",
            )
            max_tokens = gr.Slider(
                minimum=1,
                maximum=2048,
                value=512,
                step=1,
                label="Response Length",
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=4.0,
                value=0.7,
                step=0.1,
                label="Creativity",
            )
            top_p = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Dynamic Text",
            )

        gr.ChatInterface(
            respond,
            additional_inputs=[
                system_message,
                max_tokens,
                temperature,
                top_p,
            ],
            type="messages",  # use the new messages-format history
        )

if __name__ == "__main__":
    demo.launch()
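
# Usage note: inside a Hugging Face Space, launch() needs no extra arguments.
# Run locally, demo.launch(share=True) would additionally expose a temporary
# public URL; share=True is a standard Gradio option, mentioned here as a hint.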