# French-Tutor — app.py
# Gradio chat app for a CEFR-leveled French tutor, backed by a
# Hugging Face Inference endpoint (zephyr-7b-beta).
import gradio as gr
from huggingface_hub import InferenceClient
# --- Page styling -------------------------------------------------------
# Injected via gr.Blocks(css=...) below. Sets a full-page background image,
# vertically centers the layout, and renders the chat panel (#chat-panel)
# as a translucent, rounded, scrollable card. The h1 rule targets the
# chat title; --custom-title-color is presumably defined by the Gradio
# theme — TODO confirm, otherwise the color falls back to the default.
css = """
body {
background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/YcsJnPk8HJvXiB5WkVmf1.jpeg');
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}
.gradio-container {
display: flex;
flex-direction: column;
justify-content: center;
min-height: 100vh;
padding-top: 2rem;
padding-bottom: 2rem;
}
#chat-panel {
background-color: rgba(255, 255, 255, 0.85);
padding: 2rem;
border-radius: 12px;
justify-content: center;
width: 100%;
max-width: 700px;
height: 70vh;
box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
overflow-y: auto;
}
.gradio-container .chatbot h1 {
color: var(--custom-title-color) !important;
font-family: 'Noto Sans', serif !important;
font-size: 5rem !important;
font-weight: bold !important;
text-align: center !important;
margin-bottom: 1.5rem !important;
width: 100%;
}
"""
# Serverless Inference client used by respond() below.
# NOTE(review): a 401 was apparently seen with this model — consider a
# public model (e.g. mistralai) if it persists; verify token/model access.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def level_to_prompt(level):
    """Return the system prompt for a CEFR *level* code ("A1".."C2").

    Any unrecognized level (including None) falls back to a generic
    French-tutor prompt.
    """
    # One tutor persona per CEFR level; wording deliberately forbids the
    # model from drifting off-topic or suggesting voice/audio features.
    prompts = {
        "A1": "You are a friendly French tutor. Respond only to the user's specific question. Do not explain unrelated vocabulary or topics unless asked. Use simple French with English explanations, and do not mention or suggest voice interaction.",
        "A2": "You are a patient French tutor. Respond directly to the user’s query. Avoid unrelated tangents. Use short French phrases with English translations, and avoid suggesting audio or voice features.",
        "B1": "You are a helpful French tutor. Answer only what the user asks. Use mostly French, clarify only what’s needed in English, and avoid adding extra examples or features like voice.",
        "B2": "You are a French tutor. Keep responses concise and on-topic. Use French naturally with minimal English, and never refer to speaking aloud or audio tools.",
        "C1": "You are a native French tutor. Use fluent French to address only what the user asks. Do not elaborate beyond their question or reference voice interaction.",
        "C2": "You are a French language professor. Respond to the user’s question in advanced, formal French, staying completely on topic. Never introduce unrelated vocabulary or speaking suggestions.",
    }
    fallback = "You are a helpful French tutor."
    return prompts.get(level, fallback)
def respond(message, history, user_level, max_tokens, temperature, top_p):
    """Stream a tutor reply to *message*, yielding the growing text.

    *history* may arrive either as legacy Gradio (user, assistant) tuples
    or as openai-style message dicts (ChatInterface type="messages").
    Sampling knobs (max_tokens, temperature, top_p) come from the UI
    sliders. On any failure, a single apologetic message is yielded
    instead of raising, so the chat UI stays alive.
    """
    convo = [{"role": "system", "content": level_to_prompt(user_level)}]

    # Normalize prior turns into message dicts.
    if history and isinstance(history[0], tuple):
        # Legacy tuple pairs: expand each into user/assistant messages,
        # skipping empty sides.
        for user_turn, bot_turn in history:
            if user_turn:
                convo.append({"role": "user", "content": user_turn})
            if bot_turn:
                convo.append({"role": "assistant", "content": bot_turn})
    else:
        # Already message dicts — append as-is.
        convo.extend(history)

    convo.append({"role": "user", "content": message})

    partial = ""
    try:
        stream = client.chat_completion(
            convo,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            piece = chunk.choices[0].delta.content
            # Delta content can be None on some chunks; only yield growth.
            if piece:
                partial += piece
                yield partial
    except Exception as e:
        # Surface the error in-chat rather than crashing the interface.
        yield f"Désolé! There was an error: {str(e)}"
# --- Gradio interface ---------------------------------------------------
# Layout: a title, then a translucent card (#chat-panel, styled in `css`)
# holding an "Advanced Settings" accordion and the chat itself.
with gr.Blocks(css=css) as demo:
    gr.Markdown("French Instructor", elem_id="custom-title")
    with gr.Column(elem_id="chat-panel"):
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            # CEFR level drives the system prompt via level_to_prompt().
            user_level = gr.Dropdown(
                choices=["A1", "A2", "B1", "B2", "C1", "C2"],
                value="A1",
                label="Your French Level (CEFR)"
            )
            # Sampling controls forwarded to respond() as extra inputs.
            max_tokens = gr.Slider(1, 2048, value=300, step=1, label="Response Length")
            temperature = gr.Slider(0.1, 4.0, value=0.5, step=0.1, label="Creativity")
            top_p = gr.Slider(0.1, 1.0, value=0.8, step=0.05, label="Dynamic Text Sampling")
        # type="messages" means history reaches respond() as message dicts,
        # not (user, assistant) tuples (respond() handles both).
        gr.ChatInterface(
            respond,
            additional_inputs=[user_level, max_tokens, temperature, top_p],
            type="messages"
        )
if __name__ == "__main__":
    demo.launch()