import gradio as gr
from huggingface_hub import InferenceClient
import os
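
# Hosted inference client for the tutor model; HF_TOKEN is read from the
# environment (on Spaces, store it as a repository secret rather than in code).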
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1", token=os.getenv("HF_TOKEN"))
# Dynamic prompt builder based on CEFR level
def level_to_prompt(level):
    return {
        "A1": "You are a friendly French tutor. Speak mostly in English, use simple French, and explain everything.",
        "A2": "You are a patient French tutor. Use short French phrases and explain them in English.",
        "B1": "You are a helpful French tutor. Speak mostly in French but clarify in English when needed.",
        "B2": "You are a French tutor. Speak primarily in French with rare English support.",
        "C1": "You are a native French tutor. Speak entirely in French, clearly and professionally.",
        "C2": "You are a native French professor. Speak in rich, complex French. Avoid English."
    }.get(level, "You are a helpful French tutor.")
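
# Illustrative usage: level_to_prompt("B1") returns the B1 prompt above;
# any unrecognized level falls back to the generic tutor prompt.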
# Custom background CSS
css = """
@import url('https://fonts.googleapis.com/css2?family=Noto+Sans+JP&family=Playfair+Display&display=swap');

body {
    background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/wuyd5UYTh9jPrMJGmV9yC.jpeg');
    background-size: cover;
    background-position: center;
    background-repeat: no-repeat;
}

.gradio-container {
    display: flex;
    flex-direction: column;
    justify-content: center;
    min-height: 100vh;
    padding-top: 2rem;
    padding-bottom: 2rem;
}

#chat-panel {
    background-color: rgba(255, 255, 255, 0.85);
    padding: 2rem;
    border-radius: 12px;
    max-width: 700px;
    height: 70vh;
    margin: auto;
    box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
    overflow-y: auto;
}
/* Page title; #custom-title is the elem_id of the gr.Markdown header below.
   The CSS variable carries a literal fallback in case it is never defined. */
#custom-title h1 {
    color: var(--custom-title-color, #2c2c54) !important;
    font-family: 'Playfair Display', serif !important;
    font-size: 5rem !important;
    font-weight: bold !important;
    text-align: center !important;
    margin-bottom: 1.5rem !important;
    width: 100%;
}
"""
# Chat logic
def respond(message, history, level, max_tokens, temperature, top_p):
    system_message = level_to_prompt(level)
    messages = [{"role": "system", "content": system_message}]

    # Handle history based on its format
    if history and isinstance(history[0], dict):
        # New format (messages with role/content)
        messages.extend(history)
    else:
        # Old format (tuples)
        for user, bot in history:
            if user:
                messages.append({"role": "user", "content": user})
            if bot:
                messages.append({"role": "assistant", "content": bot})

    # Add current message
    messages.append({"role": "user", "content": message})

    # Generate response
    response = ""
    try:
        for msg in client.chat_completion(
            messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
        ):
            token = msg.choices[0].delta.content
            if token is not None:  # Handle None tokens
                response += token
                yield response
    except Exception as e:
        print(f"Error in chat completion: {e}")
        yield f"Désolé! There was an error: {str(e)}"
# UI layout
with gr.Blocks(css=css) as demo:
    gr.Markdown("# French Tutor", elem_id="custom-title")  # renders as an h1 styled by the CSS above
    with gr.Column(elem_id="chat-panel"):
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            level = gr.Dropdown(
                choices=["A1", "A2", "B1", "B2", "C1", "C2"],
                value="A1",
                label="Your French Level (CEFR)"
            )
            max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Response Length")
            temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Creativity")
            top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Dynamic Text")

        gr.ChatInterface(
            fn=respond,
            additional_inputs=[level, max_tokens, temperature, top_p],
            type="messages"  # ✅ prevents deprecation warning
        )
if __name__ == "__main__":
    demo.launch()
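
# To run locally (assuming this file is saved as app.py and a valid token is available):
#   HF_TOKEN=hf_xxx python app.py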