import gradio as gr
from huggingface_hub import InferenceClient

# Background CSS
css = """
body {
  background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/YcsJnPk8HJvXiB5WkVmf1.jpeg');
  background-size: cover;
  background-position: center;
  background-repeat: no-repeat;
}
.gradio-container {
  display: flex;
  flex-direction: column;
  justify-content: center;
  min-height: 100vh;
  padding-top: 2rem;
  padding-bottom: 2rem;
}
#chat-panel {
  background-color: rgba(255, 255, 255, 0.85);
  padding: 2rem;
  border-radius: 12px;
  width: 100%;
  max-width: 700px;
  height: 70vh;
  box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
  overflow-y: auto;
}
/* Title styling; targets the h1 rendered by the gr.Markdown heading below */
:root {
  --custom-title-color: #1a1a1a; /* assumed value; the variable was previously undefined */
}
#custom-title h1 {
   color: var(--custom-title-color) !important;
   font-family: 'Noto Sans', sans-serif !important;
   font-size: 5rem !important;
   font-weight: bold !important;
   text-align: center !important;
   margin-bottom: 1.5rem !important;
   width: 100%;
}
"""

# Model client (consider switching to a public model like mistralai if 401 persists)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
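# If 401 errors persist, the model may be gated; a minimal alternative sketch,
# assuming an HF_TOKEN environment variable (or Space secret) is available:
#   import os
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))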

# Level prompt selector
def level_to_prompt(level):
    return {
        "A1": "You are a friendly French tutor. Respond only to the user's specific question. Do not explain unrelated vocabulary or topics unless asked. Use simple French with English explanations, and do not mention or suggest voice interaction.",
        "A2": "You are a patient French tutor. Respond directly to the user’s query. Avoid unrelated tangents. Use short French phrases with English translations, and avoid suggesting audio or voice features.",
        "B1": "You are a helpful French tutor. Answer only what the user asks. Use mostly French, clarify only what’s needed in English, and avoid adding extra examples or features like voice.",
        "B2": "You are a French tutor. Keep responses concise and on-topic. Use French naturally with minimal English, and never refer to speaking aloud or audio tools.",
        "C1": "You are a native French tutor. Use fluent French to address only what the user asks. Do not elaborate beyond their question or reference voice interaction.",
        "C2": "You are a French language professor. Respond to the user’s question in advanced, formal French, staying completely on topic. Never introduce unrelated vocabulary or speaking suggestions."
    }.get(level, "You are a helpful French tutor.")
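
# Example: level_to_prompt("B1") returns the B1 system prompt; any
# unrecognized level falls back to the generic tutor prompt.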


# Chat handler
def respond(message, history, user_level, max_tokens, temperature, top_p):
    system_message = level_to_prompt(user_level)
    messages = [{"role": "system", "content": system_message}]
    
    # Normalize history: support both legacy (user, assistant) tuple pairs
    # and the messages-format dicts used with type="messages"
    if history and isinstance(history[0], (list, tuple)):
        for user_msg, assistant_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
    else:
        messages.extend(history)

    messages.append({"role": "user", "content": message})
    
    response = ""
    try:
        for msg in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = msg.choices[0].delta.content
            if token:
                response += token
            yield response
    except Exception as e:
        yield f"Désolé! There was an error: {str(e)}"

# Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown("French Instructor", elem_id="custom-title")
    
    with gr.Column(elem_id="chat-panel"):
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            user_level = gr.Dropdown(
                choices=["A1", "A2", "B1", "B2", "C1", "C2"],
                value="A1",
                label="Your French Level (CEFR)"
            )
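            # Sampling controls: temperature scales randomness; top_p caps the
            # nucleus-sampling probability mass considered at each step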
            max_tokens = gr.Slider(1, 2048, value=300, step=1, label="Response Length")
            temperature = gr.Slider(0.1, 4.0, value=0.5, step=0.1, label="Creativity")
            top_p = gr.Slider(0.1, 1.0, value=0.8, step=0.05, label="Top-p (Nucleus Sampling)")

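        # ChatInterface passes additional_inputs to respond() after
        # (message, history), in the order listed below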
        gr.ChatInterface(
            respond,
            additional_inputs=[user_level, max_tokens, temperature, top_p],
            type="messages"
        )

if __name__ == "__main__":
    demo.launch()