CCockrum committed on
Commit
ea41834
·
verified ·
1 Parent(s): 0d82751

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -89
app.py CHANGED
@@ -1,7 +1,18 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Custom background CSS with forced title styling
 
 
 
 
 
 
 
 
 
 
 
5
  css = """
6
  @import url('https://fonts.googleapis.com/css2?family=Noto+Sans+JP&family=Playfair+Display&display=swap');
7
 
@@ -38,61 +49,34 @@ body {
38
  margin-bottom: 1.5rem !important;
39
  width: 100%;
40
  }
41
-
42
- /* Fallback font import */
43
- @import url('https://fonts.googleapis.com/css2?family=Playfair+Display:wght@700&display=swap');
44
  """
45
 
46
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
47
 
 
48
  def respond(message, history, level, max_tokens, temperature, top_p):
49
  system_message = level_to_prompt(level)
50
- ...
51
-
52
- message,
53
- history: list[tuple[str, str]]
54
- system_message,
55
- max_tokens,
56
- temperature,
57
- top_p,
58
-
59
  messages = [{"role": "system", "content": system_message}]
60
-
61
- for val in history:
62
- if val[0]:
63
- messages.append({"role": "user", "content": val[0]})
64
- if val[1]:
65
- messages.append({"role": "assistant", "content": val[1]})
66
-
67
  messages.append({"role": "user", "content": message})
68
 
69
  response = ""
70
-
71
- for message in client.chat_completion(
72
- messages,
73
- max_tokens=max_tokens,
74
- stream=True,
75
- temperature=temperature,
76
- top_p=top_p,
77
  ):
78
- token = message.choices[0].delta.content
79
  response += token
80
  yield response
81
 
82
- def level_to_prompt(level):
83
- mapping = {
84
- "A1": "You are a friendly French tutor. Speak mostly in English, use simple French, and explain everything.",
85
- "A2": "You are a patient French tutor. Use short French phrases, but explain them in English.",
86
- "B1": "You are a helpful French tutor. Speak mostly in French with English explanations when needed.",
87
- "B2": "You are a French tutor. Speak primarily in French. Use English only when absolutely necessary.",
88
- "C1": "You are a native French tutor. Speak only in French, but be clear and articulate.",
89
- "C2": "You are a native French professor. Speak in complex and natural French. Avoid English."
90
- }
91
- return mapping.get(level, mapping["A1"]) # Fallback to A1
92
-
93
  with gr.Blocks(css=css) as demo:
94
  gr.Markdown("French Tutor", elem_id="custom-title")
95
-
96
  with gr.Column(elem_id="chat-panel"):
97
  with gr.Accordion("⚙️ Advanced Settings", open=False):
98
  level = gr.Dropdown(
@@ -103,57 +87,12 @@ with gr.Blocks(css=css) as demo:
103
  max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Response Length")
104
  temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Creativity")
105
  top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Dynamic Text")
106
-
107
  gr.ChatInterface(
108
  fn=respond,
109
- additional_inputs=[
110
- level, # replace system_message
111
- max_tokens, temperature, top_p
112
- ]
113
- )
114
-
115
-
116
- with gr.Blocks(css=css) as demo:
117
- # Title Markdown block
118
- gr.Markdown("French Tutor", elem_id="custom-title")
119
-
120
- with gr.Accordion("⚙️ Advanced Settings", open=False):
121
- level = gr.Dropdown(
122
- choices=["A1", "A2", "B1", "B2", "C1", "C2"],
123
- value="A1",
124
- label="Your French Level (CEFR)"
125
- )
126
- max_tokens = gr.Slider(
127
- minimum=1,
128
- maximum=2048,
129
- value=512,
130
- step=1,
131
- label="Response Length"
132
- )
133
- temperature = gr.Slider(
134
- minimum=0.1,
135
- maximum=4.0,
136
- value=0.7,
137
- step=0.1,
138
- label="Creativity"
139
- )
140
- top_p = gr.Slider(
141
- minimum=0.1,
142
- maximum=1.0,
143
- value=0.95,
144
- step=0.05,
145
- label="Dynamic Text"
146
- )
147
-
148
- gr.ChatInterface(
149
- respond,
150
- additional_inputs=[
151
- level, #Pass level now
152
- max_tokens,
153
- temperature,
154
- top_p
155
- ]
156
  )
157
 
158
  if __name__ == "__main__":
159
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
# 💡 Dynamic prompt builder based on CEFR level.
# The mapping is a module constant so the dict is built once at import
# time instead of on every call.
_LEVEL_PROMPTS = {
    "A1": "You are a friendly French tutor. Speak mostly in English, use simple French, and explain everything.",
    "A2": "You are a patient French tutor. Use short French phrases and explain them in English.",
    "B1": "You are a helpful French tutor. Speak mostly in French but clarify in English when needed.",
    "B2": "You are a French tutor. Speak primarily in French with rare English support.",
    "C1": "You are a native French tutor. Speak entirely in French, clearly and professionally.",
    "C2": "You are a native French professor. Speak in rich, complex French. Avoid English.",
}


def level_to_prompt(level):
    """Return the system prompt for CEFR *level*, falling back to a generic tutor prompt."""
    return _LEVEL_PROMPTS.get(level, "You are a helpful French tutor.")
14
+
15
+ # Custom background CSS
16
  css = """
17
  @import url('https://fonts.googleapis.com/css2?family=Noto+Sans+JP&family=Playfair+Display&display=swap');
18
 
 
49
  margin-bottom: 1.5rem !important;
50
  width: 100%;
51
  }
 
 
 
52
  """
53
 
54
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
55
 
56
# Chat logic
def respond(message, history, level, max_tokens, temperature, top_p):
    """Stream a tutor reply for *message*, yielding the growing response text.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list
        Prior turns. With ``gr.ChatInterface(type="messages")`` each entry is
        a ``{"role": ..., "content": ...}`` dict; legacy callers may still
        pass ``(user, assistant)`` tuples — both forms are accepted.
    level : str
        CEFR level used to select the system prompt.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to the inference client.
    """
    messages = [{"role": "system", "content": level_to_prompt(level)}]

    for entry in history:
        if isinstance(entry, dict):
            # messages-format history: forward role/content, skip empty turns
            if entry.get("content"):
                messages.append({"role": entry["role"], "content": entry["content"]})
        else:
            # legacy tuples format: (user_text, assistant_text)
            user, bot = entry
            if user:
                messages.append({"role": "user", "content": user})
            if bot:
                messages.append({"role": "assistant", "content": bot})

    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
    ):
        # delta.content can be None (e.g. on the terminating chunk) —
        # guard before concatenating so we don't raise TypeError.
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
75
 
76
+ # UI layout
 
 
 
 
 
 
 
 
 
 
77
  with gr.Blocks(css=css) as demo:
78
  gr.Markdown("French Tutor", elem_id="custom-title")
79
+
80
  with gr.Column(elem_id="chat-panel"):
81
  with gr.Accordion("⚙️ Advanced Settings", open=False):
82
  level = gr.Dropdown(
 
87
  max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Response Length")
88
  temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Creativity")
89
  top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Dynamic Text")
90
+
91
  gr.ChatInterface(
92
  fn=respond,
93
+ additional_inputs=[level, max_tokens, temperature, top_p],
94
+ type="messages" # prevents deprecation warning
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  )
96
 
97
  if __name__ == "__main__":
98
+ demo.launch()