CCockrum committed on
Commit
211a88e
·
verified ·
1 Parent(s): 7237d8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -62
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Custom background CSS with semi-transparent panel
5
  css = """
6
  body {
7
  background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/YcsJnPk8HJvXiB5WkVmf1.jpeg');
@@ -28,99 +28,84 @@ body {
28
  box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
29
  overflow-y: auto;
30
  }
31
- /* Improved title styling */
32
  .gradio-container .chatbot h1 {
33
  color: var(--custom-title-color) !important;
34
  font-family: 'Noto Sans', serif !important;
35
- font-size: 5rem !important; /* Increased font size */
36
  font-weight: bold !important;
37
  text-align: center !important;
38
  margin-bottom: 1.5rem !important;
39
- width: 100%; /* Ensure full width for centering */
40
  }
41
  """
42
 
 
43
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
44
 
45
- def respond(
46
- message,
47
- history,
48
- system_message,
49
- max_tokens,
50
- temperature,
51
- top_p,
52
- ):
 
 
 
 
 
 
53
  messages = [{"role": "system", "content": system_message}]
54
 
55
- # Handle history based on its format (tuple format or messages format)
56
  if history and isinstance(history[0], tuple):
57
- # Old tuple format
58
  for user_msg, assistant_msg in history:
59
  if user_msg:
60
  messages.append({"role": "user", "content": user_msg})
61
  if assistant_msg:
62
  messages.append({"role": "assistant", "content": assistant_msg})
63
  else:
64
- # New messages format
65
  messages.extend(history)
66
-
67
  messages.append({"role": "user", "content": message})
68
 
69
  response = ""
70
-
71
- for msg in client.chat_completion(
72
- messages,
73
- max_tokens=max_tokens,
74
- stream=True,
75
- temperature=temperature,
76
- top_p=top_p,
77
- ):
78
- token = msg.choices[0].delta.content
79
- response += token
80
- yield response
 
 
 
81
 
 
82
  with gr.Blocks(css=css) as demo:
83
- # Title Markdown block
84
  gr.Markdown("French Instructor", elem_id="custom-title")
85
 
86
  with gr.Column(elem_id="chat-panel"):
87
- with gr.Accordion("Advanced Settings", open=False):
88
- system_message = gr.Textbox(
89
- value="You are a helpful French language tutor. You help users learn French vocabulary, grammar, and cultural contexts. When appropriate, include both the French writing and pronunciation. For beginners, focus on simple phrases and gradually increase difficulty based on user proficiency.",
90
- label="System Message"
 
91
  )
92
- max_tokens = gr.Slider(
93
- minimum=1,
94
- maximum=2048,
95
- value=512,
96
- step=1,
97
- label="Response Length"
98
- )
99
- temperature = gr.Slider(
100
- minimum=0.1,
101
- maximum=4.0,
102
- value=0.7,
103
- step=0.1,
104
- label="Creativity"
105
- )
106
- top_p = gr.Slider(
107
- minimum=0.1,
108
- maximum=1.0,
109
- value=0.95,
110
- step=0.05,
111
- label="Dynamic Text"
112
- )
113
-
114
  gr.ChatInterface(
115
  respond,
116
- additional_inputs=[
117
- system_message,
118
- max_tokens,
119
- temperature,
120
- top_p
121
- ],
122
- type="messages" # Set to new message format
123
  )
124
 
125
  if __name__ == "__main__":
126
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ # Background CSS
5
  css = """
6
  body {
7
  background-image: url('https://cdn-uploads.huggingface.co/production/uploads/67351c643fe51cb1aa28f2e5/YcsJnPk8HJvXiB5WkVmf1.jpeg');
 
28
  box-shadow: 0 0 12px rgba(0, 0, 0, 0.3);
29
  overflow-y: auto;
30
  }
 
31
  .gradio-container .chatbot h1 {
32
  color: var(--custom-title-color) !important;
33
  font-family: 'Noto Sans', serif !important;
34
+ font-size: 5rem !important;
35
  font-weight: bold !important;
36
  text-align: center !important;
37
  margin-bottom: 1.5rem !important;
38
+ width: 100%;
39
  }
40
  """
41
 
42
+ # Model client (consider switching to a public model like mistralai if 401 persists)
43
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
44
 
45
# Level prompt selector
def level_to_prompt(level):
    """Return the tutor system prompt for a CEFR level code.

    Any unrecognized level falls back to a generic French-tutor prompt.
    """
    prompts = {
        "A1": "You are a friendly French tutor. Speak mostly in English, use simple French, and explain everything.",
        "A2": "You are a patient French tutor. Use short French phrases and explain them in English.",
        "B1": "You are a helpful French tutor. Speak mostly in French but clarify in English when needed.",
        "B2": "You are a French tutor. Speak primarily in French with rare English support.",
        "C1": "You are a native French tutor. Speak entirely in French, clearly and professionally.",
        "C2": "You are a native French professor. Speak in rich, complex French. Avoid English.",
    }
    return prompts.get(level, "You are a helpful French tutor.")
55
+
56
# Chat handler
def respond(message, history, user_level, max_tokens, temperature, top_p):
    """Stream a tutor reply for *message*, yielding the growing partial text.

    The CEFR *user_level* selects the system prompt via level_to_prompt.
    *history* may arrive either as legacy (user, assistant) tuples or as
    OpenAI-style role/content dicts; both are folded into the message list.
    On any API failure, a single apologetic error string is yielded instead.
    """
    messages = [{"role": "system", "content": level_to_prompt(user_level)}]

    # Normalize chat history into role/content messages.
    if history and isinstance(history[0], tuple):
        # Legacy tuple pairs: append each non-empty side in order.
        for user_turn, assistant_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
    else:
        # Already in messages format.
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = ""
    try:
        stream = client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            token = chunk.choices[0].delta.content
            # Skip empty/None deltas so the partial text stays clean.
            if token:
                response += token
                yield response
    except Exception as e:
        yield f"Désolé! There was an error: {str(e)}"
88
 
89
# Gradio interface: title, collapsible tuning controls, and the chat widget.
with gr.Blocks(css=css) as demo:
    gr.Markdown("French Instructor", elem_id="custom-title")

    with gr.Column(elem_id="chat-panel"):
        # Generation settings live behind a collapsed accordion.
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            user_level = gr.Dropdown(
                choices=["A1", "A2", "B1", "B2", "C1", "C2"],
                value="A1",
                label="Your French Level (CEFR)",
            )
            max_tokens = gr.Slider(
                minimum=1,
                maximum=2048,
                value=512,
                step=1,
                label="Response Length",
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=4.0,
                value=0.7,
                step=0.1,
                label="Creativity",
            )
            top_p = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Dynamic Text Sampling",
            )

        # Wire the streaming handler; extra controls feed respond() after
        # (message, history). type="messages" selects dict-style history.
        gr.ChatInterface(
            respond,
            additional_inputs=[user_level, max_tokens, temperature, top_p],
            type="messages",
        )

if __name__ == "__main__":
    demo.launch()