Spaces: Runtime error
Commit · 8d66a4e
Duplicate from simonraj/OralCoachDemo
Files changed:
- .gitattributes +35 -0
- README.md +14 -0
- SBC4.jpg +0 -0
- app.py +112 -0
- data4.py +20 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
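Note: these rules route large binary and model formats through Git LFS. SBC4.jpg, added in this same commit, is not matched by any of the patterns above; tracking images through LFS would plausibly take one more rule of the same form (a sketch, not part of the commit):

    *.jpg filter=lfs diff=lfs merge=lfs -text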
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: OralCoachDemo
+emoji: 📚
+colorFrom: purple
+colorTo: yellow
+sdk: gradio
+sdk_version: 3.42.0
+app_file: app.py
+pinned: false
+license: openrail
+duplicated_from: simonraj/OralCoachDemo
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
SBC4.jpg
ADDED
app.py
ADDED
@@ -0,0 +1,112 @@
+import os
+import openai
+import gradio as gr
+import base64
+from data4 import strategy_text, description, questions
+
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+openai.api_key = OPENAI_API_KEY
+
+def transcribe_audio(audio_file_path):
+    # Use OpenAI's Whisper to transcribe the audio
+    with open(audio_file_path, "rb") as audio_file:
+        transcript = openai.Audio.transcribe("whisper-1", audio_file)
+    return transcript["text"]
+
+def get_base64_image():
+    with open("SBC4.jpg", "rb") as img_file:
+        return base64.b64encode(img_file.read()).decode("utf-8")
+
+def get_image_html():
+    return (
+        f"<img src='data:image/jpeg;base64,{get_base64_image()}' style='display: block; margin-left: auto; margin-right: auto; padding-bottom: 15px; width: 300px;'>"
+    )
+
+current_question_index = 0
+user_input_counter = 0
+conversation_history = []
+
+def intelligent_tutor(audio_file, provide_hints=False):
+    global current_question_index
+    global questions
+    global user_input_counter
+    global conversation_history
+
+    input_text = transcribe_audio(audio_file)
+    current_question = questions[current_question_index]
+
+    if provide_hints:
+        # If hints are requested, provide guidance on how to answer using the strategy text
+        hint_message = f"Consider using the {strategy_text[current_question_index]} to answer the question: '{questions[current_question_index]}'."
+        return f"Respond to this Question: {questions[current_question_index]}", hint_message
+
+    conversation = [
+        {
+            "role": "system",
+            "content": f"You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore. The student is answering the question: '{questions[current_question_index]}'. Based on their response, provide direct feedback to help them improve their spoken skills. Emphasize areas of strength, suggest areas of improvement, and guide them on how to better answer using the {strategy_text[current_question_index]} strategy. The feedback should be in second person, addressing the student directly."
+        },
+        {"role": "user", "content": input_text}
+    ]
+
+    # Append the user's response to the conversation history
+    conversation_history.append(input_text)
+
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=conversation,
+        max_tokens=400
+    )
+
+    if not response.choices:
+        return "No response from the model.", ""
+
+    text_response = response.choices[0]['message']['content'].strip()
+    text_response = text_response.replace('\n', '<br>')
+
+    user_input_counter += 1
+
+    if user_input_counter % 2 == 0:
+        if current_question_index + 1 < len(questions):
+            current_question_index += 1
+            next_question = questions[current_question_index]
+            text_response += f"\n\nNext question ({current_question_index + 1}): {next_question}"
+        else:
+            # All questions have been answered, provide a summary
+            summary_prompt = {
+                "role": "system",
+                "content": f"Based on the entire conversation, provide a detailed feedback summary highlighting the overall performance, strengths, and areas of improvement. Reference the student's responses and evaluate how well they used the {strategy_text[current_question_index]} strategy to structure their answers. Format the feedback in bullet points."
+            }
+            summary_conversation = [summary_prompt, {"role": "user", "content": " ".join(conversation_history)}]
+
+            summary_response = openai.ChatCompletion.create(
+                model="gpt-3.5-turbo",
+                messages=summary_conversation,
+                max_tokens=600  # Increased token limit for detailed summary
+            )
+
+            if not summary_response.choices:
+                return "No response from the model.", ""
+
+            text_response = summary_response.choices[0]['message']['content'].strip()
+            text_response = text_response.replace('\n', '<br>')
+
+    wrapped_output_text = f'<div style="height: 300px; overflow-y: scroll;">{text_response}</div>'
+    return f"Current Question: {questions[current_question_index]}", wrapped_output_text
+
+iface = gr.Interface(
+    fn=intelligent_tutor,
+    inputs=[
+        gr.Audio(source="microphone", type="filepath", label="Record audio", sampling_rate=16000),
+        gr.inputs.Checkbox(label="Provide Summary of Conversation"),  # toggles the provide_hints path
+    ],
+    outputs=[
+        gr.outputs.HTML(label="Question"),
+        gr.outputs.HTML(label="Output Text"),
+    ],
+    title="Oral Coach for Stimulus Based Conversation",
+    description=(get_image_html() +
+                 "<br> " + questions[0] +
+                 "<br>You have two attempts for each question.<br>" +
+                 "<b>Please answer the displayed question at the output screen after the 1st Question.</b>"),
+)
+iface.launch(share=False)
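Note: app.py targets the pre-1.0 openai SDK surface (openai.Audio.transcribe, openai.ChatCompletion.create), which was removed in openai>=1.0. A minimal transcription sketch against the newer client, should the dependency ever be upgraded (an assumption, not part of the commit; reply.wav is a hypothetical sample recording):

    from openai import OpenAI  # openai>=1.0 client (assumed upgrade)

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    # Same Whisper model the app uses, via the post-1.0 endpoint
    with open("reply.wav", "rb") as audio_file:
        transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    print(transcript.text)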
data4.py
ADDED
@@ -0,0 +1,20 @@
+# data4.py
+
+strategy_text = {
+    0: "Use the PEEL strategy (Point, Evidence, Experience(s), Link back to the question).",
+    1: "Use the 5W1H thinking frame (Who, What, Where, When, Why, How) to generate ideas.",
+    2: "Use the OREO thinking frame (Opening Statement, Reasons, Elaborate, Opinion) to guide the student."
+}
+
+description = (
+    "Poster calling for nominations for the Supportive Friend Award. It shows 3 pictures of 2 students exhibiting acts of friendship. "
+    "The first picture shows 2 students, 1 of whom seems to be helping the other with their homework; "
+    "the second image shows two students playing a game of chess; and the 3rd picture shows 2 friends having a conversation, with "
+    "the 3 respective captions: helps me, plays with me and listens to me."
+)
+
+questions = [
+    "1. Look at the picture. Would you like to have a schoolmate like this? Why? / Why not?",
+    "2. Do you like making new friends? Why? / Why not?",
+    "3. Would you like to be a buddy to a younger pupil? Why? / Why not?",
+]
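Note: strategy_text is keyed by the same indices that questions uses, so question i is always coached with thinking frame i. A quick pairing check (a sketch, not part of the commit):

    from data4 import strategy_text, questions

    assert len(strategy_text) == len(questions)  # three frames, three questions
    for i, q in enumerate(questions):
        print(q, "->", strategy_text[i])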
requirements.txt
ADDED
@@ -0,0 +1 @@
+openai
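Note: only openai is pinned because the Space injects Gradio itself from the sdk/sdk_version fields in README.md. A local run would plausibly need both; an equivalent local requirements file might look like this (a sketch; the <1.0 bound is an assumption, since app.py uses the pre-1.0 openai.Audio/ChatCompletion surface):

    openai<1.0
    gradio==3.42.0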