simonraj committed on
Commit
aff19d1
·
0 Parent(s):

Duplicate from simonraj/ELOralCoachv1

Browse files
Files changed (6) hide show
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. SBC6.jpg +0 -0
  4. app.py +65 -0
  5. data6.py +32 -0
  6. requirements.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: OralCoachStreamingEL
3
+ emoji: 📉
4
+ colorFrom: yellow
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 3.42.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: simonraj/ELOralCoachv1
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
SBC6.jpg ADDED
app.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import openai
3
+ import os
4
+ import data6 # Importing the data6 module
5
+ import base64
6
+
7
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
8
+ openai.api_key = OPENAI_API_KEY
9
+
10
def image_to_base64(img_path):
    """Return the contents of the file at *img_path* as a base64 text string.

    The file is read in binary mode; the base64 bytes are decoded to UTF-8
    so the result can be embedded directly in an HTML data URI.
    """
    with open(img_path, "rb") as fh:
        raw_bytes = fh.read()
    encoded = base64.b64encode(raw_bytes)
    return encoded.decode('utf-8')
13
+
14
# Inline the header picture as a base64 data URI so Gradio can render it in
# the interface description without serving a separate static file.
img_base64 = image_to_base64("SBC6.jpg")
img_html = (
    '<img src="data:image/jpg;base64,'
    + img_base64
    + '" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
)
16
+
17
+
18
def predict(question_choice, audio):
    """Transcribe the student's spoken answer and stream back coaching feedback.

    Parameters
    ----------
    question_choice : str
        One of the entries of ``data6.questions`` (chosen via the Radio input).
        Raises ``ValueError`` if the string is not in that list.
    audio : str
        Filesystem path of the recorded audio clip.

    Yields
    ------
    str
        The feedback text accumulated so far; Gradio re-renders the output on
        each yield, producing a streaming effect.
    """
    # Transcribe the audio using Whisper
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    message = transcript["text"]

    # Map the chosen question back to its index so the matching answering
    # strategy can be looked up in data6.strategy_text.
    current_question_index = data6.questions.index(question_choice)

    # Construct the conversation with the system and user's message
    conversation = [
        {
            "role": "system",
            "content": f"You are an expert English Language Teacher in a Singapore Primary school, directly guiding a Primary 6 student in Singapore. The student is answering the question: '{data6.questions[current_question_index]}'. Based on their response, provide direct feedback to help them improve their spoken skills. Emphasize areas of strength, suggest areas of improvement, and guide them on how to better answer using the {data6.strategy_text[current_question_index][0]} strategy. The feedback should be in second person, addressing the student directly."
        },
        {"role": "user", "content": message}
    ]

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=conversation,
        temperature=0.7,
        max_tokens=300,  # Limiting the response to 300 tokens
        stream=True
    )

    partial_message = ""
    for chunk in response:
        # BUGFIX: the first streamed delta typically carries only
        # {"role": "assistant"} — it is non-empty but has no "content" key,
        # so the old `delta['content']` lookup raised KeyError.  Use .get()
        # and only emit when there is actual text.
        content = chunk['choices'][0]['delta'].get('content')
        if content:
            partial_message = partial_message + content
            yield partial_message
50
+
51
def get_image_html():
    """Return a Markdown snippet embedding the SBC6 stimulus picture."""
    markdown_image = "![](SBC6.jpg)"  # Markdown syntax to embed the image
    return markdown_image
53
+
54
# Gradio Interface
# NOTE(review): with sdk_version 3.42.0 (see README), the pre-3.x API used
# here is stale: Radio's `default=` keyword was renamed to `value=`, and the
# gr.inputs.* namespace was replaced by top-level components — confirm
# against the pinned gradio version.
iface = gr.Interface(
    fn=predict,
    inputs=[
        # Radio for question choice, pre-selected to the first question.
        gr.Radio(data6.questions, label="Choose a question", value=data6.questions[0]),
        gr.Audio(source="microphone", type="filepath"),  # Audio input
    ],
    # Textbox output (editable by default in gradio 3.x).
    outputs=gr.Textbox(),
    description=img_html,
)
# queue() is required so the generator returned by predict() streams.
iface.queue().launch()
65
+
data6.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Answering strategies keyed by question index.  Each value is a
# (short_name, full_explanation) tuple; element [0] is interpolated into the
# question text below and into the feedback prompt built in app.py.
strategy_text = {
    0: ("PEEL strategy (Point, Evidence, Experience(s), Link back to the question)", "Use the PEEL strategy to structure your response. Start with a Point, then provide Evidence, share an Experience, and finally Link back to the question."),
    1: ("5W1H thinking frame (Who, What, Where, When, Why, How)", "Use the 5W1H thinking frame to answer the question. Address Who, What, Where, When, Why, and How in your response."),
    2: ("OREO thinking frame (Opening Statement, Reasons, Elaborate, Opinion)", "Use the OREO thinking frame to guide your response. Start with an Opening Statement, provide Reasons, Elaborate on them, and conclude with your Opinion.")
}

# One-line description of the stimulus picture (SBC6.jpg) shown to the student.
description = (
    "The picture shows a Poem contest entry form to celebrate Teachers’ Day."
)

# The oral-exam questions offered in the app's Radio input; each embeds the
# short name of its matching strategy from strategy_text above.  Question
# order must stay aligned with the strategy_text keys, since app.py maps a
# chosen question back to its list index.
questions = [
    f"1. Look at the picture. Would you be interested to do this activity? Why? / Why not? (Strategy: {strategy_text[0][0]})",
    f"2. How does your school celebrate Teachers’ Day? (Strategy: {strategy_text[1][0]})",
    f"3. Other than teachers, who else would you like to show your appreciation to, and why? (Strategy: {strategy_text[2][0]})",
]
16
+
17
def generate_system_message(current_question_index):
    """Return the coaching system prompt for the question at *current_question_index*.

    Looks up the (strategy name, explanation) pair in ``strategy_text`` and
    interpolates it into a fixed coaching script.  Raises ``KeyError`` if the
    index is not a key of ``strategy_text``.

    NOTE(review): app.py builds its own system message inline and does not
    appear to call this helper — confirm whether it is still needed.
    """
    strategy, explanation = strategy_text[current_question_index]
    system_message = f"""
As your English Oral Coach, my role is to guide you as you prepare to answer the oral questions. I'll be asking thought-provoking questions to help you develop your own answers.

Now, let's focus on {strategy}. {explanation}

Along the way, I'll prompt you to clarify your thoughts, explore key terms, challenge your reasoning, and reflect on the discussion.

Once we've thoroughly explored each part of the strategy, I'll assist you in assembling your thoughts into a comprehensive and eloquent response using the insights we've gathered.

Remember, our ultimate goal is to enhance your critical thinking skills and independence. Try to use sophisticated vocabulary and expressions, and refer to the picture where relevant to support your response.

Please ensure your response is in English.
"""
    return system_message
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ openai