Upload 20 files
- .gitattributes +18 -0
- app.py +425 -0
- audio/Ses02F_impro01.wav +3 -0
- audio/Ses02F_impro02.wav +3 -0
- audio/Ses02F_impro03.wav +3 -0
- audio/Ses02F_impro04.wav +3 -0
- audio/Ses02F_impro05.wav +3 -0
- audio/Ses02F_impro06.wav +3 -0
- audio/Ses02F_impro07.wav +3 -0
- audio/Ses02F_script01_1.wav +3 -0
- audio/Ses02F_script01_2.wav +3 -0
- audio/Ses02F_script01_3.wav +3 -0
- audio/Ses02F_script02_2.wav +3 -0
- audio/Ses02F_script03_1.wav +3 -0
- audio/Ses02F_script03_2.wav +3 -0
- audio/sample1.wav +3 -0
- audio/sample2.wav +3 -0
- audio/sample3.wav +3 -0
- audio/sample4.wav +3 -0
- audio/sample5.wav +3 -0
- requirements.txt +2 -0
.gitattributes
CHANGED
@@ -33,3 +33,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+audio/sample1.wav filter=lfs diff=lfs merge=lfs -text
+audio/sample2.wav filter=lfs diff=lfs merge=lfs -text
+audio/sample3.wav filter=lfs diff=lfs merge=lfs -text
+audio/sample4.wav filter=lfs diff=lfs merge=lfs -text
+audio/sample5.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro01.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro02.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro03.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro04.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro05.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro06.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_impro07.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script01_1.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script01_2.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script01_3.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script02_2.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script03_1.wav filter=lfs diff=lfs merge=lfs -text
+audio/Ses02F_script03_2.wav filter=lfs diff=lfs merge=lfs -text
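All of the .wav files tracked above live in Git LFS, so a clone made without `git lfs` only materializes the small pointer stubs (the `version https://git-lfs.github.com/spec/v1` blocks listed further down). The startup check at the bottom of app.py only calls os.path.exists, which an un-smudged pointer also satisfies. A minimal sketch of a stricter check; the helper name is illustrative and not part of this repo:

import os

LFS_POINTER_HEADER = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    # An un-smudged LFS file starts with the pointer spec line instead of RIFF audio data.
    with open(path, "rb") as f:
        return f.read(len(LFS_POINTER_HEADER)) == LFS_POINTER_HEADER

if __name__ == "__main__":
    for name in ("audio/sample1.wav", "audio/Ses02F_impro01.wav"):
        if os.path.exists(name) and is_lfs_pointer(name):
            print(f"{name} is still an LFS pointer; run `git lfs pull` to fetch the real audio")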
app.py
ADDED
@@ -0,0 +1,425 @@
import gradio as gr
import os
import json

# ==============================================================================
# Data definitions
# ==============================================================================
DIMENSIONS_DATA = [
    {
        "title": "语义和语用特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“语义和语用特征”维度的文本描述示例。",
        "sub_dims": [
            "记忆一致性:回应者是否能够正确记忆并延续对话信息?是否存在对上下文的误解或不自洽?", "逻辑连贯性:回应者是否在语义与对话结构上保持前后一致、合乎逻辑?是否存在前后矛盾的情况?",
            "常见多音字处理:是否能在上下文中正确使用常见多音字?", "多语言混杂:是否存在自然的语言切换现象?如中英混杂、文化化表达。",
            "语言不精确性:是否出现打断、自纠正等类人语言行为?是否存在如“差不多”、“可能吧”这类表达不确定性的用法?", "填充词使用:如“呃”、“嗯”等自然语流中的停顿或过渡词,使用是否得体且自然?",
            "隐喻与语用用意:是否展现出复杂的语用功能(如讽刺、劝阻、暗示等),以及对潜在含义层次的理解能力?"
        ],
        "reference": """
            <p>🔴 <strong>记忆一致性:</strong> 在说话人明确提出自己已经中年后,回应者仍做出了他是青少年的错误假定</p>
            <p>🔴 <strong>逻辑连贯性:</strong> 回应者在第一轮对话中说他说的话并不重要,但在第二轮对话中说他说的话“能够改变你的一生”</p>
            <p>🔴 <strong>常见多音字处理:</strong> 该条对话中未出现多音字</p>
            <p>🟢 <strong>多语言混杂:</strong> 回应者在回复中夹杂了"I see",回复中存在多语言混杂</p>
            <p>🔴 <strong>语言不精确性:</strong> 回应者使用的语言中未夹杂任何的不确定性</p>
            <p>🟢 <strong>填充词使用:</strong> 回应者在回复中使用了“嗯”这个填充词</p>
            <p>🔴 <strong>隐喻与语用用意:</strong> 回应者误将说话人的挖苦当成了真心的赞扬</p>
        """
    },
    {
        "title": "非生理性副语言特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“非生理性副语言特征”维度的文本描述示例。",
        "sub_dims": [
            "节奏:回应者是否存在自然的停顿?语速是否存在自然、流畅的变化?", "语调:在表达疑问、惊讶、强调时,回应者的音调是否会自然上扬或下降?是否表现出符合语境的变化?",
            "重读:是否存在句中关键词上有意识地加重语气?", "辅助性发声:是否存在叹气、短哼、笑声等辅助情绪的非语言性发声?这些发声是否在语境中正确表达了情绪或意图?"
        ],
        "reference": """
            <p>🟢 <strong>节奏:</strong> 回应者的语速变化、停顿都较为自然</p>
            <p>🔴 <strong>语调:</strong> 回应者的音调不存在显著变化</p>
            <p>🔴 <strong>重读:</strong> 回应者语气不存在显著变化</p>
            <p>🔴 <strong>辅助性发声:</strong> 尽管回应者发出了叹气的声音,但是该发声并未传递出语境下应有的失落情绪</p>
        """
    },
    {
        "title": "生理性副语言特征",
        "audio": "audio/sample1.wav",
        "desc": "这是“生理性副语言特征”维度的文本描述示例。",
        "sub_dims": [
            "微生理杂音:回应中是否出现如呼吸声、口水音、气泡音等无意识发声?这些发声是否自然地穿插在恰当的语流节奏当中?",
            "发音不稳定性:回应者是否出现连读、颤音、鼻音等不稳定发音?", "口音:(如果存在的话)回应者的口音是否自然?是否存在机械式的元辅音发音风格?"
        ],
        "reference": """
            <p>🔴 <strong>微生理杂音:</strong> 回应中不存在任何无意识发声</p>
            <p>🔴 <strong>发音不稳定性:</strong> 回应者的咬字清晰、发音标准</p>
            <p>🟢 <strong>口音:</strong> 回应者的口音自然</p>
        """
    },
    {
        "title": "机械人格",
        "audio": "audio/sample1.wav",
        "desc": "这是“机械人格”维度的文本描述示例。",
        "sub_dims": [
            "谄媚现象:回应者是否频繁地赞同用户、重复用户的说法、不断表达感谢或道歉?是否存在“无论用户说什么都肯定或支持”的语气模式?",
            "书面化表达:回应的内容是否缺乏口语化特征?句式是否整齐划一、结构完整却缺乏真实交流中的松散感或灵活性?是否使用抽象或泛泛的措辞来回避具体问题?"
        ],
        "reference": """
            <p>🟢 <strong>谄媚现象:</strong> 回应者并未明显表现出谄媚现象的特征</p>
            <p>🔴 <strong>书面化表达:</strong> 回应的内容结构过于缜密,符合书面用语特征</p>
        """
    },
    {
        "title": "情感表达",
        "audio": "audio/sample1.wav",
        "desc": "这是“情感表达”维度的文本描述示例。",
        "sub_dims": [
            "语义层面:回应者的语言内容是否体现出符合上下文的情绪反应?是否表达了人类对某些情境应有的情感态度?",
            "声学层面:回应者的声音情绪是否与语义一致?语调是否有自然的高低起伏来表达情绪变化?是否出现回应内容与声音传达出的情绪不吻合的现象?"
        ],
        "reference": """
            <p>🟢 <strong>语义层面:</strong> 说话者阐述了一件伤心的事情,而回应者的语言内容中体现出了恰当的悲伤情绪</p>
            <p>🔴 <strong>声学层面:</strong> 回应者的语音特征与情感表达不匹配。语言内容中表达出了悲伤的情感,但语音特征平淡、缺少变化</p>
        """
    }
]
DIMENSION_TITLES = [d["title"] for d in DIMENSIONS_DATA]
QUESTION_SET = [
    {"audio": "audio/Ses02F_impro01.wav", "desc": "这是第一个测试文件的描述"},
    {"audio": "audio/Ses02F_impro02.wav", "desc": "这是第二个测试文件的描述"},
    {"audio": "audio/Ses02F_impro03.wav", "desc": "这是第三个测试文件的描述"},
]

# ==============================================================================
# Function definitions
# ==============================================================================
def start_challenge():
    return gr.update(visible=False), gr.update(visible=True)

def toggle_education_other(choice):
    is_other = (choice == "其他(请注明)")
    return gr.update(visible=is_other, interactive=is_other, value="")

def check_info_complete(age, gender, education, education_other):
    if age and gender and education:
        if education == "其他(请注明)" and not education_other.strip():
            return gr.update(interactive=False)
        return gr.update(interactive=True)
    return gr.update(interactive=False)

def show_sample_page_and_init(age, gender, education, education_other, user_data):
    final_edu = education_other if education == "其他(请注明)" else education
    user_data.update({"age": age, "gender": gender, "education": final_edu})
    first_dim_title = DIMENSION_TITLES[0]
    return (
        gr.update(visible=False),
        gr.update(visible=True),
        user_data,
        first_dim_title,
    )

def update_sample_view(dimension_title):
    dim_data = next((d for d in DIMENSIONS_DATA if d["title"] == dimension_title), None)
    if dim_data:
        return (
            gr.update(value=dim_data["audio"]),
            gr.update(value=dim_data["desc"]),
            gr.update(choices=dim_data["sub_dims"], value=[], interactive=True),
            gr.update(value=dim_data["reference"])
        )
    return gr.update(), gr.update(), gr.update(), gr.update()

def init_test_question(user_data, q_idx):
    d_idx = 0
    question = QUESTION_SET[q_idx]
    dimension = DIMENSIONS_DATA[d_idx]

    progress_q = f"第 {q_idx + 1} / {len(QUESTION_SET)} 题"
    progress_d = f"维度 {d_idx + 1} / {len(DIMENSIONS_DATA)}: **{dimension['title']}**"

    return (
        gr.update(visible=False),
        gr.update(visible=True),
        gr.update(visible=False),
        q_idx, d_idx, {},
        progress_q, progress_d,
        gr.update(value=question['audio']),
        gr.update(value=question['desc']),
        gr.update(choices=dimension['sub_dims'], value=[]),
        gr.update(value=None),
        gr.update(interactive=False),
        gr.update(interactive=False, value="下一维度"),
    )

def activate_nav_buttons(choice, d_idx):
    is_interactive = choice is not None
    prev_interactive = is_interactive and d_idx > 0
    return gr.update(interactive=prev_interactive), gr.update(interactive=is_interactive)

def navigate_dimension(direction, d_idx, selections_so_far, sub_dim_selection, human_robot_selection):
    current_dim_title = DIMENSIONS_DATA[d_idx]['title']
    selections_so_far[current_dim_title] = {
        "sub_dims": sub_dim_selection,
        "choice": human_robot_selection
    }

    new_d_idx = d_idx + 1 if direction == "next" else d_idx - 1

    dimension = DIMENSIONS_DATA[new_d_idx]
    progress_d = f"维度 {new_d_idx + 1} / {len(DIMENSIONS_DATA)}: **{dimension['title']}**"

    new_dim_title = dimension['title']
    prev_selections = selections_so_far.get(new_dim_title, {"sub_dims": [], "choice": None})

    is_interactive = prev_selections['choice'] is not None
    prev_btn_interactive = new_d_idx > 0 and is_interactive
    next_btn_text = "提交本题答案" if new_d_idx == len(DIMENSIONS_DATA) - 1 else "下一维度"

    return (
        new_d_idx, selections_so_far,
        progress_d,
        gr.update(choices=dimension['sub_dims'], value=prev_selections['sub_dims']),
        gr.update(value=prev_selections['choice']),
        gr.update(interactive=prev_btn_interactive),
        gr.update(value=next_btn_text, interactive=is_interactive),
    )

def submit_question_and_advance(q_idx, d_idx, selections_so_far, sub_dim_selection, human_robot_selection, all_results, user_data):
    current_dim_title = DIMENSIONS_DATA[d_idx]['title']
    selections_so_far[current_dim_title] = {
        "sub_dims": sub_dim_selection,
        "choice": human_robot_selection
    }

    final_question_result = {
        "question_id": q_idx,
        "audio_file": QUESTION_SET[q_idx]['audio'],
        "user_data": user_data,
        "selections": selections_so_far
    }
    all_results.append(final_question_result)

    q_idx += 1

    if q_idx < len(QUESTION_SET):
        # Initialize the next question
        init_outputs = init_test_question(user_data, q_idx)
        # Pad the return tuple so it matches the unified output list
        return init_outputs[1:] + (all_results, gr.update(),)
    else:
        # Show the results page
        result_str = "### 测试全部完成!\n\n你的提交结果概览:\n"
        for res in all_results:
            result_str += f"\n#### 题目: {res['audio_file']}\n"
            for dim_title, dim_data in res['selections'].items():
                choice = dim_data.get('choice', '未选择')
                sel_str = ', '.join(dim_data['sub_dims']) if dim_data['sub_dims'] else '无'
                result_str += f"- **{dim_title}** (判断: {choice}): {sel_str}\n"

        save_all_results_to_file(all_results, user_data)

        return (
            gr.update(visible=False), gr.update(visible=True),
            q_idx, d_idx, {},
            "", "", gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(),
            all_results, result_str
        )

def save_all_results_to_file(all_results, user_data):
    username = user_data.get("age", "user")  # the age bracket doubles as the file identifier
    # pd is imported in the __main__ block below; the timestamp keeps filenames unique
    filename = f"final_results_{username}_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.json"
    results_dir = "test_results"
    if not os.path.exists(results_dir): os.makedirs(results_dir)
    results_file = os.path.join(results_dir, filename)

    with open(results_file, 'w', encoding='utf-8') as f:
        json.dump(all_results, f, ensure_ascii=False, indent=4)
    print(f"所有结果已保存到文件:{results_file}")

def toggle_reference_view(current):
    if current == "参考": return gr.update(visible=False), gr.update(visible=True), gr.update(value="返回")
    else: return gr.update(visible=True), gr.update(visible=False), gr.update(value="参考")

def back_to_welcome():
    return (
        gr.update(visible=True), {}, 0, 0, {}, [],
        gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
        gr.update(visible=False), gr.update(visible=False)
    )
# ==============================================================================
# Gradio UI definition
# ==============================================================================
with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 960px !important}") as demo:
    # --- State variables ---
    user_data_state = gr.State({})
    current_question_index = gr.State(0)
    current_test_dimension_index = gr.State(0)
    current_question_selections = gr.State({})
    test_results = gr.State([])

    # --- Pages ---
    welcome_page, info_page, sample_page, pretest_page, test_page, result_page = [gr.Column(visible=v) for v in [True, False, False, False, False, False]]

    # --- Page 1: Welcome ---
    with welcome_page:
        gr.Markdown("# AI 识破者\n你将听到一系列对话,请判断哪个回应者是 AI。")
        start_btn = gr.Button("开始挑战", variant="primary")

    # --- Page 2: Basic information ---
    with info_page:
        gr.Markdown("## 请提供一些基本信息")
        age_input = gr.Radio(["18岁以下", "18-25岁", "26-35岁", "36-50岁", "50岁以上"], label="年龄")
        gender_input = gr.Radio(["男", "女", "其他"], label="性别")
        education_input = gr.Radio(["高中及以下", "本科", "硕士", "博士", "其他(请注明)"], label="学历")
        education_other_input = gr.Textbox(label="请填写你的学历", visible=False, interactive=False)
        submit_info_btn = gr.Button("提交并开始学习样例", variant="primary", interactive=False)

    # --- Page 3: Sample study ---
    with sample_page:
        gr.Markdown("## 样例分析\n请选择一个维度进行学习。所有维度共用同一个样例音频。")
        sample_dimension_selector = gr.Radio(DIMENSION_TITLES, label="选择学习维度", value=DIMENSION_TITLES[0])
        with gr.Row():
            with gr.Column(scale=1):
                sample_audio = gr.Audio(label="样例音频", value=DIMENSIONS_DATA[0]["audio"])
                sample_desc = gr.Textbox(label="文本描述", interactive=False, value=DIMENSIONS_DATA[0]["desc"])
            with gr.Column(scale=2):
                with gr.Column(visible=True) as interactive_view:
                    interactive_checkbox_group = gr.CheckboxGroup(label="维度特征", choices=DIMENSIONS_DATA[0]["sub_dims"], interactive=True)
                with gr.Column(visible=False) as reference_view:
                    gr.Markdown("### 参考答案解析")
                    reference_text = gr.Markdown(value=DIMENSIONS_DATA[0]["reference"])
        reference_btn = gr.Button("参考")
        go_to_pretest_btn = gr.Button("我明白了,开始测试", variant="primary")

    # --- Page 4: Test instructions ---
    with pretest_page:
        gr.Markdown("## 测试说明\n"
                    "- 对于每一道题,你都需要对全部 **5 个维度** 进行评估。\n"
                    "- 在每个维度下,你可以勾选任意多个特征,但 **必须** 做出“人类”或“机器人”的判断。\n"
                    "- 做出判断后,下方的导航按钮才可使用。\n"
                    "- 你可以使用“上一维度”和“下一维度”按钮在5个维度间自由切换和修改答案。\n"
                    "- 在最后一个维度,按钮会变为“提交本题答案”,点击即可进入下一题。")
        go_to_test_btn = gr.Button("开始测试", variant="primary")

    # --- Page 5: Test ---
    with test_page:
        gr.Markdown("## 正式测试")
        question_progress_text = gr.Markdown()
        test_dimension_title = gr.Markdown()
        test_audio = gr.Audio(label="测试音频")
        test_desc = gr.Textbox(label="文本描述", interactive=False)
        test_checkbox_group = gr.CheckboxGroup(label="请选择该回应者表现出的特征 (选填)")
        human_robot_radio = gr.Radio(["👤 人类", "🤖 机器人"], label="请判断回应者类型 (必填)")
        with gr.Row():
            prev_dim_btn = gr.Button("上一维度", interactive=False)
            next_dim_btn = gr.Button("下一维度", variant="primary", interactive=False)

    # --- Page 6: Results ---
    with result_page:
        gr.Markdown("## 测试完成")
        result_text = gr.Markdown()
        back_to_welcome_btn = gr.Button("返回主界面", variant="primary")

    # ==========================================================================
    # Event bindings
    # ==========================================================================
    start_btn.click(fn=start_challenge, outputs=[welcome_page, info_page])
    for comp in [age_input, gender_input, education_input, education_other_input]:
        comp.change(fn=check_info_complete, inputs=[age_input, gender_input, education_input, education_other_input], outputs=submit_info_btn)
    education_input.change(fn=toggle_education_other, inputs=education_input, outputs=education_other_input)
    submit_info_btn.click(fn=show_sample_page_and_init, inputs=[age_input, gender_input, education_input, education_other_input, user_data_state], outputs=[info_page, sample_page, user_data_state, sample_dimension_selector])
    sample_dimension_selector.change(fn=update_sample_view, inputs=sample_dimension_selector, outputs=[sample_audio, sample_desc, interactive_checkbox_group, reference_text])
    reference_btn.click(fn=toggle_reference_view, inputs=reference_btn, outputs=[interactive_view, reference_view, reference_btn])

    go_to_pretest_btn.click(
        fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
        inputs=None,
        outputs=[sample_page, pretest_page]
    )

    # Unified output list containing every component that may be updated
    unified_outputs = [
        test_page, result_page,
        current_question_index, current_test_dimension_index, current_question_selections,
        question_progress_text, test_dimension_title,
        test_audio, test_desc, test_checkbox_group, human_robot_radio,
        prev_dim_btn, next_dim_btn,
        test_results, result_text
    ]

    go_to_test_btn.click(
        fn=lambda user: init_test_question(user, 0) + ([], gr.update()),
        inputs=[user_data_state],
        outputs=[pretest_page] + unified_outputs
    )

    human_robot_radio.change(
        fn=activate_nav_buttons,
        inputs=[human_robot_radio, current_test_dimension_index],
        outputs=[prev_dim_btn, next_dim_btn]
    )

    def unified_router(direction, q_idx, d_idx, selections, subs, choice, results, user):
        # Case 1: "submit this question" was clicked on the last dimension
        if direction == "next" and d_idx == len(DIMENSIONS_DATA) - 1:
            (
                pg1_upd, pg2_upd, n_q, n_d, n_sel,
                q_prog, d_prog, aud, dsc, cb, rad, prev_b, next_b,
                new_res, res_txt
            ) = submit_question_and_advance(q_idx, d_idx, selections, subs, choice, results, user)

            return pg1_upd, pg2_upd, n_q, n_d, n_sel, q_prog, d_prog, aud, dsc, cb, rad, prev_b, next_b, new_res, res_txt

        # Case 2: "previous dimension" / "next dimension" was clicked (not the last dimension)
        else:
            (
                n_d, n_sel, d_prog, cb_upd, rad_upd, prev_b_upd, next_b_upd
            ) = navigate_dimension(direction, d_idx, selections, subs, choice)

            return (
                gr.update(), gr.update(),  # 2 pages
                q_idx, n_d, n_sel,  # 3 states
                gr.update(), d_prog,  # 2 progress texts
                gr.update(), gr.update(), cb_upd, rad_upd,  # 4 components
                prev_b_upd, next_b_upd,  # 2 buttons
                results, gr.update()  # results state + result text
            )

    prev_dim_btn.click(
        fn=lambda q, d, s, sub, hr, r, u: unified_router("prev", q, d, s, sub, hr, r, u),
        inputs=[current_question_index, current_test_dimension_index, current_question_selections, test_checkbox_group, human_robot_radio, test_results, user_data_state],
        outputs=unified_outputs
    )

    next_dim_btn.click(
        fn=lambda q, d, s, sub, hr, r, u: unified_router("next", q, d, s, sub, hr, r, u),
        inputs=[current_question_index, current_test_dimension_index, current_question_selections, test_checkbox_group, human_robot_radio, test_results, user_data_state],
        outputs=unified_outputs
    )

    back_to_welcome_btn.click(
        fn=back_to_welcome,
        outputs=[welcome_page, user_data_state, current_question_index,
                 current_test_dimension_index, current_question_selections, test_results,
                 info_page, sample_page, pretest_page, test_page, result_page]
    )

# ==============================================================================
# Entry point
# ==============================================================================
if __name__ == "__main__":
    # On first run, install pandas if it is missing
    try:
        import pandas as pd
    except ImportError:
        print("正在安装 pandas 库,请稍候...")
        import subprocess
        import sys
        subprocess.check_call([sys.executable, "-m", "pip", "install", "pandas"])
        import pandas as pd
        print("pandas 安装完成。")

    if not os.path.exists("audio"): os.makedirs("audio")
    all_files = [q["audio"] for q in QUESTION_SET] + ["audio/sample1.wav"]
    for audio_file in set(all_files):
        if not os.path.exists(audio_file): print(f"⚠️ 警告:缺失音频文件 {audio_file}")
    demo.launch(debug=True)
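For reference, each element that submit_question_and_advance appends to all_results (and that save_all_results_to_file dumps into test_results/final_results_<age>_<timestamp>.json) has the shape sketched below; only the keys come from the code, the concrete values are made up for illustration:

example_result = {
    "question_id": 0,
    "audio_file": "audio/Ses02F_impro01.wav",
    "user_data": {"age": "18-25岁", "gender": "女", "education": "本科"},
    "selections": {
        "语义和语用特征": {
            "sub_dims": [],       # checkbox labels picked for this dimension (optional)
            "choice": "👤 人类",  # the mandatory human/robot judgement
        },
        # ... one entry per title in DIMENSIONS_DATA
    },
}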
audio/Ses02F_impro01.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
audio/Ses02F_impro02.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35fb8470ead9dce319fcadca1d08fd00cbedd7837116ae564f124e3ca8624412
size 4256940
audio/Ses02F_impro03.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a1fbfbae812e02712a2a7a34244fe41ac37964d50c45ef99ff099c31fc19206
size 1708076
audio/Ses02F_impro04.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ffb2349fb6befd8d1fd94d60e79aa04bde056c7740a94b16c29f477164eb56f
size 3133420
audio/Ses02F_impro05.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60e493da9ebbd0b379bcfa2cfd523392f25ca3e242b3a8fa19677cf5ad22e115
size 3198188
audio/Ses02F_impro06.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:18978a25da8b3ccd2b2479924be305012ecd494fef016851c2f8da94e19a8858
size 2835500
audio/Ses02F_impro07.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91b8f39d666d784c92aaeed059944fe453e7756c2233344391bd9aa8824890fa
size 1460716
audio/Ses02F_script01_1.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f93570fc45486a67e2f67e1de21998659b1a1d026c5ee63cdf72fcd919639529
size 2073644
audio/Ses02F_script01_2.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bfa99397fea63eb1692a3181e24d72d6e04948d3868fffe15c6c754d5fa95df1
size 1594284
audio/Ses02F_script01_3.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce5e9c91411cbcbbee8ae0096aed9ab7815f9b09dba6a2357de4ec5a9eda380c
size 2190764
audio/Ses02F_script02_2.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a200b4975d09f71b2d5b7625d1e51a33a756e1cad24512c28a6ebd7404c4803d
size 2220844
audio/Ses02F_script03_1.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6950cf1965e179933a27c6695f2725f416986e3dda6b966f389ea598fceb06d2
size 1202604
audio/Ses02F_script03_2.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e785671f0540afd8ea1bdcc3e896e977705b295c8581733fafb92874f1a0532b
size 1886764
audio/sample1.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
audio/sample2.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
audio/sample3.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
audio/sample4.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
audio/sample5.wav
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f69cf3999b506bed60487bd200b693fa6b94868cc089a50787b6fb0446be8559
size 2236140
requirements.txt
ADDED
@@ -0,0 +1,2 @@
gradio
pandas
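requirements.txt pins only gradio and pandas; in app.py pandas is used just for the timestamp in result filenames, but it is also convenient for inspecting saved runs. A rough sketch, assuming the test_results/ directory that save_all_results_to_file produces (nothing below ships with the Space itself):

import glob
import json

import pandas as pd

rows = []
for path in glob.glob("test_results/final_results_*.json"):
    with open(path, encoding="utf-8") as f:
        for result in json.load(f):
            for dimension, sel in result["selections"].items():
                rows.append({
                    "source_file": path,
                    "audio": result["audio_file"],
                    "dimension": dimension,
                    "choice": sel["choice"],
                    "n_features": len(sel["sub_dims"]),
                })

df = pd.DataFrame(rows)
if not df.empty:
    # Quick overview: how often each dimension was judged human vs. robot.
    print(df.groupby(["dimension", "choice"]).size())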