SreekarB committed on
Commit 2e9cc46 · verified · 1 Parent(s): de976bf

Create annotated_casl_app.py

Files changed (1):
  annotated_casl_app.py ADDED (+1840 lines)
import gradio as gr
import json
import os
import logging
import requests
import re
import time

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Anthropic API key
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY", "")

if ANTHROPIC_API_KEY:
    logger.info("Claude API key found")
else:
    logger.warning("Claude API key not found - using demo mode")

def call_claude_api(prompt):
    """Call Claude API for annotation or analysis"""
    if not ANTHROPIC_API_KEY:
        return "❌ Claude API key not configured. Please set ANTHROPIC_API_KEY environment variable."

    try:
        headers = {
            "Content-Type": "application/json",
            "x-api-key": ANTHROPIC_API_KEY,
            "anthropic-version": "2023-06-01"
        }

        data = {
            "model": "claude-3-5-sonnet-20241022",
            "max_tokens": 4096,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        }

        response = requests.post(
            "https://api.anthropic.com/v1/messages",
            headers=headers,
            json=data,
            timeout=90
        )

        if response.status_code == 200:
            response_json = response.json()
            return response_json['content'][0]['text']
        else:
            logger.error(f"Claude API error: {response.status_code} - {response.text}")
            return f"❌ Claude API Error: {response.status_code}"

    except Exception as e:
        logger.error(f"Error calling Claude API: {str(e)}")
        return f"❌ Error: {str(e)}"

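# Example (hypothetical) usage of call_claude_api -- note that errors are
# returned as "❌ ..." strings rather than raised, so callers should check
# the prefix:
#
#     result = call_claude_api("Count the fillers in this sample: ...")
#     if result.startswith("❌"):
#         logger.error(result)
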
def check_annotation_completeness(original_transcript, annotated_transcript):
    """Check if annotation is complete by verifying last 3 words are present"""
    # Clean and extract words from original transcript
    original_words = re.findall(r'\b\w+\b', original_transcript.strip())
    if len(original_words) < 3:
        return True, "Transcript too short to validate"

    # Get last 3 words from original
    last_three_words = original_words[-3:]

    # Clean annotated transcript (remove markers but keep words)
    cleaned_annotated = re.sub(r'\[.*?\]', '', annotated_transcript)
    annotated_words = re.findall(r'\b\w+\b', cleaned_annotated.strip())

    # Check if all last 3 words appear in the annotated transcript
    missing_words = []
    for word in last_three_words:
        if word.lower() not in [w.lower() for w in annotated_words]:
            missing_words.append(word)

    if missing_words:
        return False, f"Annotation appears incomplete. Missing words from end: {', '.join(missing_words)}"

    # Additional check: verify the last few words appear near the end
    if len(annotated_words) > 0:
        last_annotated_words = annotated_words[-10:]  # Check last 10 words
        last_original_in_annotated = sum(1 for word in last_three_words
                                         if word.lower() in [w.lower() for w in last_annotated_words])

        if last_original_in_annotated < 2:  # At least 2 of the last 3 should be near the end
            return False, f"Annotation may be incomplete. Last words '{', '.join(last_three_words)}' not found near end of annotation"

    return True, "Annotation appears complete"

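# A minimal sketch of the completeness check (illustrative values only):
#
#     ok, msg = check_annotation_completeness(
#         "the dog ran home",
#         "the dog[GENERIC] ran home",
#     )
#     # -> (True, "Annotation appears complete")
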
def annotate_transcript(transcript_content, age, gender, slp_notes):
    """First step: Annotate transcript with linguistic markers"""
    if not transcript_content or len(transcript_content.strip()) < 50:
        return "Error: Please provide a longer transcript for annotation."

    # Add SLP notes to the prompt if provided
    notes_section = ""
    if slp_notes and slp_notes.strip():
        notes_section = f"""

SLP CLINICAL NOTES:
{slp_notes.strip()}
"""

    annotation_prompt = f"""
You are a speech-language pathologist preparing a transcript for detailed analysis. Your task is to ANNOTATE the ENTIRE transcript with linguistic markers at a WORD-BY-WORD level.

Patient: {age}-year-old {gender}

ORIGINAL TRANSCRIPT:
{transcript_content}{notes_section}

CRITICAL REQUIREMENT: You MUST annotate the COMPLETE transcript. Do NOT provide partial annotations or stop mid-sentence. Complete the ENTIRE transcript annotation in one response.

DETAILED ANNOTATION INSTRUCTIONS:
Annotate by adding markers in brackets IMMEDIATELY after each relevant word or phrase:

FLUENCY MARKERS:
- [FILLER] after: um[FILLER], uh[FILLER], like[FILLER], you know[FILLER], well[FILLER], so[FILLER]
- [FALSE_START] after incomplete words: "I was go-[FALSE_START] going"
- [REPETITION] after repeated words: "I I[REPETITION] went"
- [REVISION] after self-corrections: "I went to the-[REVISION] I mean"
- [PAUSE] for hesitations: "I was...[PAUSE] thinking"

WORD RETRIEVAL MARKERS:
- [CIRCUMLOCUTION] after roundabout descriptions: "that thing you write with[CIRCUMLOCUTION]"
- [INCOMPLETE] after abandoned thoughts: "I was thinking about the...[INCOMPLETE]"
- [GENERIC] after vague terms: thing[GENERIC], stuff[GENERIC], whatsit[GENERIC]
- [WORD_SEARCH] after searching: "the... um...[WORD_SEARCH] car"

GRAMMATICAL MARKERS:
- [GRAM_ERROR] after mistakes: "I goed[GRAM_ERROR]", "He don't[GRAM_ERROR]"
- [SYNTAX_ERROR] after word order problems: "Yesterday I to store went[SYNTAX_ERROR]"
- [MORPH_ERROR] after morphological errors: "runned[MORPH_ERROR]", "childs[MORPH_ERROR]"
- [RUN_ON] at end of run-on sentences

VOCABULARY MARKERS:
- [SIMPLE_VOCAB] after basic words: go[SIMPLE_VOCAB], big[SIMPLE_VOCAB], good[SIMPLE_VOCAB]
- [COMPLEX_VOCAB] after sophisticated words: magnificent[COMPLEX_VOCAB], elaborate[COMPLEX_VOCAB]
- [SEMANTIC_ERROR] after wrong word choices: "drove my bicycle[SEMANTIC_ERROR]"

PRAGMATIC MARKERS:
- [TOPIC_SHIFT] after topic changes: "Anyway, about cats[TOPIC_SHIFT]"
- [TANGENT] after going off-topic: "Speaking of dogs, my vacation[TANGENT]"
- [INAPPROPRIATE] after inappropriate content
- [COHERENCE_BREAK] after illogical statements

SENTENCE COMPLEXITY MARKERS:
- [SIMPLE_SENT] after simple sentences: "I went home.[SIMPLE_SENT]"
- [COMPLEX_SENT] after complex sentences: "When I got home, I made dinner.[COMPLEX_SENT]"
- [COMPOUND_SENT] after compound sentences: "I went home, and made dinner.[COMPOUND_SENT]"
- [FIGURATIVE] after metaphors/idioms: "raining cats and dogs[FIGURATIVE]"

ADDITIONAL MARKERS:
- [PRONOUN_REF] after unclear pronouns: "He told him that he[PRONOUN_REF] was wrong"
- [MAZING] after confusing constructions
- [PERSEVERATION] after repetitive patterns

MANDATORY REQUIREMENTS:
1. Do NOT stop until the entire transcript is complete
2. Keep ALL original text intact
3. Mark overlapping features when applicable
4. Be consistent throughout
5. Complete the annotation in ONE response - no partial outputs allowed

PROVIDE THE COMPLETE ANNOTATED TRANSCRIPT - EVERY WORD MUST BE PROCESSED.
"""

    # Get initial annotation
    annotated_result = call_claude_api(annotation_prompt)

    # Check if annotation is complete
    is_complete, validation_message = check_annotation_completeness(transcript_content, annotated_result)

    if not is_complete:
        logger.warning(f"Annotation incomplete: {validation_message}")

        # Try once more with stronger emphasis on completion
        retry_prompt = f"""
CRITICAL: The previous annotation was INCOMPLETE. You MUST complete the ENTIRE transcript.

{validation_message}

ORIGINAL TRANSCRIPT (COMPLETE THIS ENTIRELY):
{transcript_content}{notes_section}

MANDATORY REQUIREMENT: Annotate EVERY SINGLE WORD from start to finish. Do not stop until you reach the very last word of the transcript.

{annotation_prompt.split('DETAILED ANNOTATION INSTRUCTIONS:')[1]}

VERIFY: The last words of the original transcript are: {' '.join(transcript_content.strip().split()[-3:])}
ENSURE these words appear at the END of your annotated transcript.
"""

        retry_result = call_claude_api(retry_prompt)

        # Check retry
        retry_complete, retry_message = check_annotation_completeness(transcript_content, retry_result)

        if retry_complete:
            logger.info("Retry successful - annotation now complete")
            return retry_result
        else:
            logger.error(f"Retry failed: {retry_message}")
            return f"⚠️ ANNOTATION INCOMPLETE: {retry_message}\n\nPartial annotation:\n{retry_result}"

    logger.info("Annotation completed successfully")
    return annotated_result

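# Two-step usage sketch (hypothetical arguments): annotation runs first, and the
# incomplete-annotation warning prefix decides how downstream analysis treats it:
#
#     annotated = annotate_transcript(transcript, age=8, gender="male",
#                                     slp_notes="Mild word-finding difficulty")
#     if annotated.startswith("⚠️ ANNOTATION INCOMPLETE"):
#         ...  # analysis falls back to the original transcript as primary source
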
def analyze_annotated_transcript(annotated_transcript, age, gender, slp_notes):
    """Second step: Analyze the annotated transcript with comprehensive quantification"""
    if not annotated_transcript or len(annotated_transcript.strip()) < 50:
        return "Error: Please provide an annotated transcript for analysis."

    # Add SLP notes to the prompt if provided
    notes_section = ""
    if slp_notes and slp_notes.strip():
        notes_section = f"""

SLP CLINICAL NOTES:
{slp_notes.strip()}
"""

    # Strip the annotation markers to recover the original wording for backup
    # analysis (equivalent to removing each of the 27 defined markers in turn)
    marker_pattern = (
        r'\[(?:FILLER|FALSE_START|REPETITION|REVISION|PAUSE|CIRCUMLOCUTION|'
        r'INCOMPLETE|GENERIC|WORD_SEARCH|GRAM_ERROR|SYNTAX_ERROR|MORPH_ERROR|'
        r'RUN_ON|SIMPLE_VOCAB|COMPLEX_VOCAB|SEMANTIC_ERROR|TOPIC_SHIFT|TANGENT|'
        r'INAPPROPRIATE|COHERENCE_BREAK|SIMPLE_SENT|COMPLEX_SENT|COMPOUND_SENT|'
        r'FIGURATIVE|PRONOUN_REF|MAZING|PERSEVERATION)\]'
    )
    stripped_transcript = re.sub(marker_pattern, '', annotated_transcript)

    analysis_prompt = f"""
You are a speech-language pathologist conducting a COMPREHENSIVE analysis of a word-by-word annotated speech sample. Count EVERY marker precisely and provide detailed quantitative analysis.

Patient: {age}-year-old {gender}

ANNOTATED TRANSCRIPT:
{annotated_transcript}{notes_section}

ORIGINAL TRANSCRIPT (for reference and backup analysis):
{stripped_transcript}

ANALYSIS INSTRUCTIONS:
Using the detailed linguistic markers in the annotated transcript, provide a comprehensive analysis with EXACT counts, percentages, and specific examples. If markers are missing or unclear, use the original transcript for backup analysis. Complete ALL sections below:

COMPREHENSIVE SPEECH SAMPLE ANALYSIS:

1. FLUENCY ANALYSIS (count each marker type):
- Count [FILLER] markers: List each instance and calculate rate per 100 words
- Count [FALSE_START] markers: List examples and analyze patterns
- Count [REPETITION] markers: Categorize by type (word, phrase, sound)
- Count [REVISION] markers: Analyze self-correction patterns
- Count [PAUSE] markers: Assess hesitation frequency
- Calculate total disfluency rate and severity level
- Determine impact on communication effectiveness

2. WORD RETRIEVAL ANALYSIS (precise counting):
- Count [CIRCUMLOCUTION] markers: List each roundabout description
- Count [INCOMPLETE] markers: Analyze abandoned thought patterns
- Count [GENERIC] markers: Calculate specificity ratio
- Count [WORD_SEARCH] markers: Identify retrieval difficulty areas
- Count [WORD_FINDING] markers: Assess overall retrieval efficiency
- Calculate word-finding accuracy percentage

3. GRAMMATICAL ANALYSIS (detailed error counting):
- Count [GRAM_ERROR] markers by subcategory:
  * Verb tense errors
  * Subject-verb agreement errors
  * Pronoun errors
  * Article errors
- Count [SYNTAX_ERROR] markers: Analyze word order problems
- Count [MORPH_ERROR] markers: Categorize morphological mistakes
- Count [RUN_ON] markers: Assess sentence boundary awareness
- Calculate grammatical accuracy rate (correct vs. total attempts)

4. VOCABULARY ANALYSIS (sophistication assessment):
- Count [SIMPLE_VOCAB] markers: List basic vocabulary used
- Count [COMPLEX_VOCAB] markers: List sophisticated vocabulary
- Count [SEMANTIC_ERROR] markers: Analyze word choice accuracy
- Calculate vocabulary sophistication ratio (complex/simple)
- Assess semantic appropriateness and precision
- Determine vocabulary diversity (type-token ratio)

5. PRAGMATIC LANGUAGE ANALYSIS (coherence assessment):
- Count [TOPIC_SHIFT] markers: Assess transition appropriateness
- Count [TANGENT] markers: Analyze tangential speech patterns
- Count [INAPPROPRIATE] markers: Evaluate contextual appropriateness
- Count [COHERENCE_BREAK] markers: Assess logical flow
- Count [PRONOUN_REF] markers: Analyze referential clarity
- Evaluate overall discourse coherence and organization

6. SENTENCE COMPLEXITY ANALYSIS (structural assessment):
- Count [SIMPLE_SENT] markers: Calculate simple sentence percentage
- Count [COMPLEX_SENT] markers: Analyze subordination use
- Count [COMPOUND_SENT] markers: Assess coordination patterns
- Count [FIGURATIVE] markers: Evaluate figurative language use
- Count [MAZING] markers: Assess confusing constructions
- Calculate syntactic complexity index

7. QUANTITATIVE METRICS (comprehensive calculations):
- Total word count and morpheme count
- Mean Length of Utterance (MLU) in words and morphemes
- Type-Token Ratio (TTR) for vocabulary diversity
- Clauses per utterance ratio
- Error rate per linguistic domain
- Communication efficiency index

8. ERROR PATTERN ANALYSIS:
- Most frequent error types with exact counts
- Error consistency vs. variability patterns
- Developmental appropriateness of errors
- Severity ranking of different error types
- Compensatory strategies observed

9. CLINICAL IMPLICATIONS:
- Primary strengths: List with supporting evidence
- Primary weaknesses: Rank by severity with counts
- Intervention priorities: Based on error frequency and impact
- Therapy targets: Specific, measurable goals
- Prognosis indicators: Based on error patterns and consistency

10. SUMMARY AND RECOMMENDATIONS:
- Overall communication profile with percentile estimates
- Priority treatment goals ranked by importance
- Functional communication impact assessment
- Recommended therapy approaches and frequency
- Follow-up assessment timeline

CRITICAL: Provide EXACT counts for every marker type, calculate precise percentages, and give specific examples from the transcript. Show your counting work clearly. Complete ALL 10 sections - use <CONTINUE> if needed.
"""

    return call_claude_api_with_continuation(analysis_prompt)

def calculate_linguistic_metrics(transcript_text):
    """Calculate comprehensive linguistic metrics from transcript"""
    import numpy as np

    if not transcript_text or not transcript_text.strip():
        return {}

    # Clean text and extract words
    cleaned_text = re.sub(r'\[.*?\]', '', transcript_text)  # Remove annotation markers
    sentences = re.split(r'[.!?]+', cleaned_text)
    sentences = [s.strip() for s in sentences if s.strip()]

    # Extract all words
    all_words = []
    for sentence in sentences:
        words = re.findall(r'\b\w+\b', sentence.lower())
        all_words.extend(words)

    if not all_words:
        return {}

    # Basic counts
    total_words = len(all_words)
    total_sentences = len(sentences)
    unique_words = len(set(all_words))

    # Type-Token Ratio
    ttr = unique_words / total_words if total_words > 0 else 0

    # Mean Length of Utterance (MLU)
    mlu_words = total_words / total_sentences if total_sentences > 0 else 0

    # Word frequency analysis
    word_freq = {}
    for word in all_words:
        word_freq[word] = word_freq.get(word, 0) + 1

    # Sort by frequency
    sorted_word_freq = dict(sorted(word_freq.items(), key=lambda x: x[1], reverse=True))

    # Sentence length statistics
    sentence_lengths = []
    for sentence in sentences:
        words_in_sentence = len(re.findall(r'\b\w+\b', sentence))
        sentence_lengths.append(words_in_sentence)

    avg_sentence_length = np.mean(sentence_lengths) if sentence_lengths else 0
    std_sentence_length = np.std(sentence_lengths) if sentence_lengths else 0

    # Vocabulary sophistication (words > 6 characters as a proxy for complex vocabulary)
    complex_words = [word for word in all_words if len(word) > 6]
    vocabulary_sophistication = len(complex_words) / total_words if total_words > 0 else 0

    # Calculate morpheme count (approximate)
    morpheme_count = 0
    for word in all_words:
        # Basic morpheme counting (word + common suffixes)
        morpheme_count += 1
        if word.endswith(('s', 'ed', 'ing', 'er', 'est', 'ly')):
            morpheme_count += 1
        if word.endswith(('tion', 'sion', 'ness', 'ment', 'able', 'ible')):
            morpheme_count += 1

    mlu_morphemes = morpheme_count / total_sentences if total_sentences > 0 else 0

    return {
        'total_words': total_words,
        'total_sentences': total_sentences,
        'unique_words': unique_words,
        'type_token_ratio': round(ttr, 3),
        'mlu_words': round(mlu_words, 2),
        'mlu_morphemes': round(mlu_morphemes, 2),
        'avg_sentence_length': round(avg_sentence_length, 2),
        'sentence_length_std': round(std_sentence_length, 2),
        'vocabulary_sophistication': round(vocabulary_sophistication, 3),
        'word_frequency': dict(list(sorted_word_freq.items())[:20]),  # Top 20 most frequent
        'sentence_lengths': sentence_lengths,
        'complex_word_count': len(complex_words),
        'morpheme_count': morpheme_count,
        'tokenized_words': all_words,  # Added for lexical diversity analysis
        'cleaned_text': cleaned_text  # Added for lexical diversity analysis
    }

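# Worked example (illustrative numbers, not from a real sample): for the text
# "I went home. I made a big dinner." the function reports total_words=8,
# total_sentences=2, unique_words=7, MLU(words)=4.0, and a type-token ratio of
# 7/8 = 0.875 -- small samples inflate TTR, which is why the advanced measures
# below are preferred for clinical comparison.
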
def calculate_advanced_lexical_diversity(transcript_text):
    """Calculate advanced lexical diversity measures using the lexical-diversity library"""
    try:
        from lexical_diversity import lex_div as ld
        lexdiv_available = True
    except ImportError:
        lexdiv_available = False

    if not lexdiv_available:
        return {
            'library_available': False,
            'error': 'lexical-diversity library not installed. Install with: pip install lexical-diversity'
        }

    if not transcript_text or not transcript_text.strip():
        return {'library_available': True, 'error': 'No text provided'}

    # Clean text and prepare for lexical diversity analysis
    cleaned_text = re.sub(r'\[.*?\]', '', transcript_text)  # Remove annotation markers

    try:
        # Tokenize using lexical-diversity
        tokens = ld.tokenize(cleaned_text)

        if len(tokens) < 10:  # Need minimum tokens for meaningful analysis
            return {
                'library_available': True,
                'error': f'Insufficient tokens for analysis (need ≥10, got {len(tokens)})'
            }

        def safe_measure(func, *args, **kwargs):
            """Round a diversity measure to 4 places, or None if it cannot be computed"""
            try:
                return round(func(*args, **kwargs), 4)
            except Exception:
                return None

        # Calculate various lexical diversity measures
        diversity_measures = {}

        # Basic TTR (included for comparison, but noted as unreliable)
        diversity_measures['simple_ttr'] = round(ld.ttr(tokens), 4)

        # Recommended measures
        diversity_measures['root_ttr'] = safe_measure(ld.root_ttr, tokens)
        diversity_measures['log_ttr'] = safe_measure(ld.log_ttr, tokens)
        diversity_measures['maas_ttr'] = safe_measure(ld.maas_ttr, tokens)

        # MSTTR (Mean Segmental TTR) - only if enough tokens
        if len(tokens) >= 50:
            diversity_measures['msttr_50'] = safe_measure(ld.msttr, tokens, window_length=50)
        if len(tokens) >= 25:
            diversity_measures['msttr_25'] = safe_measure(ld.msttr, tokens, window_length=25)

        # MATTR (Moving Average TTR) - only if enough tokens
        if len(tokens) >= 50:
            diversity_measures['mattr_50'] = safe_measure(ld.mattr, tokens, window_length=50)
        if len(tokens) >= 25:
            diversity_measures['mattr_25'] = safe_measure(ld.mattr, tokens, window_length=25)

        # HDD (Hypergeometric Distribution D)
        diversity_measures['hdd'] = safe_measure(ld.hdd, tokens)

        # MTLD (Measure of Textual Lexical Diversity) - only if enough tokens
        if len(tokens) >= 50:
            diversity_measures['mtld'] = safe_measure(ld.mtld, tokens)
            diversity_measures['mtld_ma_wrap'] = safe_measure(ld.mtld_ma_wrap, tokens)
            diversity_measures['mtld_ma_bid'] = safe_measure(ld.mtld_ma_bid, tokens)

        return {
            'library_available': True,
            'token_count': len(tokens),
            'diversity_measures': diversity_measures,
            'tokens': tokens[:50]  # First 50 tokens for verification
        }

    except Exception as e:
        return {
            'library_available': True,
            'error': f'Error calculating lexical diversity: {str(e)}'
        }

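# Note: MTLD and HDD are generally regarded as less sensitive to text length
# than raw TTR, which is why simple_ttr is labeled "not recommended" in the
# report below. A quick sketch (hypothetical transcript variable):
#
#     result = calculate_advanced_lexical_diversity(transcript)
#     if result.get('library_available') and 'diversity_measures' in result:
#         print(result['diversity_measures'].get('mtld'))
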
def analyze_annotation_markers(annotated_transcript):
    """Analyze and count all annotation markers in the transcript with detailed word-level analysis"""
    if not annotated_transcript:
        return {}

    # Define all marker types
    marker_types = {
        'FILLER': r'\[FILLER\]',
        'FALSE_START': r'\[FALSE_START\]',
        'REPETITION': r'\[REPETITION\]',
        'REVISION': r'\[REVISION\]',
        'PAUSE': r'\[PAUSE\]',
        'CIRCUMLOCUTION': r'\[CIRCUMLOCUTION\]',
        'INCOMPLETE': r'\[INCOMPLETE\]',
        'GENERIC': r'\[GENERIC\]',
        'WORD_SEARCH': r'\[WORD_SEARCH\]',
        'GRAM_ERROR': r'\[GRAM_ERROR\]',
        'SYNTAX_ERROR': r'\[SYNTAX_ERROR\]',
        'MORPH_ERROR': r'\[MORPH_ERROR\]',
        'RUN_ON': r'\[RUN_ON\]',
        'SIMPLE_VOCAB': r'\[SIMPLE_VOCAB\]',
        'COMPLEX_VOCAB': r'\[COMPLEX_VOCAB\]',
        'SEMANTIC_ERROR': r'\[SEMANTIC_ERROR\]',
        'TOPIC_SHIFT': r'\[TOPIC_SHIFT\]',
        'TANGENT': r'\[TANGENT\]',
        'INAPPROPRIATE': r'\[INAPPROPRIATE\]',
        'COHERENCE_BREAK': r'\[COHERENCE_BREAK\]',
        'SIMPLE_SENT': r'\[SIMPLE_SENT\]',
        'COMPLEX_SENT': r'\[COMPLEX_SENT\]',
        'COMPOUND_SENT': r'\[COMPOUND_SENT\]',
        'FIGURATIVE': r'\[FIGURATIVE\]',
        'PRONOUN_REF': r'\[PRONOUN_REF\]',
        'MAZING': r'\[MAZING\]',
        'PERSEVERATION': r'\[PERSEVERATION\]'
    }

    # Count each marker type and extract the actual words
    marker_counts = {}
    marker_examples = {}
    marker_words = {}

    for marker_name, pattern in marker_types.items():
        matches = re.findall(pattern, annotated_transcript)
        marker_counts[marker_name] = len(matches)

        # Find examples with context and extract the actual words
        examples = []
        words = []

        # Find all instances of word[MARKER] pattern
        word_pattern = r'(\w+)' + pattern
        word_matches = re.finditer(word_pattern, annotated_transcript)

        for match in word_matches:
            word = match.group(1)
            words.append(word)

            # Get context around the match
            start = max(0, match.start() - 30)
            end = min(len(annotated_transcript), match.end() + 30)
            context = annotated_transcript[start:end].strip()
            examples.append(f'"{word}" in context: {context}')

        marker_examples[marker_name] = examples[:10]  # Keep first 10 examples
        marker_words[marker_name] = words

    # Calculate totals by category
    fluency_total = sum([marker_counts.get(m, 0) for m in ['FILLER', 'FALSE_START', 'REPETITION', 'REVISION', 'PAUSE']])
    grammar_total = sum([marker_counts.get(m, 0) for m in ['GRAM_ERROR', 'SYNTAX_ERROR', 'MORPH_ERROR', 'RUN_ON']])
    vocab_simple = marker_counts.get('SIMPLE_VOCAB', 0)
    vocab_complex = marker_counts.get('COMPLEX_VOCAB', 0)

    return {
        'marker_counts': marker_counts,
        'marker_examples': marker_examples,
        'marker_words': marker_words,
        'category_totals': {
            'fluency_issues': fluency_total,
            'grammar_errors': grammar_total,
            'simple_vocabulary': vocab_simple,
            'complex_vocabulary': vocab_complex,
            'vocab_sophistication_ratio': vocab_complex / (vocab_simple + vocab_complex) if (vocab_simple + vocab_complex) > 0 else 0
        }
    }

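# A minimal sketch of the marker counting (hypothetical annotated snippet):
#
#     res = analyze_annotation_markers("um[FILLER] I I[REPETITION] goed[GRAM_ERROR] home")
#     res['marker_counts']['FILLER']            # -> 1
#     res['marker_words']['GRAM_ERROR']         # -> ['goed']
#     res['category_totals']['fluency_issues']  # -> 2
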
def generate_comprehensive_analysis_report(annotated_transcript, original_transcript):
    """Generate the most comprehensive analysis combining manual counts, lexical diversity, and linguistic metrics"""
    if not annotated_transcript:
        return "No annotated transcript provided."

    # Get all three types of analysis
    linguistic_metrics = calculate_linguistic_metrics(original_transcript)
    marker_analysis = analyze_annotation_markers(annotated_transcript)
    lexical_diversity = calculate_advanced_lexical_diversity(original_transcript)

    # Calculate rates per 100 words
    total_words = linguistic_metrics.get('total_words', 0)

    report_lines = []
    report_lines.append("=" * 100)
    report_lines.append("COMPREHENSIVE SPEECH ANALYSIS REPORT")
    report_lines.append("Combining Manual Counts + Advanced Lexical Diversity + Linguistic Metrics")
    report_lines.append("=" * 100)
    report_lines.append("")

    # SECTION 1: BASIC STATISTICS
    report_lines.append("1. BASIC STATISTICS:")
    report_lines.append(f"   • Total words: {total_words}")
    report_lines.append(f"   • Total sentences: {linguistic_metrics.get('total_sentences', 0)}")
    report_lines.append(f"   • Unique words: {linguistic_metrics.get('unique_words', 0)}")
    report_lines.append(f"   • MLU (words): {linguistic_metrics.get('mlu_words', 0):.2f}")
    report_lines.append(f"   • MLU (morphemes): {linguistic_metrics.get('mlu_morphemes', 0):.2f}")
    report_lines.append(f"   • Average sentence length: {linguistic_metrics.get('avg_sentence_length', 0):.2f}")
    report_lines.append("")

    # SECTION 2: ADVANCED LEXICAL DIVERSITY MEASURES
    report_lines.append("2. ADVANCED LEXICAL DIVERSITY MEASURES:")
    if lexical_diversity.get('library_available', False) and 'diversity_measures' in lexical_diversity:
        measures = lexical_diversity['diversity_measures']
        report_lines.append(f"   • Token count for analysis: {lexical_diversity.get('token_count', 0)}")
        report_lines.append("")
        report_lines.append("   RECOMMENDED MEASURES:")

        if measures.get('root_ttr') is not None:
            report_lines.append(f"   • Root TTR: {measures['root_ttr']:.4f}")
        if measures.get('log_ttr') is not None:
            report_lines.append(f"   • Log TTR: {measures['log_ttr']:.4f}")
        if measures.get('maas_ttr') is not None:
            report_lines.append(f"   • Maas TTR: {measures['maas_ttr']:.4f}")
        if measures.get('hdd') is not None:
            report_lines.append(f"   • HDD (Hypergeometric Distribution D): {measures['hdd']:.4f}")

        report_lines.append("")
        report_lines.append("   MOVING WINDOW MEASURES:")
        if measures.get('msttr_25') is not None:
            report_lines.append(f"   • MSTTR (25-word window): {measures['msttr_25']:.4f}")
        if measures.get('msttr_50') is not None:
            report_lines.append(f"   • MSTTR (50-word window): {measures['msttr_50']:.4f}")
        if measures.get('mattr_25') is not None:
            report_lines.append(f"   • MATTR (25-word window): {measures['mattr_25']:.4f}")
        if measures.get('mattr_50') is not None:
            report_lines.append(f"   • MATTR (50-word window): {measures['mattr_50']:.4f}")

        report_lines.append("")
        report_lines.append("   MTLD MEASURES:")
        if measures.get('mtld') is not None:
            report_lines.append(f"   • MTLD: {measures['mtld']:.4f}")
        if measures.get('mtld_ma_wrap') is not None:
            report_lines.append(f"   • MTLD (moving average, wrap): {measures['mtld_ma_wrap']:.4f}")
        if measures.get('mtld_ma_bid') is not None:
            report_lines.append(f"   • MTLD (moving average, bidirectional): {measures['mtld_ma_bid']:.4f}")

        report_lines.append("")
        report_lines.append("   COMPARISON MEASURE:")
        report_lines.append(f"   • Simple TTR (not recommended): {measures.get('simple_ttr', 0):.4f}")

    else:
        report_lines.append("   ⚠️ Advanced lexical diversity measures not available")
        if 'error' in lexical_diversity:
            report_lines.append(f"   Error: {lexical_diversity['error']}")

    report_lines.append("")

    # SECTION 3: MANUAL ANNOTATION COUNTS
    report_lines.append("3. MANUAL ANNOTATION COUNTS:")
    marker_counts = marker_analysis['marker_counts']
    marker_words = marker_analysis['marker_words']

    # Group markers by category for organized reporting
    categories = {
        'FLUENCY MARKERS': ['FILLER', 'FALSE_START', 'REPETITION', 'REVISION', 'PAUSE'],
        'WORD RETRIEVAL MARKERS': ['CIRCUMLOCUTION', 'INCOMPLETE', 'GENERIC', 'WORD_SEARCH'],
        'GRAMMAR MARKERS': ['GRAM_ERROR', 'SYNTAX_ERROR', 'MORPH_ERROR', 'RUN_ON'],
        'VOCABULARY MARKERS': ['SIMPLE_VOCAB', 'COMPLEX_VOCAB', 'SEMANTIC_ERROR'],
        'PRAGMATIC MARKERS': ['TOPIC_SHIFT', 'TANGENT', 'INAPPROPRIATE', 'COHERENCE_BREAK', 'PRONOUN_REF'],
        'SENTENCE COMPLEXITY MARKERS': ['SIMPLE_SENT', 'COMPLEX_SENT', 'COMPOUND_SENT', 'FIGURATIVE'],
        'OTHER MARKERS': ['MAZING', 'PERSEVERATION']
    }

    for category, markers in categories.items():
        category_total = sum(marker_counts.get(marker, 0) for marker in markers)
        if category_total > 0:
            report_lines.append(f"   {category}:")

            for marker in markers:
                count = marker_counts.get(marker, 0)
                if count > 0:
                    rate = (count / total_words * 100) if total_words > 0 else 0
                    words_list = marker_words.get(marker, [])

                    report_lines.append(f"   • {marker}: {count} instances ({rate:.2f} per 100 words)")

                    if words_list:
                        # Count frequency of each word
                        word_freq = {}
                        for word in words_list:
                            word_freq[word] = word_freq.get(word, 0) + 1

                        # Sort by frequency
                        sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
                        word_summary = []
                        for word, freq in sorted_words[:8]:  # Top 8 most frequent
                            if freq > 1:
                                word_summary.append(f'"{word}" ({freq}x)')
                            else:
                                word_summary.append(f'"{word}"')

                        report_lines.append(f"     Words: {', '.join(word_summary)}")

            report_lines.append(f"   CATEGORY TOTAL: {category_total} instances")
            report_lines.append("")

    # SECTION 4: SUMMARY STATISTICS
    report_lines.append("4. SUMMARY STATISTICS:")
    category_totals = marker_analysis['category_totals']

    fluency_total = category_totals['fluency_issues']
    grammar_total = category_totals['grammar_errors']
    simple_vocab = category_totals['simple_vocabulary']
    complex_vocab = category_totals['complex_vocabulary']

    if total_words > 0:
        report_lines.append(f"   • Total fluency issues: {fluency_total} ({fluency_total/total_words*100:.2f} per 100 words)")
        report_lines.append(f"   • Total grammar errors: {grammar_total} ({grammar_total/total_words*100:.2f} per 100 words)")
        report_lines.append(f"   • Simple vocabulary: {simple_vocab} ({simple_vocab/total_words*100:.2f} per 100 words)")
        report_lines.append(f"   • Complex vocabulary: {complex_vocab} ({complex_vocab/total_words*100:.2f} per 100 words)")

        if simple_vocab + complex_vocab > 0:
            vocab_ratio = complex_vocab / (simple_vocab + complex_vocab)
            report_lines.append(f"   • Vocabulary sophistication ratio: {vocab_ratio:.3f}")

    # SECTION 5: WORD FREQUENCY ANALYSIS
    word_freq = linguistic_metrics.get('word_frequency', {})
    if word_freq:
        report_lines.append("")
        report_lines.append("5. MOST FREQUENT WORDS:")
        for i, (word, freq) in enumerate(list(word_freq.items())[:15], 1):
            percentage = (freq / total_words * 100) if total_words > 0 else 0
            report_lines.append(f"   {i:2d}. '{word}': {freq} times ({percentage:.1f}%)")

    report_lines.append("")
    report_lines.append("=" * 100)
    report_lines.append("END OF COMPREHENSIVE ANALYSIS REPORT")
    report_lines.append("=" * 100)

    return '\n'.join(report_lines)

def generate_manual_count_report(annotated_transcript):
    """Generate a basic manual count report (legacy function for compatibility)"""
    return generate_comprehensive_analysis_report(annotated_transcript, annotated_transcript)

def process_file(file):
    """Process uploaded transcript file"""
    if file is None:
        return "Please upload a file first."

    try:
        with open(file.name, 'r', encoding='utf-8', errors='ignore') as f:
            content = f.read()

        if not content.strip():
            return "File appears to be empty."

        return content
    except Exception as e:
        return f"Error reading file: {str(e)}"

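# Note: Gradio file-upload components hand back an object whose .name attribute
# is the path of a server-side temp copy, which is why the code opens file.name
# rather than reading the object directly.
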
def segment_response_by_sections(response_text):
    """Segment response by section titles and return a dictionary of sections"""
    required_sections = [
        "1. SPEECH FACTORS",
        "2. LANGUAGE SKILLS ASSESSMENT",
        "3. COMPLEX SENTENCE ANALYSIS",
        "4. FIGURATIVE LANGUAGE ANALYSIS",
        "5. PRAGMATIC LANGUAGE ASSESSMENT",
        "6. VOCABULARY AND SEMANTIC ANALYSIS",
        "7. MORPHOLOGICAL AND PHONOLOGICAL ANALYSIS",
        "8. COGNITIVE-LINGUISTIC FACTORS",
        "9. FLUENCY AND RHYTHM ANALYSIS",
        "10. QUANTITATIVE METRICS",
        "11. CLINICAL IMPLICATIONS",
        "12. PROGNOSIS AND SUMMARY"
    ]

    sections = {}
    current_section = None
    current_content = []

    lines = response_text.split('\n')

    for line in lines:
        # Check if this line is a section header
        is_section_header = False
        for section in required_sections:
            if section in line:
                # Save previous section if exists
                if current_section and current_content:
                    sections[current_section] = '\n'.join(current_content).strip()

                # Start new section
                current_section = section
                current_content = []
                is_section_header = True
                break

        # If not a section header, add to current section content
        if not is_section_header and current_section:
            current_content.append(line)

    # Save the last section
    if current_section and current_content:
        sections[current_section] = '\n'.join(current_content).strip()

    return sections

def combine_sections_smartly(sections_dict):
    """Combine sections in the correct order without duplicates"""
    required_sections = [
        "1. SPEECH FACTORS",
        "2. LANGUAGE SKILLS ASSESSMENT",
        "3. COMPLEX SENTENCE ANALYSIS",
        "4. FIGURATIVE LANGUAGE ANALYSIS",
        "5. PRAGMATIC LANGUAGE ASSESSMENT",
        "6. VOCABULARY AND SEMANTIC ANALYSIS",
        "7. MORPHOLOGICAL AND PHONOLOGICAL ANALYSIS",
        "8. COGNITIVE-LINGUISTIC FACTORS",
        "9. FLUENCY AND RHYTHM ANALYSIS",
        "10. QUANTITATIVE METRICS",
        "11. CLINICAL IMPLICATIONS",
        "12. PROGNOSIS AND SUMMARY"
    ]

    combined_parts = []
    combined_parts.append("COMPREHENSIVE SPEECH SAMPLE ANALYSIS")
    combined_parts.append("")

    for section in required_sections:
        if section in sections_dict:
            combined_parts.append(section)
            combined_parts.append("")
            combined_parts.append(sections_dict[section])
            combined_parts.append("")

    return '\n'.join(combined_parts)

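# The segment/combine pair acts as a de-duplicating merge: sections found in any
# response part are keyed by their canonical header, then reassembled in the
# fixed 1-12 order. A sketch (hypothetical response_text):
#
#     parts = segment_response_by_sections(response_text)
#     merged = combine_sections_smartly(parts)  # missing sections are simply omitted
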
def call_claude_api_with_continuation(prompt):
    """Call Claude API with a smart continuation system - continues until complete or safety limits are hit"""
    if not ANTHROPIC_API_KEY:
        return "❌ Claude API key not configured. Please set ANTHROPIC_API_KEY environment variable."

    # Define all required sections
    required_sections = [
        "1. SPEECH FACTORS",
        "2. LANGUAGE SKILLS ASSESSMENT",
        "3. COMPLEX SENTENCE ANALYSIS",
        "4. FIGURATIVE LANGUAGE ANALYSIS",
        "5. PRAGMATIC LANGUAGE ASSESSMENT",
        "6. VOCABULARY AND SEMANTIC ANALYSIS",
        "7. MORPHOLOGICAL AND PHONOLOGICAL ANALYSIS",
        "8. COGNITIVE-LINGUISTIC FACTORS",
        "9. FLUENCY AND RHYTHM ANALYSIS",
        "10. QUANTITATIVE METRICS",
        "11. CLINICAL IMPLICATIONS",
        "12. PROGNOSIS AND SUMMARY"
    ]

    # Safety limits to prevent infinite loops
    MAX_CONTINUATIONS = 30     # Increased from 20 to 30 API calls
    MAX_TIME_MINUTES = 15      # Increased from 10 to 15 minutes total
    MIN_PROGRESS_PER_CALL = 0  # Changed from 1 to 0 to allow more flexibility

    try:
        all_sections = {}  # Store all sections found across all parts
        continuation_count = 0
        start_time = time.time()
        last_section_count = 0  # Track progress between calls

        # Add continuation instruction to original prompt
        initial_prompt = prompt + "\n\nCRITICAL INSTRUCTIONS: You MUST complete ALL 12 sections of the analysis. If your response is cut off or incomplete, end with <CONTINUE> to indicate more content is needed. Do not skip any sections. Use the checklist to ensure all sections are completed."

        while True:  # Continue until complete or a safety limit trips
            if continuation_count == 0:
                current_prompt = initial_prompt
            else:
                # For continuations, provide context about what was already covered
                missing_sections = [s for s in required_sections if s not in all_sections]
                missing_text = "\n".join([f"- {section}" for section in missing_sections])

                current_prompt = prompt + f"\n\nCONTINUATION {continuation_count + 1}: The following sections are STILL MISSING and MUST be completed:\n\n{missing_text}\n\nCRITICAL: Provide ONLY these missing sections. Do not repeat any sections that are already complete. Focus exclusively on the missing sections listed above. Complete ALL missing sections in this response."

            headers = {
                "Content-Type": "application/json",
                "x-api-key": ANTHROPIC_API_KEY,
                "anthropic-version": "2023-06-01"
            }

            data = {
                "model": "claude-3-5-sonnet-20241022",
                "max_tokens": 4096,
                "messages": [
                    {
                        "role": "user",
                        "content": current_prompt
                    }
                ]
            }

            response = requests.post(
                "https://api.anthropic.com/v1/messages",
                headers=headers,
                json=data,
                timeout=90
            )

            if response.status_code == 200:
                response_json = response.json()
                response_text = response_json['content'][0]['text']

                # Log response for debugging
                print(f"\n=== PART {continuation_count + 1} RESPONSE ===")
                print(f"Length: {len(response_text)} characters")
                print(f"Contains CONTINUE: {'<CONTINUE>' in response_text}")
                print(f"First 200 chars: {response_text[:200]}...")
                print(f"Last 200 chars: {response_text[-200:]}...")
                print("=" * 50)

                # Segment this part and add new sections to our collection
                part_sections = segment_response_by_sections(response_text)
                for section, content in part_sections.items():
                    if section not in all_sections:  # Only add if not already present
                        all_sections[section] = content
                        print(f"Added section: {section}")
                    else:
                        print(f"Skipped duplicate section: {section}")

                # Check completion status
                completed_sections = len(all_sections)
                missing_sections = [s for s in required_sections if s not in all_sections]

                print(f"Completed sections: {completed_sections}/12")
                print(f"Missing sections: {missing_sections}")

                # Check if response indicates continuation is needed
                needs_continuation = "<CONTINUE>" in response_text

                print(f"Needs continuation: {needs_continuation}")
                print(f"Continuation count: {continuation_count}")

                # Safety checks to prevent infinite loops
                current_time = time.time()
                elapsed_minutes = (current_time - start_time) / 60
                current_section_count = len(all_sections)
                progress_made = current_section_count - last_section_count

                # Check if we're making progress
                if continuation_count > 0 and progress_made < MIN_PROGRESS_PER_CALL:
                    # Only stop if we've made multiple calls with no progress
                    if continuation_count > 3:  # Allow more attempts before giving up
                        logger.warning(f"No progress made in last call (added {progress_made} sections). Stopping to prevent infinite loop.")
                        break
                    else:
                        logger.info(f"No progress in call {continuation_count}, but continuing to allow more attempts...")

                # Check time limit
                if elapsed_minutes > MAX_TIME_MINUTES:
                    logger.warning(f"Time limit exceeded ({elapsed_minutes:.1f} minutes). Stopping to prevent excessive API usage.")
                    break

                # Check continuation limit
                if continuation_count >= MAX_CONTINUATIONS:
                    logger.warning(f"Continuation limit reached ({MAX_CONTINUATIONS} calls). Stopping to prevent excessive API usage.")
                    break

                # Continue if <CONTINUE> is present and safety checks pass
                if needs_continuation:
                    continuation_count += 1
                    last_section_count = current_section_count
                    logger.info(f"Continuing analysis (attempt {continuation_count}/{MAX_CONTINUATIONS}, {elapsed_minutes:.1f} minutes elapsed)")
                    continue
                else:
                    break
            else:
                logger.error(f"Claude API error: {response.status_code} - {response.text}")
                return f"❌ Claude API Error: {response.status_code}"

    except Exception as e:
        logger.error(f"Error calling Claude API: {str(e)}")
        return f"❌ Error: {str(e)}"

    # Combine all sections in the correct order
    final_response = combine_sections_smartly(all_sections)

    # Log final results
    print("\n=== FINAL SMART VALIDATION ===")
    print(f"Total sections found: {len(all_sections)}")
    print(f"All sections present: {len(all_sections) == 12}")
    print(f"Missing sections: {[s for s in required_sections if s not in all_sections]}")
    print(f"Total time: {(time.time() - start_time) / 60:.1f} minutes")
    print(f"Total API calls: {continuation_count + 1}")
    print("=" * 50)

    # Add completion indicator with safety info
    if continuation_count > 0:
        final_response += f"\n\n[Analysis completed in {continuation_count + 1} parts over {(time.time() - start_time) / 60:.1f} minutes]"

    # Add warning if incomplete due to safety limits
    if len(all_sections) < 12:
        missing_sections = [s for s in required_sections if s not in all_sections]
        final_response += f"\n\n⚠️ WARNING: Analysis incomplete due to safety limits. Missing sections: {', '.join(missing_sections)}"
        final_response += "\n\n💡 TIP: Try running the analysis again, or use the 'Targeted Analysis' tab to focus on specific areas."
        final_response += "\nThe 'Quick Questions' tab may also provide faster results for specific areas of interest."

    return final_response

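# Continuation protocol sketch: the model is asked to end truncated output with
# a literal <CONTINUE> token; each follow-up call lists only the sections still
# missing, duplicates are dropped on merge, and the loop stops on completion or
# when the safety limits (MAX_CONTINUATIONS, MAX_TIME_MINUTES) trip.
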
1051
+ def analyze_with_backup(annotated_transcript, original_transcript, age, gender, slp_notes):
1052
+ """Analyze annotated transcript with original as backup"""
1053
+ if not annotated_transcript or len(annotated_transcript.strip()) < 50:
1054
+ return "Error: Please provide an annotated transcript for analysis."
1055
+
1056
+ # Add SLP notes to the prompt if provided
1057
+ notes_section = ""
1058
+ if slp_notes and slp_notes.strip():
1059
+ notes_section = f"""
1060
+
1061
+ SLP CLINICAL NOTES:
1062
+ {slp_notes.strip()}
1063
+ """
1064
+
1065
+ # Calculate quantitative metrics
1066
+ linguistic_metrics = calculate_linguistic_metrics(original_transcript)
1067
+ marker_analysis = analyze_annotation_markers(annotated_transcript)
1068
+
1069
+ # Format metrics for inclusion in prompt
1070
+ metrics_text = f"""
1071
+
1072
+ CALCULATED LINGUISTIC METRICS:
1073
+ - Total Words: {linguistic_metrics.get('total_words', 0)}
1074
+ - Total Sentences: {linguistic_metrics.get('total_sentences', 0)}
1075
+ - Unique Words: {linguistic_metrics.get('unique_words', 0)}
1076
+ - Type-Token Ratio: {linguistic_metrics.get('type_token_ratio', 0)}
1077
+ - MLU (Words): {linguistic_metrics.get('mlu_words', 0)}
1078
+ - MLU (Morphemes): {linguistic_metrics.get('mlu_morphemes', 0)}
1079
+ - Average Sentence Length: {linguistic_metrics.get('avg_sentence_length', 0)}
1080
+ - Vocabulary Sophistication: {linguistic_metrics.get('vocabulary_sophistication', 0)}
1081
+
1082
+ ANNOTATION MARKER COUNTS:
1083
+ - Fluency Issues: {marker_analysis.get('category_totals', {}).get('fluency_issues', 0)}
1084
+ - Grammar Errors: {marker_analysis.get('category_totals', {}).get('grammar_errors', 0)}
1085
+ - Simple Vocabulary: {marker_analysis.get('category_totals', {}).get('simple_vocabulary', 0)}
1086
+ - Complex Vocabulary: {marker_analysis.get('category_totals', {}).get('complex_vocabulary', 0)}
1087
+ - Vocabulary Sophistication Ratio: {marker_analysis.get('category_totals', {}).get('vocab_sophistication_ratio', 0):.3f}
1088
+ """
1089
+
1090
+ analysis_prompt = f"""
1091
+ You are a speech-language pathologist conducting a COMPREHENSIVE analysis of a word-by-word annotated speech sample. Use the provided quantitative metrics and count EVERY marker precisely.
1092
+
1093
+ Patient: {age}-year-old {gender}
1094
+
1095
+ ANNOTATED TRANSCRIPT:
1096
+ {annotated_transcript}{notes_section}
1097
+
1098
+ ORIGINAL TRANSCRIPT (for reference and backup analysis):
1099
+ {original_transcript}
1100
+
1101
+ {metrics_text}
1102
+
1103
+ ANALYSIS INSTRUCTIONS:
1104
+ Using the detailed linguistic markers in the annotated transcript and the calculated metrics above, provide a comprehensive analysis with EXACT counts, percentages, and specific examples. Complete ALL 12 sections below:
1105
+
1106
+ COMPREHENSIVE SPEECH SAMPLE ANALYSIS:
1107
+
1108
+ 1. SPEECH FACTORS (with EXACT counts and specific citations):
1109
+
1110
+ A. Fluency Issues:
1111
+ - Count [FILLER] markers: List each instance and calculate rate per 100 words
1112
+ - Count [FALSE_START] markers: List examples and analyze patterns
1113
+ - Count [REPETITION] markers: Categorize by type (word, phrase, sound)
1114
+ - Count [REVISION] markers: Analyze self-correction patterns
1115
+ - Count [PAUSE] markers: Assess hesitation frequency
1116
+ - Calculate total disfluency rate and severity level
1117
+
1118
+ B. Word Retrieval Issues:
1119
+ - Count [CIRCUMLOCUTION] markers: List each roundabout description
1120
+ - Count [INCOMPLETE] markers: Analyze abandoned thought patterns
1121
+ - Count [GENERIC] markers: Calculate specificity ratio
1122
+ - Count [WORD_SEARCH] markers: Identify retrieval difficulty areas
1123
+
1124
+ C. Grammatical Errors:
1125
+ - Count [GRAM_ERROR] markers by subcategory (verb tense, subject-verb agreement, etc.)
1126
+ - Count [SYNTAX_ERROR] markers: Analyze word order problems
1127
+ - Count [MORPH_ERROR] markers: Categorize morphological mistakes
1128
+ - Count [RUN_ON] markers: Assess sentence boundary awareness
1129
+
1130
+ 2. LANGUAGE SKILLS ASSESSMENT (with specific evidence):
1131
+
1132
+ A. Lexical/Semantic Skills:
1133
+ - Use calculated Type-Token Ratio: {linguistic_metrics.get('type_token_ratio', 0)}
1134
+ - Count [SIMPLE_VOCAB] vs [COMPLEX_VOCAB] markers
1135
+ - Assess vocabulary sophistication ratio: {marker_analysis.get('category_totals', {}).get('vocab_sophistication_ratio', 0):.3f}
1136
+ - Count [SEMANTIC_ERROR] markers and analyze patterns
1137
+
1138
+ B. Syntactic Skills:
1139
+ - Count [SIMPLE_SENT], [COMPLEX_SENT], [COMPOUND_SENT] markers
1140
+ - Calculate sentence complexity ratios
1141
+ - Assess clause complexity and embedding
+
+ C. Supralinguistic Skills:
+ - Identify cause-effect relationships, inferences, non-literal language
+ - Assess problem-solving language and metalinguistic awareness
+
+ 3. COMPLEX SENTENCE ANALYSIS (with exact counts):
+
+ A. Coordinating Conjunctions:
+ - Count and cite EVERY use of: and, but, or, so, yet, for, nor
+ - Analyze patterns and age-appropriateness
+
+ B. Subordinating Conjunctions:
+ - Count and cite EVERY use of: because, although, while, since, if, when, where, that, which, who
+ - Analyze clause complexity and embedding depth
+
+ C. Sentence Structure Analysis:
+ - Use calculated MLU: {linguistic_metrics.get('mlu_words', 0)} words, {linguistic_metrics.get('mlu_morphemes', 0)} morphemes
+ - Calculate complexity ratios and assess developmental appropriateness
+
+ 4. FIGURATIVE LANGUAGE ANALYSIS (with exact counts):
+
+ A. Similes and Metaphors:
+ - Count [FIGURATIVE] markers for similes (using "like" or "as")
+ - Count [FIGURATIVE] markers for metaphors (direct comparisons)
+
+ B. Idioms and Non-literal Language:
+ - Count and analyze idiomatic expressions
+ - Assess comprehension and appropriate use
+
+ 5. PRAGMATIC LANGUAGE ASSESSMENT (with specific examples):
+
+ A. Discourse Management:
+ - Count [TOPIC_SHIFT] markers: Assess transition appropriateness
+ - Count [TANGENT] markers: Analyze tangential speech patterns
+ - Count [COHERENCE_BREAK] markers: Assess logical flow
+
+ B. Referential Communication:
+ - Count [PRONOUN_REF] markers: Analyze referential clarity
+ - Assess communicative effectiveness
+
+ 6. VOCABULARY AND SEMANTIC ANALYSIS (with quantification):
+
+ A. Vocabulary Diversity:
+ - Total words: {linguistic_metrics.get('total_words', 0)}
+ - Unique words: {linguistic_metrics.get('unique_words', 0)}
+ - Type-Token Ratio: {linguistic_metrics.get('type_token_ratio', 0)}
+ - Vocabulary sophistication: {linguistic_metrics.get('vocabulary_sophistication', 0)}
+
+ B. Semantic Relationships:
+ - Analyze word frequency patterns
+ - Assess semantic precision and relationships
+
+ 7. MORPHOLOGICAL AND PHONOLOGICAL ANALYSIS (with counts):
+
+ A. Morphological Markers:
+ - Count [MORPH_ERROR] markers and categorize
+ - Analyze morpheme use patterns
+ - Assess morphological complexity
+
+ B. Phonological Patterns:
+ - Identify speech sound patterns from transcript
+ - Assess syllable structure complexity
+
+ 8. COGNITIVE-LINGUISTIC FACTORS (with evidence):
+
+ A. Working Memory:
+ - Assess sentence length complexity using average: {linguistic_metrics.get('avg_sentence_length', 0)} words
+ - Analyze information retention patterns
+
+ B. Processing Efficiency:
+ - Analyze linguistic complexity and word-finding patterns
+ - Assess cognitive demands of language structures
+
+ C. Executive Function:
+ - Count self-correction patterns ([REVISION] markers)
+ - Assess planning and organization in discourse
+
+ 9. FLUENCY AND RHYTHM ANALYSIS (with quantification):
+
+ A. Disfluency Patterns:
+ - Total fluency issues: {marker_analysis.get('category_totals', {}).get('fluency_issues', 0)}
+ - Calculate disfluency rate per 100 words
+ - Analyze impact on communication
+
+ B. Language Flow:
+ - Assess sentence length variability: std = {linguistic_metrics.get('sentence_length_std', 0)}
+ - Analyze linguistic markers of hesitation
+
+ 10. QUANTITATIVE METRICS:
+ - Total words: {linguistic_metrics.get('total_words', 0)}
+ - Total sentences: {linguistic_metrics.get('total_sentences', 0)}
+ - MLU (words): {linguistic_metrics.get('mlu_words', 0)}
+ - MLU (morphemes): {linguistic_metrics.get('mlu_morphemes', 0)}
+ - Type-Token Ratio: {linguistic_metrics.get('type_token_ratio', 0)}
+ - Grammar error rate: Calculate from marker counts
+ - Vocabulary sophistication ratio: {marker_analysis.get('category_totals', {}).get('vocab_sophistication_ratio', 0):.3f}
+
+ 11. CLINICAL IMPLICATIONS:
+ - Primary strengths: List with supporting evidence from markers and metrics
+ - Primary weaknesses: Rank by severity with exact counts
+ - Intervention priorities: Based on error frequency and impact
+ - Therapy targets: Specific, measurable goals
+
+ 12. PROGNOSIS AND SUMMARY:
+ - Overall communication profile with percentile estimates
+ - Developmental appropriateness assessment
+ - Summary of key findings from quantitative analysis
+ - Priority treatment goals and expected outcomes
+
+ CRITICAL REQUIREMENTS:
+ - Use the provided calculated metrics in your analysis
+ - Provide EXACT counts for every marker type
+ - Calculate precise percentages and show your work
+ - Give specific examples from the transcript
+ - If annotation is incomplete, supplement with analysis of the original transcript
+ - Complete ALL 12 sections - use <CONTINUE> if needed
+ """
+
+     return call_claude_api_with_continuation(analysis_prompt)
+
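+ # call_claude_api_with_continuation (defined earlier in this file) is assumed
+ # to re-prompt while the model ends a reply with the <CONTINUE> sentinel and
+ # to stitch the parts into one report, matching the prompt's final requirement.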
+ def full_analysis_pipeline(transcript_content, age, gender, slp_notes, progress_callback=None):
+     """Complete pipeline: annotate, then analyze, with progress updates."""
+     if not transcript_content or len(transcript_content.strip()) < 50:
+         return "Error: Please provide a longer transcript for analysis.", ""
+
+     # Step 1: Annotate transcript
+     logger.info("Step 1: Annotating transcript with linguistic markers...")
+     if progress_callback:
+         progress_callback("🏷️ Step 1: Annotating transcript with linguistic markers...")
+
+     annotated_transcript = annotate_transcript(transcript_content, age, gender, slp_notes)
+
+     if annotated_transcript.startswith("❌"):
+         return annotated_transcript, ""
+
+     # Signal that annotation finished before starting the slower analysis step
+     if progress_callback:
+         progress_callback("✅ Step 1 Complete: Annotation finished! Starting analysis...")
+
+     # Check whether annotation was incomplete
+     if annotated_transcript.startswith("⚠️ ANNOTATION INCOMPLETE"):
+         logger.warning("Annotation incomplete; proceeding with analysis using the original transcript as the primary source")
+         analysis_note = "⚠️ Note: Annotation was incomplete. Analysis primarily based on original transcript.\n\n"
+     else:
+         analysis_note = ""
+
+     # Step 2: Analyze the annotated transcript, with the original as backup
+     logger.info("Step 2: Analyzing annotated transcript...")
+     if progress_callback:
+         progress_callback("📊 Step 2: Analyzing annotated transcript (this may take several minutes)...")
+
+     analysis_result = analyze_with_backup(annotated_transcript, transcript_content, age, gender, slp_notes)
+
+     if progress_callback:
+         progress_callback("✅ Analysis Complete!")
+
+     return annotated_transcript, analysis_note + analysis_result
+
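+ # Usage sketch (hypothetical, outside the UI): run the pipeline from a script
+ # with a plain console progress callback:
+ #   annotated, report = full_analysis_pipeline(example_transcript, "45", "Male", "",
+ #                                              progress_callback=print)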
+ def progressive_analysis_pipeline(transcript_content, age, gender, slp_notes):
+     """Generator yielding progressive (annotation, analysis, status) updates."""
+     if not transcript_content or len(transcript_content.strip()) < 50:
+         yield "Error: Please provide a longer transcript for analysis.", "", "❌ Error"
+         return
+
+     # Step 1: Annotate transcript
+     logger.info("Step 1: Annotating transcript with linguistic markers...")
+     yield "", "", "🏷️ Step 1: Annotating transcript with linguistic markers..."
+
+     annotated_transcript = annotate_transcript(transcript_content, age, gender, slp_notes)
+
+     if annotated_transcript.startswith("❌"):
+         yield annotated_transcript, "", "❌ Annotation failed"
+         return
+
+     # Yield the annotated transcript as soon as it is ready
+     yield annotated_transcript, "", "✅ Step 1 Complete! Starting analysis..."
+
+     # Check whether annotation was incomplete
+     if annotated_transcript.startswith("⚠️ ANNOTATION INCOMPLETE"):
+         logger.warning("Annotation incomplete, proceeding with analysis")
+         analysis_note = "⚠️ Note: Annotation was incomplete. Analysis primarily based on original transcript.\n\n"
+         yield annotated_transcript, "", "⚠️ Annotation incomplete, continuing with analysis..."
+     else:
+         analysis_note = ""
+
+     # Step 2: Analyze annotated transcript
+     logger.info("Step 2: Analyzing annotated transcript...")
+     yield annotated_transcript, "", "📊 Step 2: Analyzing annotated transcript (this may take several minutes)..."
+
+     analysis_result = analyze_with_backup(annotated_transcript, transcript_content, age, gender, slp_notes)
+
+     # Final result
+     yield annotated_transcript, analysis_note + analysis_result, "✅ Analysis Complete!"
+
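+ # This generator is not wired to a button below; Gradio can stream a generator's
+ # yields into its outputs, so a hypothetical hookup would be:
+ #   btn.click(fn=progressive_analysis_pipeline,
+ #             inputs=[transcript_input, age_input, gender_input, slp_notes_input],
+ #             outputs=[annotated_output, analysis_output, status_display])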
+ # Example transcript data
+ example_transcript = """Well, um, I was thinking about, you know, the thing that happened yesterday. I was go- I mean I was going to the store and, uh, I seen this really big dog. Actually, it was more like a wolf or something. The dog, he was just standing there, and I thought to myself, "That's one magnificent creature." But then, um, I realized I forgot my wallet at home, so I had to turn around and go back. When I got home, my wife she says to me, "Where's the groceries?" And I'm like, "Well, honey, I had to come back because I forgot my thing." She wasn't too happy about that, let me tell you. Anyway, speaking of dogs, did I ever tell you about the time I went fishing? It was raining cats and dogs that day, and I caught three fishes. My brother, he don't like fishing much, but he came with me anyway. We was sitting there for hours, just waiting and waiting. The fish, they wasn't biting at all. But then, all of a sudden, I got a bite! I was so excited, I almost falled into the water. The fish was huge - well, maybe not huge, but pretty big for that lake. We cooked it up real good that night. My wife, she made some of that fancy stuff to go with it. What do you call it... that green thing... oh yeah, asparagus. She's always making these elaborate meals. Sometimes I think she tries too hard, you know? But I appreciate it. Life's been good to us, I guess. We been married for twenty-five years now. Time flies when you're having fun, as they say."""
+
+ example_annotated = """Well[FILLER], um[FILLER], I was thinking about, you[SIMPLE_VOCAB] know[FILLER], the thing[GENERIC] that happened yesterday[SIMPLE_VOCAB]. I was go-[FALSE_START] I mean I was going[SIMPLE_VOCAB] to the store[SIMPLE_VOCAB] and, uh[FILLER], I seen[GRAM_ERROR] this really big[SIMPLE_VOCAB] dog[SIMPLE_VOCAB].[SIMPLE_SENT] Actually, it was more like[FILLER] a wolf[SIMPLE_VOCAB] or something[GENERIC].[SIMPLE_SENT] The dog[SIMPLE_VOCAB], he[PRONOUN_REF] was just standing[SIMPLE_VOCAB] there, and I thought to myself, "That's one magnificent[COMPLEX_VOCAB] creature[COMPLEX_VOCAB]."[COMPLEX_SENT] But then, um[FILLER], I realized[COMPLEX_VOCAB] I forgot[SIMPLE_VOCAB] my wallet[SIMPLE_VOCAB] at home[SIMPLE_VOCAB], so I had to turn around and go[SIMPLE_VOCAB] back[SIMPLE_VOCAB].[COMPLEX_SENT] When I got home, my wife[SIMPLE_VOCAB] she[REPETITION] says[SIMPLE_VOCAB] to me, "Where's the groceries[SIMPLE_VOCAB]?"[COMPLEX_SENT] And I'm like[FILLER], "Well[FILLER], honey[SIMPLE_VOCAB], I had to come back because I forgot[SIMPLE_VOCAB] my thing[GENERIC]."[COMPLEX_SENT] She wasn't too happy[SIMPLE_VOCAB] about that, let me tell you.[SIMPLE_SENT] Anyway[TOPIC_SHIFT], speaking of dogs, did I ever tell you about the time I went fishing?[TANGENT][COMPLEX_SENT] It was raining cats and dogs[FIGURATIVE] that day, and I caught[SIMPLE_VOCAB] three fishes[MORPH_ERROR].[COMPOUND_SENT] My brother[SIMPLE_VOCAB], he[PRONOUN_REF] don't[GRAM_ERROR] like fishing[SIMPLE_VOCAB] much, but he came with me anyway[SIMPLE_VOCAB].[COMPLEX_SENT] We was[GRAM_ERROR] sitting[SIMPLE_VOCAB] there for hours[SIMPLE_VOCAB], just waiting[SIMPLE_VOCAB] and waiting[REPETITION].[SIMPLE_SENT] The fish[SIMPLE_VOCAB], they[PRONOUN_REF] wasn't[GRAM_ERROR] biting[SIMPLE_VOCAB] at all.[SIMPLE_SENT] But then, all of a sudden[SIMPLE_VOCAB], I got[SIMPLE_VOCAB] a bite[SIMPLE_VOCAB]![SIMPLE_SENT] I was so excited[SIMPLE_VOCAB], I almost falled[MORPH_ERROR] into the water[SIMPLE_VOCAB].[COMPLEX_SENT] The fish[SIMPLE_VOCAB] was huge[SIMPLE_VOCAB] - well[FILLER], maybe not huge[SIMPLE_VOCAB], but pretty big[SIMPLE_VOCAB] for that lake[SIMPLE_VOCAB].[REVISION][COMPLEX_SENT] We cooked[SIMPLE_VOCAB] it up real good[SIMPLE_VOCAB] that night[SIMPLE_VOCAB].[SIMPLE_SENT] My wife[SIMPLE_VOCAB], she[REPETITION] made some of that fancy[SIMPLE_VOCAB] stuff[GENERIC] to go[SIMPLE_VOCAB] with it.[SIMPLE_SENT] What do you call it... [WORD_SEARCH] that green[SIMPLE_VOCAB] thing[GENERIC]... [PAUSE] oh yeah, asparagus[COMPLEX_VOCAB].[CIRCUMLOCUTION] She's always making[SIMPLE_VOCAB] these elaborate[COMPLEX_VOCAB] meals[SIMPLE_VOCAB].[SIMPLE_SENT] Sometimes I think[SIMPLE_VOCAB] she tries[SIMPLE_VOCAB] too hard[SIMPLE_VOCAB], you know[FILLER]?[COMPLEX_SENT] But I appreciate[COMPLEX_VOCAB] it.[SIMPLE_SENT] Life's been good[SIMPLE_VOCAB] to us, I guess[SIMPLE_VOCAB].[SIMPLE_SENT] We been[GRAM_ERROR] married[SIMPLE_VOCAB] for twenty-five[COMPLEX_VOCAB] years[SIMPLE_VOCAB] now.[SIMPLE_SENT] Time flies when you're having fun[FIGURATIVE], as they say.[COMPLEX_SENT]"""
+
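+ # Quick sanity check (hypothetical): tally the annotation markers in the example.
+ #   import re
+ #   from collections import Counter
+ #   Counter(re.findall(r"\[([A-Z_]+)\]", example_annotated)).most_common()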
+ # Create Gradio interface
+ with gr.Blocks(title="Speech Analysis", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("""
+ # 📋 Speech Analysis Tool with Annotations
+
+ This tool performs a two-step comprehensive speech analysis:
+ 1. **Annotation**: Marks linguistic features in the transcript
+ 2. **Analysis**: Counts and analyzes the marked features for detailed assessment
+
+ Upload a transcript or paste text below to begin the analysis.
+ """)
+
+     with gr.Tab("📝 Full Analysis Pipeline"):
+         gr.Markdown("### Complete two-step analysis: annotation followed by comprehensive analysis")
+
+         with gr.Row():
+             with gr.Column(scale=2):
+                 transcript_input = gr.Textbox(
+                     label="Speech Transcript",
+                     placeholder="Paste the speech transcript here...",
+                     lines=10,
+                     max_lines=20
+                 )
+
+                 file_input = gr.File(
+                     label="Or upload transcript file",
+                     file_types=[".txt", ".doc", ".docx"]
+                 )
+
+                 with gr.Row():
+                     age_input = gr.Textbox(
+                         label="Age",
+                         placeholder="e.g., 45",
+                         value="45"
+                     )
+                     gender_input = gr.Dropdown(
+                         label="Gender",
+                         choices=["Male", "Female", "Other"],
+                         value="Male"
+                     )
+
+                 slp_notes_input = gr.Textbox(
+                     label="SLP Clinical Notes (Optional)",
+                     placeholder="Add any relevant clinical observations...",
+                     lines=3
+                 )
+
+                 example_btn = gr.Button("📄 Load Example Transcript", variant="secondary", size="sm")
+
+                 # Single main analysis button
+                 ultimate_analysis_btn = gr.Button("🚀 Run Complete CASL Analysis", variant="primary", size="lg")
+
+             with gr.Column(scale=3):
+                 status_display = gr.Markdown("Ready to analyze transcript")
+
+                 annotated_output = gr.Textbox(
+                     label="Step 1: Annotated Transcript (✓ = Complete, ⚠️ = Incomplete)",
+                     lines=15,
+                     max_lines=25,
+                     show_copy_button=True
+                 )
+
+                 analysis_output = gr.Textbox(
+                     label="Step 2: Comprehensive Analysis",
+                     lines=20,
+                     max_lines=30,
+                     show_copy_button=True
+                 )
+
+     with gr.Tab("🏷️ Annotation Only"):
+         gr.Markdown("### Step 1: Annotate transcript with linguistic markers")
+
+         with gr.Row():
+             with gr.Column():
+                 transcript_input_2 = gr.Textbox(
+                     label="Speech Transcript",
+                     placeholder="Paste the speech transcript here...",
+                     lines=10
+                 )
+
+                 with gr.Row():
+                     age_input_2 = gr.Textbox(label="Age", value="45")
+                     gender_input_2 = gr.Dropdown(
+                         label="Gender",
+                         choices=["Male", "Female", "Other"],
+                         value="Male"
+                     )
+
+                 slp_notes_input_2 = gr.Textbox(
+                     label="SLP Clinical Notes (Optional)",
+                     lines=3
+                 )
+
+                 example_btn_2 = gr.Button("📄 Load Example Transcript", variant="secondary", size="sm")
+                 annotate_btn = gr.Button("🏷️ Annotate Transcript", variant="secondary")
+
+             with gr.Column():
+                 annotation_output = gr.Textbox(
+                     label="Annotated Transcript (✓ = Complete, ⚠️ = Incomplete)",
+                     lines=20,
+                     show_copy_button=True
+                 )
+
+     with gr.Tab("📊 Analysis Only"):
+         gr.Markdown("### Step 2: Analyze pre-annotated transcript")
+
+         with gr.Row():
+             with gr.Column():
+                 annotated_input = gr.Textbox(
+                     label="Annotated Transcript",
+                     placeholder="Paste annotated transcript with [MARKERS] here...",
+                     lines=10
+                 )
+
+                 with gr.Row():
+                     age_input_3 = gr.Textbox(label="Age", value="45")
+                     gender_input_3 = gr.Dropdown(
+                         label="Gender",
+                         choices=["Male", "Female", "Other"],
+                         value="Male"
+                     )
+
+                 slp_notes_input_3 = gr.Textbox(
+                     label="SLP Clinical Notes (Optional)",
+                     lines=3
+                 )
+
+                 example_annotated_btn = gr.Button("📄 Load Example Annotated Transcript", variant="secondary", size="sm")
+                 analyze_only_btn = gr.Button("📊 Analyze Annotated Transcript", variant="secondary")
+
+             with gr.Column():
+                 analysis_only_output = gr.Textbox(
+                     label="Comprehensive Analysis",
+                     lines=20,
+                     show_copy_button=True
+                 )
+
+     # Event handlers - now all components are defined
+     example_btn.click(fn=lambda: example_transcript, outputs=[transcript_input])
+     example_btn_2.click(fn=lambda: example_transcript, outputs=[transcript_input_2])
+     example_annotated_btn.click(fn=lambda: example_annotated, outputs=[annotated_input])
+
+     file_input.change(
+         fn=process_file,
+         inputs=[file_input],
+         outputs=[transcript_input]
+     )
+
+     def run_annotation_step(transcript_content, age, gender, slp_notes):
+         """Run just the annotation step and return immediately."""
+         if not transcript_content or len(transcript_content.strip()) < 50:
+             return "Error: Please provide a longer transcript for annotation.", "❌ Error"
+
+         logger.info("Step 1: Annotating transcript with linguistic markers...")
+         annotated_transcript = annotate_transcript(transcript_content, age, gender, slp_notes)
+
+         if annotated_transcript.startswith("❌"):
+             return annotated_transcript, "❌ Annotation failed"
+         elif annotated_transcript.startswith("⚠️ ANNOTATION INCOMPLETE"):
+             return annotated_transcript, "⚠️ Annotation incomplete but proceeding"
+         else:
+             return annotated_transcript, "✅ Annotation complete! Click 'Run Analysis' to continue."
+
+     def run_analysis_step(annotated_transcript, original_transcript, age, gender, slp_notes):
+         """Run the analysis step on the annotated transcript."""
+         if not annotated_transcript or len(annotated_transcript.strip()) < 50:
+             return "Error: Please provide an annotated transcript for analysis."
+
+         logger.info("Step 2: Analyzing annotated transcript...")
+
+         # Check whether annotation was incomplete
+         if annotated_transcript.startswith("⚠️ ANNOTATION INCOMPLETE"):
+             analysis_note = "⚠️ Note: Annotation was incomplete. Analysis primarily based on original transcript.\n\n"
+         else:
+             analysis_note = ""
+
+         analysis_result = analyze_with_backup(annotated_transcript, original_transcript, age, gender, slp_notes)
+         return analysis_note + analysis_result
+
+     def run_manual_count_only(annotated_transcript):
+         """Generate only the manual count report, without AI analysis."""
+         if not annotated_transcript or len(annotated_transcript.strip()) < 50:
+             return "Error: Please provide an annotated transcript for manual counting."
+
+         return generate_manual_count_report(annotated_transcript)
+
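+     # Neither run_manual_count_only (above) nor run_verified_analysis (below) is
+     # wired to a button in this UI; both can be called directly, e.g. (hypothetical):
+     #   report = run_manual_count_only(example_annotated)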
+     def run_verified_analysis(annotated_transcript, original_transcript, age, gender, slp_notes):
+         """Run analysis with manual count verification."""
+         if not annotated_transcript or len(annotated_transcript.strip()) < 50:
+             return "Error: Please provide an annotated transcript for analysis."
+
+         # Generate the comprehensive analysis report first
+         comprehensive_report = generate_comprehensive_analysis_report(annotated_transcript, original_transcript)
+
+         # Get all the verified data
+         marker_analysis = analyze_annotation_markers(annotated_transcript)
+         linguistic_metrics = calculate_linguistic_metrics(original_transcript)
+         lexical_diversity = calculate_advanced_lexical_diversity(original_transcript)
+
+         # Create a comprehensive verified-analysis prompt
+         verified_prompt = f"""
+ You are a speech-language pathologist conducting analysis based on COMPREHENSIVE VERIFIED DATA.
+ Do NOT recount anything - use ONLY the provided verified measurements below.
+
+ Patient: {age}-year-old {gender}
+
+ COMPREHENSIVE VERIFIED ANALYSIS DATA (DO NOT RECOUNT):
+ {comprehensive_report}
+
+ ANNOTATED TRANSCRIPT (for examples only, do not recount):
+ {annotated_transcript}
+
+ INSTRUCTIONS:
+ Use ONLY the verified data provided above. Do NOT count or calculate anything yourself.
+
+ Provide a comprehensive clinical interpretation organized into these sections:
+
+ 1. LEXICAL DIVERSITY INTERPRETATION:
+ - Interpret the advanced lexical diversity measures (MTLD, HDD, MATTR, etc.)
+ - Compare to age-appropriate norms
+ - Clinical significance of diversity patterns
+
+ 2. FLUENCY PATTERN ANALYSIS:
+ - Clinical interpretation of fluency marker counts and rates
+ - Severity assessment based on verified counts
+ - Impact on communication effectiveness
+
+ 3. GRAMMATICAL COMPETENCE ASSESSMENT:
+ - Analysis of grammar error patterns from verified counts
+ - Developmental appropriateness
+ - Areas of strength vs. weakness
+
+ 4. VOCABULARY AND SEMANTIC ANALYSIS:
+ - Interpretation of vocabulary sophistication measures
+ - Word frequency pattern analysis
+ - Semantic precision assessment
+
+ 5. PRAGMATIC LANGUAGE EVALUATION:
+ - Discourse coherence based on verified markers
+ - Social communication effectiveness
+ - Conversational competence
+
+ 6. OVERALL COMMUNICATION PROFILE:
+ - Integration of all verified measures
+ - Strengths and areas of need
+ - Functional communication impact
+
+ 7. CLINICAL RECOMMENDATIONS:
+ - Specific intervention targets based on verified data
+ - Therapy approaches and techniques
+ - Progress monitoring suggestions
+ - Prognosis and expected outcomes
+
+ Focus on INTERPRETATION and CLINICAL SIGNIFICANCE, not counting.
+ All measurements are already verified and accurate.
+ Cite specific examples from the transcript to support your interpretations.
+ """
+
+         ai_interpretation = call_claude_api(verified_prompt)
+
+         return f"{comprehensive_report}\n\n{'='*100}\nCLINICAL INTERPRETATION BASED ON COMPREHENSIVE VERIFIED DATA\n{'='*100}\n\n{ai_interpretation}"
+
+     def run_ultimate_analysis(annotated_transcript, original_transcript, age, gender, slp_notes):
+         """The ultimate analysis: gather all statistical data, then run the final 12-section clinical analysis."""
+         if not annotated_transcript or len(annotated_transcript.strip()) < 50:
+             return "Error: Please provide an annotated transcript for analysis."
+
+         # STEP 1: Gather ALL statistical data
+         linguistic_metrics = calculate_linguistic_metrics(original_transcript)
+         marker_analysis = analyze_annotation_markers(annotated_transcript)
+         lexical_diversity = calculate_advanced_lexical_diversity(original_transcript)
+
+         # STEP 2: Get AI clinical insights (for interpretation, not counting)
+         ai_clinical_insights = analyze_with_backup(annotated_transcript, original_transcript, age, gender, slp_notes)
+
+         # STEP 3: Prepare all verified statistical values for the final prompt
+         stats_summary = f"""
+ VERIFIED STATISTICAL VALUES (DO NOT RECOUNT - USE THESE EXACT NUMBERS):
+
+ BASIC METRICS:
+ • Total words: {linguistic_metrics.get('total_words', 0)}
+ • Total sentences: {linguistic_metrics.get('total_sentences', 0)}
+ • Unique words: {linguistic_metrics.get('unique_words', 0)}
+ • MLU (words): {linguistic_metrics.get('mlu_words', 0):.2f}
+ • MLU (morphemes): {linguistic_metrics.get('mlu_morphemes', 0):.2f}
+ • Average sentence length: {linguistic_metrics.get('avg_sentence_length', 0):.2f}
+ • Sentence length std: {linguistic_metrics.get('sentence_length_std', 0):.2f}
+
+ LEXICAL DIVERSITY MEASURES (from lexical-diversity library):"""
+
+         if lexical_diversity.get('library_available', False) and 'diversity_measures' in lexical_diversity:
+             measures = lexical_diversity['diversity_measures']
+             stats_summary += f"""
+ • Simple TTR: {measures.get('simple_ttr', 'N/A')}
+ • Root TTR: {measures.get('root_ttr', 'N/A')}
+ • Log TTR: {measures.get('log_ttr', 'N/A')}
+ • Maas TTR: {measures.get('maas_ttr', 'N/A')}
+ • HDD: {measures.get('hdd', 'N/A')}
+ • MSTTR (25-word): {measures.get('msttr_25', 'N/A')}
+ • MSTTR (50-word): {measures.get('msttr_50', 'N/A')}
+ • MATTR (25-word): {measures.get('mattr_25', 'N/A')}
+ • MATTR (50-word): {measures.get('mattr_50', 'N/A')}
+ • MTLD: {measures.get('mtld', 'N/A')}
+ • MTLD (MA wrap): {measures.get('mtld_ma_wrap', 'N/A')}
+ • MTLD (MA bidirectional): {measures.get('mtld_ma_bid', 'N/A')}"""
+         else:
+             stats_summary += "\n • Lexical diversity measures not available"
+
+         # Add manual annotation counts
+         marker_counts = marker_analysis['marker_counts']
+         category_totals = marker_analysis['category_totals']
+         # Floor at 1 to avoid division by zero in the rate strings below
+         total_words = linguistic_metrics.get('total_words', 0) or 1
+
+         stats_summary += f"""
+
+ MANUAL ANNOTATION COUNTS:
+ • FILLER markers: {marker_counts.get('FILLER', 0)} ({marker_counts.get('FILLER', 0)/total_words*100:.2f} per 100 words)
+ • FALSE_START markers: {marker_counts.get('FALSE_START', 0)}
+ • REPETITION markers: {marker_counts.get('REPETITION', 0)}
+ • REVISION markers: {marker_counts.get('REVISION', 0)}
+ • PAUSE markers: {marker_counts.get('PAUSE', 0)}
+ • GRAM_ERROR markers: {marker_counts.get('GRAM_ERROR', 0)}
+ • SYNTAX_ERROR markers: {marker_counts.get('SYNTAX_ERROR', 0)}
+ • MORPH_ERROR markers: {marker_counts.get('MORPH_ERROR', 0)}
+ • SIMPLE_VOCAB markers: {marker_counts.get('SIMPLE_VOCAB', 0)}
+ • COMPLEX_VOCAB markers: {marker_counts.get('COMPLEX_VOCAB', 0)}
+ • SIMPLE_SENT markers: {marker_counts.get('SIMPLE_SENT', 0)}
+ • COMPLEX_SENT markers: {marker_counts.get('COMPLEX_SENT', 0)}
+ • COMPOUND_SENT markers: {marker_counts.get('COMPOUND_SENT', 0)}
+ • FIGURATIVE markers: {marker_counts.get('FIGURATIVE', 0)}
+ • PRONOUN_REF markers: {marker_counts.get('PRONOUN_REF', 0)}
+ • TOPIC_SHIFT markers: {marker_counts.get('TOPIC_SHIFT', 0)}
+ • TANGENT markers: {marker_counts.get('TANGENT', 0)}
+ • CIRCUMLOCUTION markers: {marker_counts.get('CIRCUMLOCUTION', 0)}
+ • GENERIC markers: {marker_counts.get('GENERIC', 0)}
+ • WORD_SEARCH markers: {marker_counts.get('WORD_SEARCH', 0)}
+
+ CATEGORY TOTALS:
+ • Total fluency issues: {category_totals['fluency_issues']} ({category_totals['fluency_issues']/total_words*100:.2f} per 100 words)
+ • Total grammar errors: {category_totals['grammar_errors']} ({category_totals['grammar_errors']/total_words*100:.2f} per 100 words)
+ • Vocabulary sophistication ratio: {category_totals['vocab_sophistication_ratio']:.3f}
+ """
+
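+         # Hypothetical refactor: a small helper could replace the repeated
+         # rate expressions above:
+         #   def per_100(count, words):
+         #       return f"{count / words * 100:.2f}" if words else "N/A"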
+         # STEP 4: Create the final comprehensive prompt
+         final_prompt = f"""
+ You are a speech-language pathologist conducting the FINAL COMPREHENSIVE 12-SECTION CASL ANALYSIS.
+
+ Patient: {age}-year-old {gender}
+
+ {stats_summary}
+
+ CLINICAL INSIGHTS FROM AI ANALYSIS (for interpretation guidance):
+ {ai_clinical_insights[:4000]}...
+
+ ANNOTATED TRANSCRIPT (for specific examples):
+ {annotated_transcript}
+
+ CRITICAL INSTRUCTIONS:
+ 1. Use ONLY the verified statistical values provided above - DO NOT recount anything
+ 2. Use the clinical insights for interpretation guidance
+ 3. Use the annotated transcript for specific examples and quotes
+ 4. Complete ALL 12 sections of the comprehensive analysis
+
+ COMPREHENSIVE SPEECH SAMPLE ANALYSIS:
+
+ 1. SPEECH FACTORS (with EXACT verified counts and specific citations):
+ A. Fluency Issues: Use the verified counts above, cite specific examples from transcript
+ B. Word Retrieval Issues: Use verified counts, analyze patterns with examples
+ C. Grammatical Errors: Use verified error counts, categorize with examples
+
+ 2. LANGUAGE SKILLS ASSESSMENT (with verified evidence):
+ A. Lexical/Semantic Skills: Use verified lexical diversity measures and vocabulary data
+ B. Syntactic Skills: Use verified sentence complexity counts and MLU data
+ C. Supralinguistic Skills: Clinical interpretation with transcript examples
+
+ 3. COMPLEX SENTENCE ANALYSIS (with verified counts):
+ A. Coordinating Conjunctions: Count from transcript, use verified sentence data
+ B. Subordinating Conjunctions: Count from transcript, analyze complexity
+ C. Sentence Structure Analysis: Use verified MLU and sentence type data
+
+ 4. FIGURATIVE LANGUAGE ANALYSIS (with verified counts):
+ A. Similes and Metaphors: Use verified figurative markers, cite examples
+ B. Idioms and Non-literal Language: Analysis with specific examples
+
+ 5. PRAGMATIC LANGUAGE ASSESSMENT (with verified examples):
+ A. Discourse Management: Use verified pragmatic marker counts
+ B. Referential Communication: Use verified pronoun reference data
+
+ 6. VOCABULARY AND SEMANTIC ANALYSIS (with verified quantification):
+ A. Vocabulary Diversity: Use ALL verified lexical diversity measures (MTLD, HDD, etc.)
+ B. Semantic Relationships: Use verified word frequency and sophistication data
+
+ 7. MORPHOLOGICAL AND PHONOLOGICAL ANALYSIS (with verified counts):
+ A. Morphological Markers: Use verified morphological data and MLU morphemes
+ B. Phonological Patterns: Analysis from transcript evidence
+
+ 8. COGNITIVE-LINGUISTIC FACTORS (with verified evidence):
+ A. Working Memory: Use verified sentence length and complexity data
+ B. Processing Efficiency: Use verified fluency and error pattern data
+ C. Executive Function: Use verified self-correction patterns
+
+ 9. FLUENCY AND RHYTHM ANALYSIS (with verified quantification):
+ A. Disfluency Patterns: Use verified fluency counts and rates
+ B. Language Flow: Use verified sentence variability data
+
+ 10. QUANTITATIVE METRICS (report ALL verified data):
+ Report all the verified statistical values provided above
+
+ 11. CLINICAL IMPLICATIONS:
+ Based on verified data, provide clinical interpretation and recommendations
+
+ 12. PROGNOSIS AND SUMMARY:
+ Overall profile based on comprehensive verified data
+
+ REQUIREMENTS:
+ - Complete ALL 12 sections
+ - Use ONLY verified statistical values (never recount)
+ - Cite specific examples from annotated transcript
+ - Provide clinical interpretation of the verified data
+ - If response is cut off, end with <CONTINUE>
+ """
+
+         # STEP 5: Get the final comprehensive analysis
+         final_result = call_claude_api_with_continuation(final_prompt)
+
+         return final_result
+
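+     # Hypothetical direct call, bypassing the UI entirely:
+     #   report = run_ultimate_analysis(example_annotated, example_transcript,
+     #                                  "45", "Male", "")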
+     def run_full_pipeline(transcript_content, age, gender, slp_notes):
+         """Run the complete pipeline: annotation first, then analysis."""
+         if not transcript_content or len(transcript_content.strip()) < 50:
+             return "Error: Please provide a longer transcript for analysis.", "", "❌ Error"
+
+         # Step 1: Get annotation
+         annotated_transcript, annotation_status = run_annotation_step(transcript_content, age, gender, slp_notes)
+
+         if annotated_transcript.startswith("❌"):
+             return annotated_transcript, "", annotation_status
+
+         # Step 2: Run analysis
+         analysis_result = run_analysis_step(annotated_transcript, transcript_content, age, gender, slp_notes)
+
+         return annotated_transcript, analysis_result, "✅ Complete analysis finished!"
+
+     def run_complete_casl_analysis(transcript_content, age, gender, slp_notes):
+         """Run the complete CASL analysis pipeline with ultimate analysis."""
+         if not transcript_content or len(transcript_content.strip()) < 50:
+             return "Error: Please provide a longer transcript for analysis.", "", "❌ Error"
+
+         # Step 1: Annotate transcript
+         annotated_transcript, annotation_status = run_annotation_step(transcript_content, age, gender, slp_notes)
+
+         if annotated_transcript.startswith("❌"):
+             return annotated_transcript, "", annotation_status
+
+         # Step 2: Run ultimate analysis
+         ultimate_result = run_ultimate_analysis(annotated_transcript, transcript_content, age, gender, slp_notes)
+
+         return annotated_transcript, ultimate_result, "✅ Complete CASL analysis finished!"
+
+     # Single main event handler
+     ultimate_analysis_btn.click(
+         fn=run_complete_casl_analysis,
+         inputs=[transcript_input, age_input, gender_input, slp_notes_input],
+         outputs=[annotated_output, analysis_output, status_display]
+     )
+
+     annotate_btn.click(
+         fn=annotate_transcript,
+         inputs=[transcript_input_2, age_input_2, gender_input_2, slp_notes_input_2],
+         outputs=[annotation_output]
+     )
+
+     def analyze_standalone(annotated_transcript, age, gender, slp_notes):
+         """Analyze a standalone annotated transcript."""
+         # Extract the original transcript by removing the annotation markers
+         original_transcript = annotated_transcript
+         for marker in ['[FILLER]', '[FALSE_START]', '[REPETITION]', '[REVISION]', '[PAUSE]',
+                        '[CIRCUMLOCUTION]', '[INCOMPLETE]', '[GENERIC]', '[WORD_SEARCH]',
+                        '[GRAM_ERROR]', '[SYNTAX_ERROR]', '[MORPH_ERROR]', '[RUN_ON]',
+                        '[SIMPLE_VOCAB]', '[COMPLEX_VOCAB]', '[SEMANTIC_ERROR]',
+                        '[TOPIC_SHIFT]', '[TANGENT]', '[INAPPROPRIATE]', '[COHERENCE_BREAK]',
+                        '[SIMPLE_SENT]', '[COMPLEX_SENT]', '[COMPOUND_SENT]', '[FIGURATIVE]',
+                        '[PRONOUN_REF]', '[MAZING]', '[PERSEVERATION]']:
+             original_transcript = original_transcript.replace(marker, '')
+
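+         # A regex sweep would also strip marker tags missing from the list above
+         # (hypothetical alternative):
+         #   import re
+         #   original_transcript = re.sub(r"\[[A-Z_]+\]", "", annotated_transcript)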
+         return analyze_with_backup(annotated_transcript, original_transcript, age, gender, slp_notes)
+
+     analyze_only_btn.click(
+         fn=analyze_standalone,
+         inputs=[annotated_input, age_input_3, gender_input_3, slp_notes_input_3],
+         outputs=[analysis_only_output]
+     )
+
+ if __name__ == "__main__":
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         share=True,
+         show_error=True
+     )
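+ # Note (assumption): share=True mainly matters for local runs; on Hugging Face
+ # Spaces the app is already publicly served, and Gradio typically warns that
+ # share links are not needed there.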